From 8512e3ea22708ad40748e2325180fafc8c0155c2 Mon Sep 17 00:00:00 2001
From: hibobmaster <32976627+hibobmaster@users.noreply.github.com>
Date: Wed, 13 Sep 2023 15:27:34 +0800
Subject: [PATCH] Optimize
---
.env.example | 20 +-
.full-env.example | 20 ++
README.md | 6 +-
compose.yaml | 25 +-
config.json.sample | 20 +-
full-config.json.sample | 22 ++
requirements-dev.txt | 9 +
requirements.txt | 2 +-
settings.js.example | 101 -------
src/askgpt.py | 45 ---
src/bot.py | 596 ++++++++++------------------------------
src/flowise.py | 18 +-
src/gptbot.py | 292 ++++++++++++++++++++
src/main.py | 72 +++--
src/pandora_api.py | 111 --------
src/send_message.py | 40 +--
sync_db | Bin 0 -> 135168 bytes
17 files changed, 562 insertions(+), 837 deletions(-)
create mode 100644 .full-env.example
create mode 100644 full-config.json.sample
create mode 100644 requirements-dev.txt
delete mode 100644 settings.js.example
delete mode 100644 src/askgpt.py
create mode 100644 src/gptbot.py
delete mode 100644 src/pandora_api.py
create mode 100644 sync_db
diff --git a/.env.example b/.env.example
index 8d347cd..9922bbf 100644
--- a/.env.example
+++ b/.env.example
@@ -1,20 +1,6 @@
-# Please remove the option that is blank
-HOMESERVER="https://matrix.xxxxxx.xxxx" # required
+HOMESERVER="https://matrix-client.matrix.org" # required
USER_ID="@lullap:xxxxxxxxxxxxx.xxx" # required
-PASSWORD="xxxxxxxxxxxxxxx" # Optional
-DEVICE_ID="xxxxxxxxxxxxxx" # required
+PASSWORD="xxxxxxxxxxxxxxx" # Optional if you use access token
+DEVICE_ID="MatrixChatGPTBot" # required
ROOM_ID="!FYCmBSkCRUXXXXXXXXX:matrix.XXX.XXX" # Optional, if not set, bot will work on the room it is in
OPENAI_API_KEY="xxxxxxxxxxxxxxxxx" # Optional, for !chat and !gpt command
-API_ENDPOINT="xxxxxxxxxxxxxxx" # Optional, for !chat and !bing command
-ACCESS_TOKEN="xxxxxxxxxxxxxxxxxxxxx" # Optional, use user_id and password is recommended
-BARD_TOKEN="xxxxxxxxxxxxxxxxxxxx", # Optional, for !bard command
-BING_AUTH_COOKIE="xxxxxxxxxxxxxxxxxxx" # _U cookie, Optional, for Bing Image Creator
-MARKDOWN_FORMATTED="true" # Optional
-OUTPUT_FOUR_IMAGES="true" # Optional
-IMPORT_KEYS_PATH="element-keys.txt" # Optional, used for E2EE Room
-IMPORT_KEYS_PASSWORD="xxxxxxx" # Optional
-FLOWISE_API_URL="http://localhost:3000/api/v1/prediction/xxxx" # Optional
-FLOWISE_API_KEY="xxxxxxxxxxxxxxxxxxxxxxx" # Optional
-PANDORA_API_ENDPOINT="http://pandora:8008" # Optional, for !talk, !goon command
-PANDORA_API_MODEL="text-davinci-002-render-sha-mobile" # Optional
-TEMPERATURE="0.8" # Optional
diff --git a/.full-env.example b/.full-env.example
new file mode 100644
index 0000000..d1c9f2c
--- /dev/null
+++ b/.full-env.example
@@ -0,0 +1,20 @@
+HOMESERVER="https://matrix-client.matrix.org"
+USER_ID="@lullap:xxxxxxxxxxxxx.xxx"
+PASSWORD="xxxxxxxxxxxxxxx"
+DEVICE_ID="xxxxxxxxxxxxxx"
+ROOM_ID="!FYCmBSkCRUXXXXXXXXX:matrix.XXX.XXX"
+IMPORT_KEYS_PATH="element-keys.txt"
+IMPORT_KEYS_PASSWORD="xxxxxxxxxxxx"
+OPENAI_API_KEY="xxxxxxxxxxxxxxxxx"
+GPT_API_ENDPOINT="https://api.openai.com/v1/chat/completions"
+GPT_MODEL="gpt-3.5-turbo"
+MAX_TOKENS=4000
+TOP_P=1.0
+PRESENCE_PENALTY=0.0
+FREQUENCY_PENALTY=0.0
+REPLY_COUNT=1
+SYSTEM_PROMPT="You are ChatGPT, a large language model trained by OpenAI. Respond conversationally"
+TEMPERATURE=0.8
+FLOWISE_API_URL="http://flowise:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21"
+FLOWISE_API_KEY="U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A="
+TIMEOUT=120.0
diff --git a/README.md b/README.md
index b591b59..d25e6fb 100644
--- a/README.md
+++ b/README.md
@@ -44,12 +44,8 @@ pip install -r requirements.txt
```
3. Create a new config.json file and complete it with the necessary information:
- Use password to login(recommended) or provide `access_token`
If not set:
`room_id`: bot will work in the room where it is in
- `openai_api_key`: `!gpt` `!chat` command will not work
- `api_endpoint`: `!bing` `!chat` command will not work
- `bing_auth_cookie`: `!pic` command will not work
```json
{
@@ -59,7 +55,7 @@ pip install -r requirements.txt
"device_id": "YOUR_DEVICE_ID",
"room_id": "YOUR_ROOM_ID",
"openai_api_key": "YOUR_API_KEY",
- "api_endpoint": "xxxxxxxxx"
+ "gpt_api_endpoint": "xxxxxxxxx"
}
```
diff --git a/compose.yaml b/compose.yaml
index bf50a24..e3c67b8 100644
--- a/compose.yaml
+++ b/compose.yaml
@@ -11,32 +11,13 @@ services:
volumes:
# use env file or config.json
# - ./config.json:/app/config.json
- # use touch to create an empty file db, for persist database only
- - ./db:/app/db
+ # use touch to create empty db file, for persist database only
+ - ./sync_db:/app/sync_db
+ - ./manage_db:/app/manage_db
# import_keys path
# - ./element-keys.txt:/app/element-keys.txt
networks:
- matrix_network
- api:
- # ChatGPT and Bing API
- image: hibobmaster/node-chatgpt-api:latest
- container_name: node-chatgpt-api
- restart: unless-stopped
- volumes:
- - ./settings.js:/app/settings.js
- networks:
- - matrix_network
-
- # pandora:
- # # ChatGPT Web
- # image: pengzhile/pandora
- # container_name: pandora
- # restart: unless-stopped
- # environment:
- # - PANDORA_ACCESS_TOKEN=xxxxxxxxxxxxxx
- # - PANDORA_SERVER=0.0.0.0:8008
- # networks:
- # - matrix_network
networks:
matrix_network:
diff --git a/config.json.sample b/config.json.sample
index 56e4365..05f493e 100644
--- a/config.json.sample
+++ b/config.json.sample
@@ -1,21 +1,7 @@
{
- "homeserver": "https://matrix.qqs.tw",
+ "homeserver": "https://matrix-client.matrix.org",
"user_id": "@lullap:xxxxx.org",
"password": "xxxxxxxxxxxxxxxxxx",
- "device_id": "ECYEOKVPLG",
- "room_id": "!FYCmBSkCRUNvZDBaDQ:matrix.qqs.tw",
- "openai_api_key": "xxxxxxxxxxxxxxxxxxxxxxxx",
- "api_endpoint": "http://api:3000/conversation",
- "access_token": "xxxxxxx",
- "bard_token": "xxxxxxx",
- "bing_auth_cookie": "xxxxxxxxxxx",
- "markdown_formatted": true,
- "output_four_images": true,
- "import_keys_path": "element-keys.txt",
- "import_keys_password": "xxxxxxxxx",
- "flowise_api_url": "http://localhost:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21",
- "flowise_api_key": "U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A=",
- "pandora_api_endpoint": "http://127.0.0.1:8008",
- "pandora_api_model": "text-davinci-002-render-sha-mobile",
- "temperature": 0.8
+ "device_id": "MatrixChatGPTBot",
+ "openai_api_key": "xxxxxxxxxxxxxxxxxxxxxxxx"
}
diff --git a/full-config.json.sample b/full-config.json.sample
new file mode 100644
index 0000000..6d62d4e
--- /dev/null
+++ b/full-config.json.sample
@@ -0,0 +1,22 @@
+{
+ "homeserver": "https://matrix-client.matrix.org",
+ "user_id": "@lullap:xxxxx.org",
+ "password": "xxxxxxxxxxxxxxxxxx",
+ "device_id": "MatrixChatGPTBot",
+ "room_id": "!xxxxxxxxxxxxxxxxxxxxxx:xxxxx.org",
+ "import_keys_path": "element-keys.txt",
+ "import_keys_password": "xxxxxxxxxxxxxxxxxxxx",
+ "openai_api_key": "xxxxxxxxxxxxxxxxxxxxxxxx",
+ "gpt_api_endpoint": "https://api.openai.com/v1/chat/completions",
+ "gpt_model": "gpt-3.5-turbo",
+ "max_tokens": 4000,
+ "top_p": 1.0,
+ "presence_penalty": 0.0,
+ "frequency_penalty": 0.0,
+ "reply_count": 1,
+ "temperature": 0.8,
+ "system_prompt": "You are ChatGPT, a large language model trained by OpenAI. Respond conversationally",
+ "flowise_api_url": "http://flowise:3000/api/v1/prediction/6deb3c89-45bf-4ac4-a0b0-b2d5ef249d21",
+ "flowise_api_key": "U3pe0bbVDWOyoJtsDzFJjRvHKTP3FRjODwuM78exC3A=",
+ "timeout": 120.0
+}
diff --git a/requirements-dev.txt b/requirements-dev.txt
new file mode 100644
index 0000000..39a9b58
--- /dev/null
+++ b/requirements-dev.txt
@@ -0,0 +1,9 @@
+aiofiles
+httpx
+Markdown
+matrix-nio[e2e]
+Pillow
+tiktoken
+tenacity
+python-magic
+pytest
diff --git a/requirements.txt b/requirements.txt
index e884258..85bf06f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
aiofiles
-aiohttp
+httpx
Markdown
matrix-nio[e2e]
Pillow
diff --git a/settings.js.example b/settings.js.example
deleted file mode 100644
index 57ec272..0000000
--- a/settings.js.example
+++ /dev/null
@@ -1,101 +0,0 @@
-export default {
- // Options for the Keyv cache, see https://www.npmjs.com/package/keyv.
- // This is used for storing conversations, and supports additional drivers (conversations are stored in memory by default).
- // Only necessary when using `ChatGPTClient`, or `BingAIClient` in jailbreak mode.
- cacheOptions: {},
- // If set, `ChatGPTClient` and `BingAIClient` will use `keyv-file` to store conversations to this JSON file instead of in memory.
- // However, `cacheOptions.store` will override this if set
- storageFilePath: process.env.STORAGE_FILE_PATH || './cache.json',
- chatGptClient: {
- // Your OpenAI API key (for `ChatGPTClient`)
- openaiApiKey: process.env.OPENAI_API_KEY || '',
- // (Optional) Support for a reverse proxy for the completions endpoint (private API server).
- // Warning: This will expose your `openaiApiKey` to a third party. Consider the risks before using this.
- // reverseProxyUrl: 'https://chatgpt.hato.ai/completions',
- // (Optional) Parameters as described in https://platform.openai.com/docs/api-reference/completions
- modelOptions: {
- // You can override the model name and any other parameters here.
- // The default model is `gpt-3.5-turbo`.
- model: 'gpt-3.5-turbo',
- // Set max_tokens here to override the default max_tokens of 1000 for the completion.
- // max_tokens: 1000,
- },
- // (Optional) Davinci models have a max context length of 4097 tokens, but you may need to change this for other models.
- // maxContextTokens: 4097,
- // (Optional) You might want to lower this to save money if using a paid model like `text-davinci-003`.
- // Earlier messages will be dropped until the prompt is within the limit.
- // maxPromptTokens: 3097,
- // (Optional) Set custom instructions instead of "You are ChatGPT...".
- // (Optional) Set a custom name for the user
- // userLabel: 'User',
- // (Optional) Set a custom name for ChatGPT ("ChatGPT" by default)
- // chatGptLabel: 'Bob',
- // promptPrefix: 'You are Bob, a cowboy in Western times...',
- // A proxy string like "http://:"
- proxy: '',
- // (Optional) Set to true to enable `console.debug()` logging
- debug: false,
- },
- // Options for the Bing client
- bingAiClient: {
- // Necessary for some people in different countries, e.g. China (https://cn.bing.com)
- host: '',
- // The "_U" cookie value from bing.com
- userToken: '',
- // If the above doesn't work, provide all your cookies as a string instead
- cookies: '',
- // A proxy string like "http://:"
- proxy: '',
- // (Optional) Set 'x-forwarded-for' for the request. You can use a fixed IPv4 address or specify a range using CIDR notation,
- // and the program will randomly select an address within that range. The 'x-forwarded-for' is not used by default now.
- // xForwardedFor: '13.104.0.0/14',
- // (Optional) Set 'genImage' to true to enable bing to create images for you. It's disabled by default.
- // features: {
- // genImage: true,
- // },
- // (Optional) Set to true to enable `console.debug()` logging
- debug: false,
- },
- chatGptBrowserClient: {
- // (Optional) Support for a reverse proxy for the conversation endpoint (private API server).
- // Warning: This will expose your access token to a third party. Consider the risks before using this.
- reverseProxyUrl: 'https://bypass.churchless.tech/api/conversation',
- // Access token from https://chat.openai.com/api/auth/session
- accessToken: '',
- // Cookies from chat.openai.com (likely not required if using reverse proxy server).
- cookies: '',
- // A proxy string like "http://:"
- proxy: '',
- // (Optional) Set to true to enable `console.debug()` logging
- debug: false,
- },
- // Options for the API server
- apiOptions: {
- port: process.env.API_PORT || 3000,
- host: process.env.API_HOST || 'localhost',
- // (Optional) Set to true to enable `console.debug()` logging
- debug: false,
- // (Optional) Possible options: "chatgpt", "chatgpt-browser", "bing". (Default: "chatgpt")
- // clientToUse: 'bing',
- // (Optional) Generate titles for each conversation for clients that support it (only ChatGPTClient for now).
- // This will be returned as a `title` property in the first response of the conversation.
- generateTitles: false,
- // (Optional) Set this to allow changing the client or client options in POST /conversation.
- // To disable, set to `null`.
- perMessageClientOptionsWhitelist: {
- // The ability to switch clients using `clientOptions.clientToUse` will be disabled if `validClientsToUse` is not set.
- // To allow switching clients per message, you must set `validClientsToUse` to a non-empty array.
- validClientsToUse: ['bing', 'chatgpt'], // values from possible `clientToUse` options above
- // The Object key, e.g. "chatgpt", is a value from `validClientsToUse`.
- // If not set, ALL options will be ALLOWED to be changed. For example, `bing` is not defined in `perMessageClientOptionsWhitelist` above,
- // so all options for `bingAiClient` will be allowed to be changed.
- // If set, ONLY the options listed here will be allowed to be changed.
- // In this example, each array element is a string representing a property in `chatGptClient` above.
- },
- },
- // Options for the CLI app
- cliOptions: {
- // (Optional) Possible options: "chatgpt", "bing".
- // clientToUse: 'bing',
- },
-};
diff --git a/src/askgpt.py b/src/askgpt.py
deleted file mode 100644
index d3c37ca..0000000
--- a/src/askgpt.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import json
-
-import aiohttp
-from log import getlogger
-
-logger = getlogger()
-
-
-class askGPT:
- def __init__(self, session: aiohttp.ClientSession):
- self.session = session
-
- async def oneTimeAsk(
- self, prompt: str, api_endpoint: str, headers: dict, temperature: float = 0.8
- ) -> str:
- jsons = {
- "model": "gpt-3.5-turbo",
- "messages": [
- {
- "role": "user",
- "content": prompt,
- },
- ],
- "temperature": temperature,
- }
- max_try = 2
- while max_try > 0:
- try:
- async with self.session.post(
- url=api_endpoint,
- json=jsons,
- headers=headers,
- timeout=120,
- ) as response:
- status_code = response.status
- if not status_code == 200:
- # print failed reason
- logger.warning(str(response.reason))
- max_try = max_try - 1
- continue
-
- resp = await response.read()
- return json.loads(resp)["choices"][0]["message"]["content"]
- except Exception as e:
- raise Exception(e)
diff --git a/src/bot.py b/src/bot.py
index ca57f6f..de785ef 100644
--- a/src/bot.py
+++ b/src/bot.py
@@ -5,9 +5,9 @@ import re
import sys
import traceback
from typing import Union, Optional
-import uuid
-import aiohttp
+import httpx
+
from nio import (
AsyncClient,
AsyncClientConfig,
@@ -28,19 +28,15 @@ from nio import (
)
from nio.store.database import SqliteStore
-from askgpt import askGPT
-from chatgpt_bing import GPTBOT
-from BingImageGen import ImageGenAsync
from log import getlogger
from send_image import send_room_image
from send_message import send_room_message
-from bard import Bardbot
from flowise import flowise_query
-from pandora_api import Pandora
+from gptbot import Chatbot
logger = getlogger()
-chatgpt_api_endpoint = "https://api.openai.com/v1/chat/completions"
-base_path = Path(os.path.dirname(__file__)).parent
+DEVICE_NAME = "MatrixChatGPTBot"
+GENERAL_ERROR_MESSAGE = "Something went wrong, please try again or contact admin."
class Bot:
@@ -48,77 +44,75 @@ class Bot:
self,
homeserver: str,
user_id: str,
- device_id: str,
- api_endpoint: Optional[str] = None,
- openai_api_key: Union[str, None] = None,
- temperature: Union[float, None] = None,
- room_id: Union[str, None] = None,
password: Union[str, None] = None,
- access_token: Union[str, None] = None,
- bard_token: Union[str, None] = None,
- jailbreakEnabled: Union[bool, None] = True,
- bing_auth_cookie: Union[str, None] = "",
- markdown_formatted: Union[bool, None] = False,
- output_four_images: Union[bool, None] = False,
+ device_id: str = "MatrixChatGPTBot",
+ room_id: Union[str, None] = None,
import_keys_path: Optional[str] = None,
import_keys_password: Optional[str] = None,
+ openai_api_key: Union[str, None] = None,
+ gpt_api_endpoint: Optional[str] = None,
+ gpt_model: Optional[str] = None,
+ max_tokens: Optional[int] = None,
+ top_p: Optional[float] = None,
+ presence_penalty: Optional[float] = None,
+ frequency_penalty: Optional[float] = None,
+ reply_count: Optional[int] = None,
+ system_prompt: Optional[str] = None,
+ temperature: Union[float, None] = None,
flowise_api_url: Optional[str] = None,
flowise_api_key: Optional[str] = None,
- pandora_api_endpoint: Optional[str] = None,
- pandora_api_model: Optional[str] = None,
+ timeout: Union[float, None] = None,
):
if homeserver is None or user_id is None or device_id is None:
logger.warning("homeserver && user_id && device_id is required")
sys.exit(1)
- if password is None and access_token is None:
- logger.warning("password or access_toekn is required")
+ if password is None:
+ logger.warning("password is required")
sys.exit(1)
- self.homeserver = homeserver
- self.user_id = user_id
- self.password = password
- self.access_token = access_token
- self.bard_token = bard_token
- self.device_id = device_id
- self.room_id = room_id
- self.openai_api_key = openai_api_key
- self.bing_auth_cookie = bing_auth_cookie
- self.api_endpoint = api_endpoint
- self.import_keys_path = import_keys_path
- self.import_keys_password = import_keys_password
- self.flowise_api_url = flowise_api_url
- self.flowise_api_key = flowise_api_key
- self.pandora_api_endpoint = pandora_api_endpoint
- self.temperature = temperature
+ self.homeserver: str = homeserver
+ self.user_id: str = user_id
+ self.password: str = password
+ self.device_id: str = device_id
+ self.room_id: str = room_id
- self.session = aiohttp.ClientSession()
+ self.openai_api_key: str = openai_api_key
+ self.gpt_api_endpoint: str = (
+ gpt_api_endpoint or "https://api.openai.com/v1/chat/completions"
+ )
+ self.gpt_model: str = gpt_model or "gpt-3.5-turbo"
+ self.max_tokens: int = max_tokens or 4000
+ self.top_p: float = top_p or 1.0
+ self.temperature: float = temperature or 0.8
+ self.presence_penalty: float = presence_penalty or 0.0
+ self.frequency_penalty: float = frequency_penalty or 0.0
+ self.reply_count: int = reply_count or 1
+ self.system_prompt: str = (
+ system_prompt
+ or "You are ChatGPT, \
+ a large language model trained by OpenAI. Respond conversationally"
+ )
- if openai_api_key is not None:
- if not self.openai_api_key.startswith("sk-"):
- logger.warning("invalid openai api key")
- sys.exit(1)
+ self.import_keys_path: str = import_keys_path
+ self.import_keys_password: str = import_keys_password
+ self.flowise_api_url: str = flowise_api_url
+ self.flowise_api_key: str = flowise_api_key
- if jailbreakEnabled is None:
- self.jailbreakEnabled = True
- else:
- self.jailbreakEnabled = jailbreakEnabled
+ self.timeout: float = timeout or 120.0
- if markdown_formatted is None:
- self.markdown_formatted = False
- else:
- self.markdown_formatted = markdown_formatted
+ self.base_path = Path(os.path.dirname(__file__)).parent
- if output_four_images is None:
- self.output_four_images = False
- else:
- self.output_four_images = output_four_images
+ self.httpx_client = httpx.AsyncClient(
+ follow_redirects=True,
+ timeout=self.timeout,
+ )
# initialize AsyncClient object
- self.store_path = base_path
+ self.store_path = self.base_path
self.config = AsyncClientConfig(
store=SqliteStore,
- store_name="db",
+ store_name="sync_db",
store_sync_tokens=True,
encryption_enabled=True,
)
@@ -130,8 +124,21 @@ class Bot:
store_path=self.store_path,
)
- if self.access_token is not None:
- self.client.access_token = self.access_token
+ # initialize Chatbot object
+ self.chatbot = Chatbot(
+ aclient=self.httpx_client,
+ api_key=self.openai_api_key,
+ api_url=self.gpt_api_endpoint,
+ engine=self.gpt_model,
+ timeout=self.timeout,
+ max_tokens=self.max_tokens,
+ top_p=self.top_p,
+ presence_penalty=self.presence_penalty,
+ frequency_penalty=self.frequency_penalty,
+ reply_count=self.reply_count,
+ system_prompt=self.system_prompt,
+ temperature=self.temperature,
+ )
# setup event callbacks
self.client.add_event_callback(self.message_callback, (RoomMessageText,))
@@ -144,81 +151,22 @@ class Bot:
# regular expression to match keyword commands
self.gpt_prog = re.compile(r"^\s*!gpt\s*(.+)$")
self.chat_prog = re.compile(r"^\s*!chat\s*(.+)$")
- self.bing_prog = re.compile(r"^\s*!bing\s*(.+)$")
- self.bard_prog = re.compile(r"^\s*!bard\s*(.+)$")
self.pic_prog = re.compile(r"^\s*!pic\s*(.+)$")
self.lc_prog = re.compile(r"^\s*!lc\s*(.+)$")
self.help_prog = re.compile(r"^\s*!help\s*.*$")
- self.talk_prog = re.compile(r"^\s*!talk\s*(.+)$")
- self.goon_prog = re.compile(r"^\s*!goon\s*.*$")
self.new_prog = re.compile(r"^\s*!new\s*(.+)$")
- # initialize askGPT class
- self.askgpt = askGPT(self.session)
- # request header for !gpt command
- self.gptheaders = {
- "Content-Type": "application/json",
- "Authorization": f"Bearer {self.openai_api_key}",
- }
-
- # initialize bing and chatgpt
- if self.api_endpoint is not None:
- self.gptbot = GPTBOT(self.api_endpoint, self.session)
- self.chatgpt_data = {}
- self.bing_data = {}
-
- # initialize BingImageGenAsync
- if self.bing_auth_cookie != "":
- self.imageGen = ImageGenAsync(self.bing_auth_cookie, quiet=True)
-
- # initialize pandora
- if pandora_api_endpoint is not None:
- self.pandora = Pandora(
- api_endpoint=pandora_api_endpoint, clientSession=self.session
- )
- if pandora_api_model is None:
- self.pandora_api_model = "text-davinci-002-render-sha-mobile"
- else:
- self.pandora_api_model = pandora_api_model
-
- self.pandora_data = {}
-
- # initialize bard
- self.bard_data = {}
-
- def __del__(self):
- try:
- loop = asyncio.get_running_loop()
- except RuntimeError:
- loop = asyncio.new_event_loop()
- asyncio.set_event_loop(loop)
- loop.run_until_complete(self._close())
-
- async def _close(self):
- await self.session.close()
+ async def close(self, task: asyncio.Task) -> None:
+ await self.httpx_client.aclose()
+ await self.client.close()
+ task.cancel()
+ logger.info("Bot closed!")
def chatgpt_session_init(self, sender_id: str) -> None:
self.chatgpt_data[sender_id] = {
"first_time": True,
}
- def bing_session_init(self, sender_id: str) -> None:
- self.bing_data[sender_id] = {
- "first_time": True,
- }
-
- def pandora_session_init(self, sender_id: str) -> None:
- self.pandora_data[sender_id] = {
- "conversation_id": None,
- "parent_message_id": str(uuid.uuid4()),
- "first_time": True,
- }
-
- async def bard_session_init(self, sender_id: str) -> None:
- self.bard_data[sender_id] = {
- "instance": await Bardbot.create(self.bard_token, 60),
- }
-
# message_callback RoomMessageText event
async def message_callback(self, room: MatrixRoom, event: RoomMessageText) -> None:
if self.room_id is None:
@@ -267,7 +215,7 @@ class Bot:
except Exception as e:
logger.error(e, exc_info=True)
- if self.api_endpoint is not None:
+ if self.gpt_api_endpoint is not None:
# chatgpt
n = self.chat_prog.match(content_body)
if n:
@@ -293,58 +241,6 @@ class Bot:
self.client, room_id, reply_message="API_KEY not provided"
)
- # bing ai
- # if self.bing_api_endpoint != "":
- # bing ai can be used without cookie
- b = self.bing_prog.match(content_body)
- if b:
- if sender_id not in self.bing_data:
- self.bing_session_init(sender_id)
- prompt = b.group(1)
- # raw_content_body used for construct formatted_body
- try:
- asyncio.create_task(
- self.bing(
- room_id,
- reply_to_event_id,
- prompt,
- sender_id,
- raw_user_message,
- )
- )
- except Exception as e:
- logger.error(e, exc_info=True)
-
- # Image Generation by Microsoft Bing
- if self.bing_auth_cookie != "":
- i = self.pic_prog.match(content_body)
- if i:
- prompt = i.group(1)
- try:
- asyncio.create_task(self.pic(room_id, prompt))
- except Exception as e:
- logger.error(e, exc_info=True)
-
- # Google's Bard
- if self.bard_token is not None:
- if sender_id not in self.bard_data:
- await self.bard_session_init(sender_id)
- b = self.bard_prog.match(content_body)
- if b:
- prompt = b.group(1)
- try:
- asyncio.create_task(
- self.bard(
- room_id,
- reply_to_event_id,
- prompt,
- sender_id,
- raw_user_message,
- )
- )
- except Exception as e:
- logger.error(e, exc_info=True)
-
# lc command
if self.flowise_api_url is not None:
m = self.lc_prog.match(content_body)
@@ -364,46 +260,10 @@ class Bot:
await send_room_message(self.client, room_id, reply_message={e})
logger.error(e, exc_info=True)
- # pandora
- if self.pandora_api_endpoint is not None:
- t = self.talk_prog.match(content_body)
- if t:
- if sender_id not in self.pandora_data:
- self.pandora_session_init(sender_id)
- prompt = t.group(1)
- try:
- asyncio.create_task(
- self.talk(
- room_id,
- reply_to_event_id,
- prompt,
- sender_id,
- raw_user_message,
- )
- )
- except Exception as e:
- logger.error(e, exc_info=True)
-
- g = self.goon_prog.match(content_body)
- if g:
- if sender_id not in self.pandora_data:
- self.pandora_session_init(sender_id)
- try:
- asyncio.create_task(
- self.goon(
- room_id,
- reply_to_event_id,
- sender_id,
- raw_user_message,
- )
- )
- except Exception as e:
- logger.error(e, exc_info=True)
-
# !new command
n = self.new_prog.match(content_body)
if n:
- new_command_kind = n.group(1)
+ new_command = n.group(1)
try:
asyncio.create_task(
self.new(
@@ -411,7 +271,7 @@ class Bot:
reply_to_event_id,
sender_id,
raw_user_message,
- new_command_kind,
+ new_command,
)
)
except Exception as e:
@@ -421,7 +281,11 @@ class Bot:
h = self.help_prog.match(content_body)
if h:
try:
- asyncio.create_task(self.help(room_id))
+ asyncio.create_task(
+ self.help(
+ room_id, reply_to_event_id, sender_id, raw_user_message
+ )
+ )
except Exception as e:
logger.error(e, exc_info=True)
@@ -670,7 +534,7 @@ class Bot:
self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
):
try:
- await self.client.room_typing(room_id, timeout=300000)
+ await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000)
if (
self.chatgpt_data[sender_id]["first_time"]
or "conversationId" not in self.chatgpt_data[sender_id]
@@ -705,128 +569,43 @@ class Bot:
self.client,
room_id,
reply_message=content,
- reply_to_event_id="",
+ reply_to_event_id=reply_to_event_id,
sender_id=sender_id,
user_message=raw_user_message,
- markdown_formatted=self.markdown_formatted,
)
- except Exception as e:
- await send_room_message(self.client, room_id, reply_message=str(e))
+ except Exception:
+ await send_room_message(
+ self.client,
+ room_id,
+ reply_message=GENERAL_ERROR_MESSAGE,
+ reply_to_event_id=reply_to_event_id,
+ )
# !gpt command
async def gpt(
self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
) -> None:
try:
- # sending typing state
- await self.client.room_typing(room_id, timeout=30000)
- # timeout 300s
- text = await asyncio.wait_for(
- self.askgpt.oneTimeAsk(
- prompt, chatgpt_api_endpoint, self.gptheaders, self.temperature
- ),
- timeout=300,
+ # sending typing state, seconds to milliseconds
+ await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000)
+ responseMessage = await self.chatbot.oneTimeAsk(
+ prompt=prompt,
)
- text = text.strip()
await send_room_message(
self.client,
room_id,
- reply_message=text,
- reply_to_event_id="",
+ reply_message=responseMessage.strip(),
+ reply_to_event_id=reply_to_event_id,
sender_id=sender_id,
user_message=raw_user_message,
- markdown_formatted=self.markdown_formatted,
)
except Exception:
await send_room_message(
self.client,
room_id,
- reply_message="Error encountered, please try again or contact admin.",
- )
-
- # !bing command
- async def bing(
- self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
- ) -> None:
- try:
- # sending typing state
- await self.client.room_typing(room_id, timeout=300000)
-
- if (
- self.bing_data[sender_id]["first_time"]
- or "conversationId" not in self.bing_data[sender_id]
- ):
- self.bing_data[sender_id]["first_time"] = False
- payload = {
- "message": prompt,
- "clientOptions": {
- "clientToUse": "bing",
- },
- }
- else:
- payload = {
- "message": prompt,
- "clientOptions": {
- "clientToUse": "bing",
- },
- "conversationSignature": self.bing_data[sender_id][
- "conversationSignature"
- ],
- "conversationId": self.bing_data[sender_id]["conversationId"],
- "clientId": self.bing_data[sender_id]["clientId"],
- "invocationId": self.bing_data[sender_id]["invocationId"],
- }
- resp = await self.gptbot.queryBing(payload)
- content = "".join(
- [body["text"] for body in resp["details"]["adaptiveCards"][0]["body"]]
- )
- self.bing_data[sender_id]["conversationSignature"] = resp[
- "conversationSignature"
- ]
- self.bing_data[sender_id]["conversationId"] = resp["conversationId"]
- self.bing_data[sender_id]["clientId"] = resp["clientId"]
- self.bing_data[sender_id]["invocationId"] = resp["invocationId"]
-
- text = content.strip()
- await send_room_message(
- self.client,
- room_id,
- reply_message=text,
- reply_to_event_id="",
- sender_id=sender_id,
- user_message=raw_user_message,
- markdown_formatted=self.markdown_formatted,
- )
- except Exception as e:
- await send_room_message(self.client, room_id, reply_message=str(e))
-
- # !bard command
- async def bard(
- self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
- ) -> None:
- try:
- # sending typing state
- await self.client.room_typing(room_id)
- response = await self.bard_data[sender_id]["instance"].ask(prompt)
-
- content = str(response["content"]).strip()
- await send_room_message(
- self.client,
- room_id,
- reply_message=content,
- reply_to_event_id="",
- sender_id=sender_id,
- user_message=raw_user_message,
- markdown_formatted=self.markdown_formatted,
- )
- except TimeoutError:
- await send_room_message(self.client, room_id, reply_message="TimeoutError")
- except Exception:
- await send_room_message(
- self.client,
- room_id,
- reply_message="Error calling Bard API, please contact admin.",
+ reply_message=GENERAL_ERROR_MESSAGE,
+ reply_to_event_id=reply_to_event_id,
)
# !lc command
@@ -835,120 +614,32 @@ class Bot:
) -> None:
try:
# sending typing state
- await self.client.room_typing(room_id)
+ await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000)
if self.flowise_api_key is not None:
headers = {"Authorization": f"Bearer {self.flowise_api_key}"}
- response = await flowise_query(
- self.flowise_api_url, prompt, self.session, headers
+ responseMessage = await flowise_query(
+ self.flowise_api_url, prompt, self.httpx_client, headers
)
else:
- response = await flowise_query(
- self.flowise_api_url, prompt, self.session
+ responseMessage = await flowise_query(
+ self.flowise_api_url, prompt, self.httpx_client
)
await send_room_message(
self.client,
room_id,
- reply_message=response,
- reply_to_event_id="",
+ reply_message=responseMessage.strip(),
+ reply_to_event_id=reply_to_event_id,
sender_id=sender_id,
user_message=raw_user_message,
- markdown_formatted=self.markdown_formatted,
)
except Exception:
await send_room_message(
self.client,
room_id,
- reply_message="Error calling flowise API, please contact admin.",
+ reply_message=GENERAL_ERROR_MESSAGE,
+ reply_to_event_id=reply_to_event_id,
)
- # !talk command
- async def talk(
- self, room_id, reply_to_event_id, prompt, sender_id, raw_user_message
- ) -> None:
- try:
- if self.pandora_data[sender_id]["conversation_id"] is not None:
- data = {
- "prompt": prompt,
- "model": self.pandora_api_model,
- "parent_message_id": self.pandora_data[sender_id][
- "parent_message_id"
- ],
- "conversation_id": self.pandora_data[sender_id]["conversation_id"],
- "stream": False,
- }
- else:
- data = {
- "prompt": prompt,
- "model": self.pandora_api_model,
- "parent_message_id": self.pandora_data[sender_id][
- "parent_message_id"
- ],
- "stream": False,
- }
- # sending typing state
- await self.client.room_typing(room_id)
- response = await self.pandora.talk(data)
- self.pandora_data[sender_id]["conversation_id"] = response[
- "conversation_id"
- ]
- self.pandora_data[sender_id]["parent_message_id"] = response["message"][
- "id"
- ]
- content = response["message"]["content"]["parts"][0]
- if self.pandora_data[sender_id]["first_time"]:
- self.pandora_data[sender_id]["first_time"] = False
- data = {
- "model": self.pandora_api_model,
- "message_id": self.pandora_data[sender_id]["parent_message_id"],
- }
- await self.pandora.gen_title(
- data, self.pandora_data[sender_id]["conversation_id"]
- )
- await send_room_message(
- self.client,
- room_id,
- reply_message=content,
- reply_to_event_id="",
- sender_id=sender_id,
- user_message=raw_user_message,
- markdown_formatted=self.markdown_formatted,
- )
- except Exception as e:
- await send_room_message(self.client, room_id, reply_message=str(e))
-
- # !goon command
- async def goon(
- self, room_id, reply_to_event_id, sender_id, raw_user_message
- ) -> None:
- try:
- # sending typing state
- await self.client.room_typing(room_id)
- data = {
- "model": self.pandora_api_model,
- "parent_message_id": self.pandora_data[sender_id]["parent_message_id"],
- "conversation_id": self.pandora_data[sender_id]["conversation_id"],
- "stream": False,
- }
- response = await self.pandora.goon(data)
- self.pandora_data[sender_id]["conversation_id"] = response[
- "conversation_id"
- ]
- self.pandora_data[sender_id]["parent_message_id"] = response["message"][
- "id"
- ]
- content = response["message"]["content"]["parts"][0]
- await send_room_message(
- self.client,
- room_id,
- reply_message=content,
- reply_to_event_id="",
- sender_id=sender_id,
- user_message=raw_user_message,
- markdown_formatted=self.markdown_formatted,
- )
- except Exception as e:
- await send_room_message(self.client, room_id, reply_message=str(e))
-
# !new command
async def new(
self,
@@ -956,29 +647,14 @@ class Bot:
reply_to_event_id,
sender_id,
raw_user_message,
- new_command_kind,
+ new_command,
) -> None:
try:
- if "talk" in new_command_kind:
- self.pandora_session_init(sender_id)
- content = (
- "New conversation created, please use !talk to start chatting!"
- )
- elif "chat" in new_command_kind:
+ if "chat" in new_command:
self.chatgpt_session_init(sender_id)
content = (
"New conversation created, please use !chat to start chatting!"
)
- elif "bing" in new_command_kind:
- self.bing_session_init(sender_id)
- content = (
- "New conversation created, please use !bing to start chatting!"
- )
- elif "bard" in new_command_kind:
- await self.bard_session_init(sender_id)
- content = (
- "New conversation created, please use !bard to start chatting!"
- )
else:
content = "Unkown keyword, please use !help to see the usage!"
@@ -986,32 +662,41 @@ class Bot:
self.client,
room_id,
reply_message=content,
- reply_to_event_id="",
+ reply_to_event_id=reply_to_event_id,
sender_id=sender_id,
user_message=raw_user_message,
- markdown_formatted=self.markdown_formatted,
)
- except Exception as e:
- await send_room_message(self.client, room_id, reply_message=str(e))
+ except Exception:
+ await send_room_message(
+ self.client,
+ room_id,
+ reply_message=GENERAL_ERROR_MESSAGE,
+ reply_to_event_id=reply_to_event_id,
+ )
# !pic command
- async def pic(self, room_id, prompt):
+ async def pic(self, room_id, prompt, replay_to_event_id):
try:
- await self.client.room_typing(room_id, timeout=300000)
+ await self.client.room_typing(room_id, timeout=int(self.timeout) * 1000)
# generate image
links = await self.imageGen.get_images(prompt)
image_path_list = await self.imageGen.save_images(
- links, base_path / "images", self.output_four_images
+ links, self.base_path / "images", self.output_four_images
)
# send image
for image_path in image_path_list:
await send_room_image(self.client, room_id, image_path)
await self.client.room_typing(room_id, typing_state=False)
except Exception as e:
- await send_room_message(self.client, room_id, reply_message=str(e))
+ await send_room_message(
+ self.client,
+ room_id,
+ reply_message=str(e),
+ reply_to_event_id=replay_to_event_id,
+ )
# !help command
- async def help(self, room_id):
+ async def help(self, room_id, reply_to_event_id, sender_id, user_message):
help_info = (
"!gpt [prompt], generate a one time response without context conversation\n"
+ "!chat [prompt], chat with context conversation\n"
@@ -1025,21 +710,24 @@ class Bot:
+ "!help, help message"
) # noqa: E501
- await send_room_message(self.client, room_id, reply_message=help_info)
+ await send_room_message(
+ self.client,
+ room_id,
+ reply_message=help_info,
+ sender_id=sender_id,
+ user_message=user_message,
+ reply_to_event_id=reply_to_event_id,
+ )
# bot login
async def login(self) -> None:
- if self.access_token is not None:
- logger.info("Login via access_token")
- else:
- logger.info("Login via password")
- try:
- resp = await self.client.login(password=self.password)
- if not isinstance(resp, LoginResponse):
- logger.error("Login Failed")
- sys.exit(1)
- except Exception as e:
- logger.error(f"Error: {e}", exc_info=True)
+ resp = await self.client.login(password=self.password, device_name=DEVICE_NAME)
+ if not isinstance(resp, LoginResponse):
+ logger.error("Login Failed")
+ await self.httpx_client.aclose()
+ await self.client.close()
+ sys.exit(1)
+ logger.info("Success login via password")
# import keys
async def import_keys(self):
diff --git a/src/flowise.py b/src/flowise.py
index 500dbf6..a4a99b2 100644
--- a/src/flowise.py
+++ b/src/flowise.py
@@ -1,8 +1,8 @@
-import aiohttp
+import httpx
async def flowise_query(
- api_url: str, prompt: str, session: aiohttp.ClientSession, headers: dict = None
+ api_url: str, prompt: str, session: httpx.AsyncClient, headers: dict = None
) -> str:
"""
Sends a query to the Flowise API and returns the response.
@@ -24,17 +24,15 @@ async def flowise_query(
)
else:
response = await session.post(api_url, json={"question": prompt})
- return await response.json()
+ return await response.text()
async def test():
- session = aiohttp.ClientSession()
- api_url = (
- "http://127.0.0.1:3000/api/v1/prediction/683f9ea8-e670-4d51-b657-0886eab9cea1"
- )
- prompt = "What is the capital of France?"
- response = await flowise_query(api_url, prompt, session)
- print(response)
+ async with httpx.AsyncClient() as session:
+ api_url = "http://127.0.0.1:3000/api/v1/prediction/683f9ea8-e670-4d51-b657-0886eab9cea1"
+ prompt = "What is the capital of France?"
+ response = await flowise_query(api_url, prompt, session)
+ print(response)
if __name__ == "__main__":
diff --git a/src/gptbot.py b/src/gptbot.py
new file mode 100644
index 0000000..8750cd5
--- /dev/null
+++ b/src/gptbot.py
@@ -0,0 +1,292 @@
+"""
+Code derived from https://github.com/acheong08/ChatGPT/blob/main/src/revChatGPT/V3.py
+A simple wrapper for the official ChatGPT API
+"""
+import json
+from typing import AsyncGenerator
+from tenacity import retry, stop_after_attempt, wait_random_exponential
+
+import httpx
+import tiktoken
+
+
+ENGINES = [
+ "gpt-3.5-turbo",
+ "gpt-3.5-turbo-16k",
+ "gpt-3.5-turbo-0613",
+ "gpt-3.5-turbo-16k-0613",
+ "gpt-4",
+ "gpt-4-32k",
+ "gpt-4-0613",
+ "gpt-4-32k-0613",
+]
+
+
+class Chatbot:
+ """
+ Official ChatGPT API
+ """
+
+ def __init__(
+ self,
+ aclient: httpx.AsyncClient,
+ api_key: str,
+ api_url: str = None,
+ engine: str = None,
+ timeout: float = None,
+ max_tokens: int = None,
+ temperature: float = 0.8,
+ top_p: float = 1.0,
+ presence_penalty: float = 0.0,
+ frequency_penalty: float = 0.0,
+ reply_count: int = 1,
+ truncate_limit: int = None,
+ system_prompt: str = None,
+ ) -> None:
+ """
+ Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys)
+ """
+ self.engine: str = engine or "gpt-3.5-turbo"
+ self.api_key: str = api_key
+ self.api_url: str = api_url or "https://api.openai.com/v1/chat/completions"
+ self.system_prompt: str = (
+ system_prompt
+ or "You are ChatGPT, \
+ a large language model trained by OpenAI. Respond conversationally"
+ )
+ self.max_tokens: int = max_tokens or (
+ 31000
+ if "gpt-4-32k" in engine
+ else 7000
+ if "gpt-4" in engine
+ else 15000
+ if "gpt-3.5-turbo-16k" in engine
+ else 4000
+ )
+ self.truncate_limit: int = truncate_limit or (
+ 30500
+ if "gpt-4-32k" in engine
+ else 6500
+ if "gpt-4" in engine
+ else 14500
+ if "gpt-3.5-turbo-16k" in engine
+ else 3500
+ )
+ self.temperature: float = temperature
+ self.top_p: float = top_p
+ self.presence_penalty: float = presence_penalty
+ self.frequency_penalty: float = frequency_penalty
+ self.reply_count: int = reply_count
+ self.timeout: float = timeout
+
+ self.aclient = aclient
+
+ self.conversation: dict[str, list[dict]] = {
+ "default": [
+ {
+ "role": "system",
+ "content": system_prompt,
+ },
+ ],
+ }
+
+ if self.get_token_count("default") > self.max_tokens:
+ raise Exception("System prompt is too long")
+
+ def add_to_conversation(
+ self,
+ message: str,
+ role: str,
+ convo_id: str = "default",
+ ) -> None:
+ """
+ Add a message to the conversation
+ """
+ self.conversation[convo_id].append({"role": role, "content": message})
+
+ def __truncate_conversation(self, convo_id: str = "default") -> None:
+ """
+ Truncate the conversation
+ """
+ while True:
+ if (
+ self.get_token_count(convo_id) > self.truncate_limit
+ and len(self.conversation[convo_id]) > 1
+ ):
+ # Don't remove the first message
+ self.conversation[convo_id].pop(1)
+ else:
+ break
+
+ # https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
+ def get_token_count(self, convo_id: str = "default") -> int:
+ """
+ Get token count
+ """
+ if self.engine not in ENGINES:
+ raise NotImplementedError(
+ f"Engine {self.engine} is not supported. Select from {ENGINES}",
+ )
+ tiktoken.model.MODEL_TO_ENCODING["gpt-4"] = "cl100k_base"
+
+ encoding = tiktoken.encoding_for_model(self.engine)
+
+ num_tokens = 0
+ for message in self.conversation[convo_id]:
+            # every message follows <im_start>{role/name}\n{content}<im_end>\n
+ num_tokens += 5
+ for key, value in message.items():
+ if value:
+ num_tokens += len(encoding.encode(value))
+ if key == "name": # if there's a name, the role is omitted
+                        num_tokens += 5  # a "name" field replaces the omitted role in the count
+ num_tokens += 5 # every reply is primed with assistant
+ return num_tokens
+
+ def get_max_tokens(self, convo_id: str) -> int:
+ """
+ Get max tokens
+ """
+ return self.max_tokens - self.get_token_count(convo_id)
+
+ async def ask_stream_async(
+ self,
+ prompt: str,
+ role: str = "user",
+ convo_id: str = "default",
+ model: str = None,
+ pass_history: bool = True,
+ **kwargs,
+ ) -> AsyncGenerator[str, None]:
+ """
+ Ask a question
+ """
+ # Make conversation if it doesn't exist
+ if convo_id not in self.conversation:
+ self.reset(convo_id=convo_id, system_prompt=self.system_prompt)
+ self.add_to_conversation(prompt, "user", convo_id=convo_id)
+ self.__truncate_conversation(convo_id=convo_id)
+ # Get response
+ async with self.aclient.stream(
+ "post",
+ self.api_url,
+ headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
+ json={
+ "model": model or self.engine,
+ "messages": self.conversation[convo_id] if pass_history else [prompt],
+ "stream": True,
+ # kwargs
+ "temperature": kwargs.get("temperature", self.temperature),
+ "top_p": kwargs.get("top_p", self.top_p),
+ "presence_penalty": kwargs.get(
+ "presence_penalty",
+ self.presence_penalty,
+ ),
+ "frequency_penalty": kwargs.get(
+ "frequency_penalty",
+ self.frequency_penalty,
+ ),
+ "n": kwargs.get("n", self.reply_count),
+ "user": role,
+ "max_tokens": min(
+ self.get_max_tokens(convo_id=convo_id),
+ kwargs.get("max_tokens", self.max_tokens),
+ ),
+ },
+ timeout=kwargs.get("timeout", self.timeout),
+ ) as response:
+ if response.status_code != 200:
+ await response.aread()
+ raise Exception(
+ f"{response.status_code} {response.reason_phrase} {response.text}",
+ )
+
+ response_role: str = ""
+ full_response: str = ""
+ async for line in response.aiter_lines():
+ line = line.strip()
+ if not line:
+ continue
+ # Remove "data: "
+ line = line[6:]
+ if line == "[DONE]":
+ break
+ resp: dict = json.loads(line)
+ if "error" in resp:
+ raise Exception(f"{resp['error']}")
+ choices = resp.get("choices")
+ if not choices:
+ continue
+ delta: dict[str, str] = choices[0].get("delta")
+ if not delta:
+ continue
+ if "role" in delta:
+ response_role = delta["role"]
+ if "content" in delta:
+ content: str = delta["content"]
+ full_response += content
+ yield content
+ self.add_to_conversation(full_response, response_role, convo_id=convo_id)
+
+ async def ask_async(
+ self,
+ prompt: str,
+ role: str = "user",
+ convo_id: str = "default",
+ model: str = None,
+ pass_history: bool = True,
+ **kwargs,
+ ) -> str:
+ """
+ Non-streaming ask
+ """
+ response = self.ask_stream_async(
+ prompt=prompt,
+ role=role,
+ convo_id=convo_id,
+ model=model,
+ pass_history=pass_history,
+ **kwargs,
+ )
+ full_response: str = "".join([r async for r in response])
+ return full_response
+
+ def reset(self, convo_id: str = "default", system_prompt: str = None) -> None:
+ """
+ Reset the conversation
+ """
+ self.conversation[convo_id] = [
+ {"role": "system", "content": system_prompt or self.system_prompt},
+ ]
+
+ @retry(wait=wait_random_exponential(min=2, max=5), stop=stop_after_attempt(3))
+ async def oneTimeAsk(
+ self,
+ prompt: str,
+ role: str = "user",
+ model: str = None,
+ **kwargs,
+ ) -> str:
+ async with self.aclient.post(
+ url=self.api_url,
+ json={
+ "model": model or self.engine,
+ "messages": prompt,
+ # kwargs
+ "temperature": kwargs.get("temperature", self.temperature),
+ "top_p": kwargs.get("top_p", self.top_p),
+ "presence_penalty": kwargs.get(
+ "presence_penalty",
+ self.presence_penalty,
+ ),
+ "frequency_penalty": kwargs.get(
+ "frequency_penalty",
+ self.frequency_penalty,
+ ),
+ "user": role,
+ },
+ headers={"Authorization": f"Bearer {kwargs.get('api_key', self.api_key)}"},
+ timeout=kwargs.get("timeout", self.timeout),
+ ) as response:
+ resp = await response.read()
+ return json.loads(resp)["choices"][0]["message"]["content"]
diff --git a/src/main.py b/src/main.py
index 28940ce..fef7d57 100644
--- a/src/main.py
+++ b/src/main.py
@@ -2,6 +2,8 @@ import asyncio
import json
import os
from pathlib import Path
+import signal
+import sys
from bot import Bot
from log import getlogger
@@ -13,8 +15,12 @@ async def main():
need_import_keys = False
config_path = Path(os.path.dirname(__file__)).parent / "config.json"
if os.path.isfile(config_path):
- fp = open(config_path, encoding="utf8")
- config = json.load(fp)
+ try:
+ fp = open(config_path, encoding="utf8")
+ config = json.load(fp)
+ except Exception:
+ logger.error("config.json load error, please check the file")
+ sys.exit(1)
matrix_bot = Bot(
homeserver=config.get("homeserver"),
@@ -22,21 +28,21 @@ async def main():
password=config.get("password"),
device_id=config.get("device_id"),
room_id=config.get("room_id"),
- openai_api_key=config.get("openai_api_key"),
- api_endpoint=config.get("api_endpoint"),
- access_token=config.get("access_token"),
- bard_token=config.get("bard_token"),
- jailbreakEnabled=config.get("jailbreakEnabled"),
- bing_auth_cookie=config.get("bing_auth_cookie"),
- markdown_formatted=config.get("markdown_formatted"),
- output_four_images=config.get("output_four_images"),
import_keys_path=config.get("import_keys_path"),
import_keys_password=config.get("import_keys_password"),
+ openai_api_key=config.get("openai_api_key"),
+ gpt_api_endpoint=config.get("gpt_api_endpoint"),
+ gpt_model=config.get("gpt_model"),
+ max_tokens=int(config.get("max_tokens")),
+ top_p=float(config.get("top_p")),
+ presence_penalty=float(config.get("presence_penalty")),
+ frequency_penalty=float(config.get("frequency_penalty")),
+ reply_count=int(config.get("reply_count")),
+ system_prompt=config.get("system_prompt"),
+ temperature=float(config.get("temperature")),
flowise_api_url=config.get("flowise_api_url"),
flowise_api_key=config.get("flowise_api_key"),
- pandora_api_endpoint=config.get("pandora_api_endpoint"),
- pandora_api_model=config.get("pandora_api_model"),
- temperature=float(config.get("temperature", 0.8)),
+ timeout=float(config.get("timeout")),
)
if (
config.get("import_keys_path")
@@ -51,24 +57,21 @@ async def main():
password=os.environ.get("PASSWORD"),
device_id=os.environ.get("DEVICE_ID"),
room_id=os.environ.get("ROOM_ID"),
- openai_api_key=os.environ.get("OPENAI_API_KEY"),
- api_endpoint=os.environ.get("API_ENDPOINT"),
- access_token=os.environ.get("ACCESS_TOKEN"),
- bard_token=os.environ.get("BARD_TOKEN"),
- jailbreakEnabled=os.environ.get("JAILBREAKENABLED", "false").lower()
- in ("true", "1", "t"),
- bing_auth_cookie=os.environ.get("BING_AUTH_COOKIE"),
- markdown_formatted=os.environ.get("MARKDOWN_FORMATTED", "false").lower()
- in ("true", "1", "t"),
- output_four_images=os.environ.get("OUTPUT_FOUR_IMAGES", "false").lower()
- in ("true", "1", "t"),
import_keys_path=os.environ.get("IMPORT_KEYS_PATH"),
import_keys_password=os.environ.get("IMPORT_KEYS_PASSWORD"),
+ openai_api_key=os.environ.get("OPENAI_API_KEY"),
+ gpt_api_endpoint=os.environ.get("GPT_API_ENDPOINT"),
+ gpt_model=os.environ.get("GPT_MODEL"),
+ max_tokens=int(os.environ.get("MAX_TOKENS")),
+ top_p=float(os.environ.get("TOP_P")),
+ presence_penalty=float(os.environ.get("PRESENCE_PENALTY")),
+ frequency_penalty=float(os.environ.get("FREQUENCY_PENALTY")),
+ reply_count=int(os.environ.get("REPLY_COUNT")),
+ system_prompt=os.environ.get("SYSTEM_PROMPT"),
+ temperature=float(os.environ.get("TEMPERATURE")),
flowise_api_url=os.environ.get("FLOWISE_API_URL"),
flowise_api_key=os.environ.get("FLOWISE_API_KEY"),
- pandora_api_endpoint=os.environ.get("PANDORA_API_ENDPOINT"),
- pandora_api_model=os.environ.get("PANDORA_API_MODEL"),
- temperature=float(os.environ.get("TEMPERATURE", 0.8)),
+ timeout=float(os.environ.get("TIMEOUT")),
)
if (
os.environ.get("IMPORT_KEYS_PATH")
@@ -80,7 +83,20 @@ async def main():
if need_import_keys:
logger.info("start import_keys process, this may take a while...")
await matrix_bot.import_keys()
- await matrix_bot.sync_forever(timeout=30000, full_state=True)
+
+ sync_task = asyncio.create_task(
+ matrix_bot.sync_forever(timeout=30000, full_state=True)
+ )
+
+ # handle signal interrupt
+ loop = asyncio.get_running_loop()
+ for signame in ("SIGINT", "SIGTERM"):
+ loop.add_signal_handler(
+ getattr(signal, signame),
+ lambda: asyncio.create_task(matrix_bot.close(sync_task)),
+ )
+
+ await sync_task
if __name__ == "__main__":
diff --git a/src/pandora_api.py b/src/pandora_api.py
deleted file mode 100644
index 4b4d1c5..0000000
--- a/src/pandora_api.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# API wrapper for https://github.com/pengzhile/pandora/blob/master/doc/HTTP-API.md
-import asyncio
-import uuid
-
-import aiohttp
-
-
-class Pandora:
- def __init__(
- self,
- api_endpoint: str,
- clientSession: aiohttp.ClientSession,
- ) -> None:
- self.api_endpoint = api_endpoint.rstrip("/")
- self.session = clientSession
-
- async def __aenter__(self):
- return self
-
- async def __aexit__(self, exc_type, exc_val, exc_tb):
- await self.session.close()
-
- async def gen_title(self, data: dict, conversation_id: str) -> None:
- """
- data = {
- "model": "",
- "message_id": "",
- }
- :param data: dict
- :param conversation_id: str
- :return: None
- """
- api_endpoint = (
- self.api_endpoint + f"/api/conversation/gen_title/{conversation_id}"
- )
- async with self.session.post(api_endpoint, json=data) as resp:
- return await resp.json()
-
- async def talk(self, data: dict) -> None:
- api_endpoint = self.api_endpoint + "/api/conversation/talk"
- """
- data = {
- "prompt": "",
- "model": "",
- "parent_message_id": "",
- "conversation_id": "", # ignore at the first time
- "stream": True,
- }
- :param data: dict
- :return: None
- """
- data["message_id"] = str(uuid.uuid4())
- async with self.session.post(api_endpoint, json=data) as resp:
- return await resp.json()
-
- async def goon(self, data: dict) -> None:
- """
- data = {
- "model": "",
- "parent_message_id": "",
- "conversation_id": "",
- "stream": True,
- }
- """
- api_endpoint = self.api_endpoint + "/api/conversation/goon"
- async with self.session.post(api_endpoint, json=data) as resp:
- return await resp.json()
-
-
-async def test():
- model = "text-davinci-002-render-sha-mobile"
- api_endpoint = "http://127.0.0.1:8008"
- async with aiohttp.ClientSession() as session:
- client = Pandora(api_endpoint, session)
- conversation_id = None
- parent_message_id = str(uuid.uuid4())
- first_time = True
- async with client:
- while True:
- prompt = input("BobMaster: ")
- if conversation_id:
- data = {
- "prompt": prompt,
- "model": model,
- "parent_message_id": parent_message_id,
- "conversation_id": conversation_id,
- "stream": False,
- }
- else:
- data = {
- "prompt": prompt,
- "model": model,
- "parent_message_id": parent_message_id,
- "stream": False,
- }
- response = await client.talk(data)
- conversation_id = response["conversation_id"]
- parent_message_id = response["message"]["id"]
- content = response["message"]["content"]["parts"][0]
- print("ChatGPT: " + content + "\n")
- if first_time:
- first_time = False
- data = {
- "model": model,
- "message_id": parent_message_id,
- }
- response = await client.gen_title(data, conversation_id)
-
-
-if __name__ == "__main__":
- asyncio.run(test())
diff --git a/src/send_message.py b/src/send_message.py
index 946360b..26179d6 100644
--- a/src/send_message.py
+++ b/src/send_message.py
@@ -1,5 +1,3 @@
-import re
-
import markdown
from log import getlogger
from nio import AsyncClient
@@ -14,32 +12,19 @@ async def send_room_message(
sender_id: str = "",
user_message: str = "",
reply_to_event_id: str = "",
- markdown_formatted: bool = False,
) -> None:
- NORMAL_BODY = content = {
- "msgtype": "m.text",
- "body": reply_message,
- }
if reply_to_event_id == "":
- if markdown_formatted:
- # only format message contains multiline codes, *, |
- if re.search(r"```|\*|\|", reply_message) is not None:
- content = {
- "msgtype": "m.text",
- "body": reply_message,
- "format": "org.matrix.custom.html",
- "formatted_body": markdown.markdown(
- reply_message,
- extensions=["nl2br", "tables", "fenced_code"],
- ),
- }
- else:
- content = NORMAL_BODY
-
- else:
- content = NORMAL_BODY
+ content = {
+ "msgtype": "m.text",
+ "body": reply_message,
+ "format": "org.matrix.custom.html",
+ "formatted_body": markdown.markdown(
+ reply_message,
+ extensions=["nl2br", "tables", "fenced_code"],
+ ),
+ }
else:
- body = r"> <" + sender_id + r"> " + user_message + r"\n\n" + reply_message
+ body = "> <" + sender_id + "> " + user_message + "\n\n" + reply_message
format = r"org.matrix.custom.html"
formatted_body = (
            r'<mx-reply><blockquote><a href="https://matrix.to/#/'
            + room_id
            + r"/"
            + reply_to_event_id
            + r'">In reply to</a> <a href="https://matrix.to/#/'
            + sender_id
            + r'">'
            + sender_id
            + r"</a><br>"
            + user_message
            + r"</blockquote></mx-reply>"
- + reply_message
+ + markdown.markdown(
+ reply_message,
+ extensions=["nl2br", "tables", "fenced_code"],
+ )
)
content = {
diff --git a/sync_db b/sync_db
new file mode 100644
index 0000000000000000000000000000000000000000..27d67defc88e90fa56c06fd83827507bee30e935
GIT binary patch
literal 135168
zcmeI*$*<#DdKmDk9$vlI({s}9lxJ7JPQxj%ZH^K}8-dMv6e*Du2PHv)q$r9M2XQ6^
zBt~^R3$L>akX_b6w%KH#EWJxM`5Tf&kYN*6y`jeEbP#87;RyHMl1Pf;`JIo4=bVpq
zueI0GE`D;Znj+|);17P~!NZ3S{^ZG%2M-=Tdhp=EkC*Q+A3t24eCx{>%fAm_d;T>~
zmwo&}`LDkHV&hN$`-^RU>+^rUW^=F@2mk>f00e*l5C8%|00;m9AOHk_01)^;An+UC
z|NgSqhoAnp2g?_D00AHX1b_e#00KY&2mk>f00e*l5C8%noWQppe)GY@Z@e7;f9EeA
zEMMRO1b_e#00KY&2mk>f00jOT3jEvOefaT%=l^7afB%y>C1P~AFzq1Vv3lo8Q*Q&a
zr8nI@P8yE92y&TF7Cl~}acn%v3}xn)kQ#Gs<-3yR$CF2{vmnOC^A)jfn;h+$>k4DY
zsY;zRAFqaC914*N&s!|N?{AmwWit9>P(-S+UG^0;jY@I%f#bsX8Vp+;a;|~nHWkEF
zjJcCf1s|OdY=jbsW6zOsYr7rNj45&@+2LGX_x!xudu?6_1FgsD5!vHW!dO9vb0KSa
z?u8pDLKx4mI-QXmx|64is|!|zUAt92r0tCkg^0XSMb>ZH3w}Zj(q)1^-dT2|U5mLj
zbM3MlwaXc=@m$Y}pl~JE(2weEtLEGsWLw>-Zwh(AP#2H3;SMEt%yH_BfU!Ln3fH?k
z>iVWF5*8e+aDY>WH}1S3pW5yI0O^A_*m;ZgE`Rll5?e_{QSBz)
z%h!5gHmF-#rmSQRwU^zMY2DmnHrL8Eh!Z72bHgNZ0mEusilbAhS?=+sY_nv{$(yqp
z1TB=C`DD_%fMQnP+3}&8$TNHqB#cR>on{eeN#tI-wph5;=!!Ztnv`rcc%>++Y(n$c
zstUnmJ2|<=q8WQESp%Yq(N;aN%6DKLbO~P^JF1V;;
zb_&eR(UuY9qT-%+zi?emq0VmVj176hDM{>-zeTbXKhG}FDJI*oVO1-f%qIj)F
ziZ$-V>9js+LI>skft8t6Oi@xqjptF`U3RUq<0hLR+sj1Z1{Hj!Do6+go
z2aI_$9D8?h>g`T0yOL|B7>i3qKU_BQh4AERYi}USZ)wqI5S@ie2|aBHVuQO!V%{=$
z9IMn)@G7lIO(l_bZEt6X@qpac`>cZ3Oqmdqlyzic4`IOMWSHCo(J$D});FF%R?5jX
zJ7Yi_l--vae9-bV6JRx7ZTq?rD?`T;E$=$JzMiIaJ_&-oCbiSJ(Puwu6C~NFcpL2q
zffyie*H_4>ZZ)i-TeO4gh`f_twVqBqj9N09ozi&|M#|}UCk@7Td&X*Fnad$Y5d!&)
z8R7jr-7@KHqrsf|OXL3JJIjxUpZ@y?%NKY60U!VbfB+Bx0zd!=00AHX1b_e#00MvQ
z1b+IhWd`(5!m9hj<$v}!>GTJA6$SbB<#YBDbmGots_sSpfB8@U-~j}H01yBIKmZ5;
z0U!VbfB+Bx0zd!=e82)P^8aA`{{dGpC>js|0zd!=00AHX1b_e#00KY&2mpaa0F3`Z
z7ytnv00e*l5C8%|00;m9AOHk_01)`_1;G9PAATK!5&{7r00e*l5C8%|00;m9AOHk_
z01yD8K?#8X
z5C8%|00;m9AOHk_01yBIKmZ7U`~N`<00AHX1b_e#00KY&2mk>f00e*l5cu!~K>q*3
zuVYX`AOHk_01yBIKmZ5;0U!VbfB+Bx0$}_PVgLvL0U!VbfB+Bx0zd!=00AHX1c1PY
zF97cU|M2S=ln@920U!VbfB+Bx0zd!=00AHX1b_g@|AQC+0zd!=00AHX1b_e#00KY&
z2mk>f@Zk%9@&AWk$Do8j00;m9AOHk_01yBIKmZ5;0U!Vb!2SOq27mw%00KY&2mk>f
z00e*l5C8%|00?~e0wDkY;ny)JArJrpKmZ5;0U!VbfB+Bx0zd!=00A)m2QdHyfB+Bx
z0zd!=00AHX1b_e#00KbZ!xsSe|9|*(3`z(DfB+Bx0zd!=00AHX1b_e#00KY&f00e*l5cu!~zW-_e;QNpM{K2RH@CWBlx$phO576%g-~A`w
zQNHVZ@^3!=y-)tpqd))XCy$hG|FduX{M+5Pe*YW);+y~Z8`Oj65C7ALF9S*l1b_e#
z00KbZ|AN5t-~9fg$6NB@^FL0@Q#=)MQsqTjhE-pl+PH1gs%)PHQMA17o~5VvuKxjR
z^0duAk#&yuo_zM+ZC=~QXHN|MYqt9A$=~_x)rWod^yl_>A3Y|?hkqt^L72zy-Sc}F
ze(#GzvS~)+|B?^;qKHe$K6_&Gp8Z5OmhXe6J=vM^mNxw-D)0a7$!Bf6=rqr+@%+|y
z%evPNcp><;~Uagvci!
zKYIN1>BC=aUxvt=8^8JLlQ)Cn&6Qtpy;Fm41;W?X+Fu?8e`R&|vnK~#-XHkC^TmPu
z?A7N!efmeAeDv6O`gQgCW_h2XZ@&J{bM?`q$G`vk4}UJdggnJV8pU1Hw_V!>OO?L$
z`r|h--(LNK^YdjdWBl#&;w{yepYYi`_CI^_rrWC>K7AwPKVQC_oL>d~)9Rz8;rQ{x
z=RbTIm#;Q>ExKMUeDr2KzFP6x)}OapuU5SihkyTDj~@T{#}EJUH(%p>aY(P;^6mGL
zy>`^^l>aTI&-e7+HluHs*6E!_Y-vnRX?x3q`B@nh@w-jeDbC}iYTqaSKWWz9te96p
zxU@I_YmL!Y0sU$pKl`FIKl`e{PoLu7{^+sz0U!VbfB+Bx0zd!=00AHX1b_e#_ABao{VT`8I8YKYIN1
zM-RuBRsJGP|Lm)L^ov)&`eq%!xxwcR!i!}uIKO(|XHS3cZ+`UnkAC!Zb^3BA=!>5}
zd|v&HM~{E>qldqsU)JJ_WiP(}=9~5QV$m;o)-Ibq>*m{gYGGOPZq>Xx=67yOc)Q(y
zwdtG9{^!eIJH)47CHud6%g??%tfx<(fAd!!J*J_D&wuZw__=DvpgF~-=n|yMwe?j@
zzP09Wyjc-%t$po%pI6FT>)#6Zr+@d;j~?%#udAT9-uw)GYr(Jl{BQr@(PIdD_=`V%
zS#58ve(U9@Z&v18Yk$dl{magwS7z^2@SEq!Ya6~us}lZ!!C-qF)`|&t84T
z)2Au1Z4XPalpGxWv%{|`U?Zx5C)@Bjip00;m9AOHk_01yBIKmZ5;0U!Vb
zJ~)AIfAf=XJot}b{QtpKEGQ5V00KY&2mk>f00e*l5C8%|00;nq*91Nv|1S%Ed`=ui
z-o!l8qWYk3_9uG4y>+_1^R2lHGgjP8JLFdRs*l>)Mt?9E%|a=dNAlOm&I+iX1dk
z&!HA~FO&0)t+-m*Gp=|W*+XO9V|ahbt|%E-QWdRZW)0`i8qWBEawl^};7~ky^>jNU
zgp#{;e~zm`-R@aiAj13ZR?b8dx^_CYDT2>ditYUx8+gM-_|10D6s&SI>B!h#)BCyZ
z38KgN1MDB-(q;6MbXQwm^Rqe&8Y)UGgzS!kv~r+3Y)6HjAcJI3@=aK)l%KBmOty2P
zo}~6GeZ|K5rb3~?O%rv<+N}V!*Sok43EeosYrlm^=yomfR$`72byvo)$yJH)#%4A5
zFu$wWdh*~<8nvO`*e$L2`?Yu_vsK}bcb^yWY(8F~t{^i{L~t?e+ne2~-nQKu+in(}
zstl?v7pT1<9;o$c-wQqyZ1$Bs1&0(F8s3mk)*y|?U4fr-?Q%SN$C)mR6E751Lqpgp
zSbk+?=x2U@+@`GK6ojPEnaU8Qki+$IgVX;j61)9j~m)
zU@Ol{Z#ROef-JOg(UA78tEDOH-#5-Z+zi*iGnIyz;$zzTYoE8x+a=Dn(}^lLa>R=0
z5MD2ep!vo2j+4xakeHn!Lg8^dx{d_dHm98uN)6RfiNtX0ec9_V^IzRYQzur^|Rb1Wqx^v)*h4j8`u|(r@4vDfp
z$8|y8;;Dy1fokufgwoLhh|`Mw-Kh>AcM@V&V9$n!vn0iXNK=o0{EUPE0CR{A{yBH(O$lY0`zf2P&o1
zPCacuLg%s#Sh`Sew$iM5V#7`B)bT{?#RR<7d45MEx~eskp_Asw)l182pF+kaRMDs6
z_3~eGw-g2SLhf?MXwR;!`7$Gnd{khVvd}q0sCDDTEtlqKLy~<^F-A^yekG9oMk-Z%
zE(di*vf}{JzU-56=FVrXYOcP{`uz>bv4fW1IM_rTtvIZ$rT7BL7TFV@^A(*lje<1JF~>o`%K<|V9Y?lw@J-<1?T<5;y@
z-{~!7?jh`A?)NoU>i0mgh3#e{GG)Y=N^{EUar46kcl591K
z?7I-Nz2B1UMf8yK2|KXi?WzfUO}O$}Ka23G79xapGXE4|=Ae;jG>X9I08|M1bV&miqqj(@Zr4~sw{x;
zMM5Cb{yuGvey6TjW;QIOadWjc;D*24bDuH;`lyBIO25>e-tDZ7P7k%vaJ7Gpme_&3
zX`^^+qp3-0YqY^VI!PAP3>$MxAXl4eA*-~U#4rSno~SNfC%KU_+U-vAo9JJ+Se{sWEOv><@oo#EfqF(51y~EdBS=?hnAJz&U_AXw+@s?Ar#{+R(
zjhG|kUg#4IE!;?rwIj`lGlKJSbEgOEn?s{9OTzOhcahQ!x|hFVjxtpVL(8AsvaG)b#^
zYn*N?bun2w8d<(|mMgNAal14P6Fj7q6;YE!q=!*F2An1KoS8uz&BW;R;PzZ-?$7YG
ztlM$rk?H9^m$smm_YLY_{R+ia*i~MeTPPzV)T7K@qAiWJTF{z2?4unK)plMtS450e6mzLxsFkm!Dz~^MVvXs
z-W|U%slI~5b$u3{xbAL#b1CPgKUG%Fl|AK|Sse)~h2ZPuq`0~6C2E&n5(BoN8M@tD
zCAE*xC^fsd46GyT4l8qB8%aaz7X#_}iG{6u(3XQB+lACvLzyxg=8|MA32kF8&~B-O
zH0k-|*nUdyn{fzHMZi;1vgBQD9O+wDIu^&TdL_bW_NsK_Ku*XEAKjfCtdPqxerYVO
zv`9{v4_z4AP|Q1Oer4_ud99Ji?xF6o^8!~L&0H7tJ0lLF*C-OB`8c*dJPD?bM)lG{t!ztTIR(<-tjKfR^RuM~
za8HsgYEu{O0y)8EU{UX|Tv7QvFvUYQ(6&xp4FbyopFZ8HV-d4WxKS|gE?Wm9x7OVy
zhg1#OU)9+>w2VIPgk(E3QfZ!8QlPYAd(>05CVgDQ61_|%hLhZ0n|);TM^iA{V{krs
zu-l$W_`3I%l?0bn>JZ75>+U(RoQl1&Pbs#Q1n7X)OC=P0J#n|RF2j<)5*%f}?tHNn
z4kl?;7)za^qYQ1WaqtNUUn#dyO4V&;kXhr_k(Z}!!`)8Yp!MrP*5>7xvXE!I()b9O
zSE0$$TU3~AgCKh*y5n18+HS~;32nB@hTr*0npZ2|IhDyuCJDTdr{1AE%BAUR-&j;w
zY>_p2Pfgg`*|u&nT;W^-+B@V>G1igFP-AeyjefL`RC9)@VOwzpOa`l?GeY+DuH51C
ziwS{2JWg&1L2O$h5tH2xrabv|Ox~+LcSyc)4$DRPTuR%O+h}or-X;BURSx9t=H3`R
zf-#|QE7#s=`K-;21ST--2Rrdo(1
z-d)XA?%bP7KJK@scix3CDMz`sHHYHXR}ws07=N`oSJ_yRHD;GHlZLTtI@2bF#*-8r
z7OLFPo@<~V(o~3HT!OFMdCcMO#D|I;eddB1X)0NBWeXPJJxHN7oJq~(uNzPZw(@zcTQ0y`sRtc}wA|*X|
zgB~ev8;nK{EvI|kIT)s!wT<2TcEMANZuD$oT`w)5?}(z42a+V$c-mTr6~0zYEEcK?
zx)~Xr2HXBbvvy6wNlxlQ0_C$%*2lF5Nn_LE{Y{-Yif;#7!2^j`(Pe&xh=hzSW4BM4
zD?J)rPIclfv90ZLk-!Kc)|Z)@CUpyYJcp%Z9-5__P;fQE4RN*3HCytyRnyvKuxw?*J|v^MR8dMSwYma>$5%Vghp#rhFDupEQ>1?
zP3xkt(gWg_Fm4*KA>B>J^rn@VuTU$m&M|3XXm?|z4U*pvkmjnl{+PfSM%?{rsnL+Z
zI2_{4-O(#N#xf>*Bs~T$HH_wx3Mz4|HR!9cp+|N_Y5K;W_v=fOBz=n*X{BhsV+;MX
z3jz(Z!!>$sYPp{8dQdA`IisO6Wds^^qQnz?f5@kC8I+uj#weW?caEn#Rk;()qgsO|
zbZR2_PP$fYQF(X7Ci6@YS4&@!N4dkDt?qfgPc|)fG+BHgZ#B^bCNu6K0=c;~~4JT=Nb-LhdEl
z(W48C`C8R{$)w?6ykGA|e3-A8I5kc>?Fd-S&Y6UCCnYw5P+mG#j$h??$SvLYu{~s)
zYjQR*bG-BB&^Eb#uGa;3SjMHi-i_CElZBp!SQlX`)*P$l5avaNGrM#{9L2iv?RqmC
z$_~@fi%RL~A>o}+ieY1-S$3qV4zIoKm=;tsgla~gqIxcuzO3lOLf2hu
zHuNkg%Hf&SS9P+voLeeKxmhEP@|bRxY19%HeQRIOg{8=O+#*_g-{hHwsy9ib
zsDa<=J6zW}l_IXqCab(NU57jsAm?GAXWc0AT_Gyc_6XVD(n`oB9r|lZ%*(hMmE!;(
zXiK;4Zp7xBdgGwNF8DFsoTD+Td;fw9W3kMR&U32g7SSAcxAXdFn@T~Ir}f1Vbqe+@
zXC;(Wyg$v?WmW_V5qNT(Btup--6-eyvJ6e`skWV*`<~I1qjS2fYq1;I3wpG#CmcJX
zb~tC+j#(=7M9LSD{kRl$&~qmH$nDC(J4CMSVMHR8&BaAVhJeTKKo#oHYU%v($G
z5WpwJ@M(G`cOrVSy2M>ZgBa%XhK|yni^*}g;)dhSg9p))f+0xl(~jQWo1J!wW~w_A
zXQt56}MZoW9YdY8iHul4C!n-xGyB%|+pz?&djcF*0L#`lq
zK3(l+l+m0i^Dnz^exLeJcyLKzvT>dPbD{hUHCD1ggt<9R2QX6+u16q1x
zhnc%A(}N+mx>;*MDU^v_duNe$N;~A^oyajBv2*~?)iPVG1wCb`N7eL%rqa{f<9J
z^R`?%uFk$mZyTst-`qn*Z7~{!8C%+3wr6Zo__$rBXV9sK2nlvP8JVJ{c
zWsc%`b2=O8`C7Imj`TQ@n%=O;u8{V+bQd>=eTh#uv@*=qsY6&(M^x
zs3G(rpJ8wiBirJ9DByOlsLYV}xQMDzbUCZOf3I+LX$hUZb-u$XvQwx$_d0u`Jx7o>
zHv+E&6ssBQmY%64O)wodp}_li!9@uM*?t!-QzMu4(#?;$V{<$5$7LXZkbdKE%bf2r
z!4hScv=oeM<@NCny~K`-P7;C6D~9(cjIAMr_IDM%^{f+}ThhHw=OUlG{rR@!9hUhF
z96wu%=r2Q8ykewmmngVo>ccjPW?J?x(HJQ
zZ0)?Q=&t3D?UuFa<7QPBn6oR883gW3%L6r)8VOqp
zb`39yTU0U@$jdAhM(6x8M-BD2V`_~@t{0b{@S2cMSU#r9ytH?*l9uW;1MRrgJOyR6
z)sC5Q^z8c{)6b*ldOf6
ze6$fvo6}KEl9hMvm{lpOGTl{%+MOi#zMcF~m4aqdo!6ALZpHH*(KM4TD0)Te{64xO
zgSni#0S29lF^OWre=(Wu++;paf>9UAfK=FXge}8HjkM=D;(np(>dk4mY%XJ_Ei?J|V9Ow4jH?AJF|CFhB6;&x=584awiU)R
z0gamN0mbu@7#t|0Ij=r)@k%8}la$Cgz^wJgz{8*sPV1PmG%GN1`>cgb(m|CT3Qo4J
z9LI~=Hk>jaQ;ku*ccBUE2ZAxPGnLK)(M=vpkUB-FQOjykl%40ULU}RCJf9VA(IpP-
zz`45tEqbdeb|-x6WfRk_&kl;p)83XZhn}2`gsUUkTk=s^>oj;+xty`g?=Evu>2d)h
zYV{E7M`Si9Bs`G{IRz;e)?5cK-6WPl>0Hxt!QY55rYOswcU_wDlXIIjn(Kme$*mgG
zx`z;(kSBNPXdZ82XgJxNtjq*9`5o?{-mX=N6;v8@bBtA1w89vna`968wYZ!c=_(&Q
z%eS0T>&YWsUSsF9_Ci{_clgeyml&T=?>a{VJ`_uK