Merge pull request #248 from matrixgpt/feature/model-options

Add options to configure the model's max context and prompt token limits; closes #246
bertybuttface 2023-11-08 14:34:09 +00:00 committed by GitHub
commit 3b18d22f2c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 12 additions and 2 deletions

View File

@@ -17,6 +17,10 @@ CHATGPT_API_MODEL=gpt-3.5-turbo
 #CHATGPT_REVERSE_PROXY=https://api.openai.com/v1/chat/completions
 # (Optional) Set the temperature of the model. 0.0 is deterministic, 1.0 is very creative.
 # CHATGPT_TEMPERATURE=0.8
+# (Optional) Davinci models have a max context length of 4097 tokens, but you may need to change this for other models.
+# CHATGPT_MAX_CONTEXT_TOKENS=4097
+# You might want to lower this to save money if using a paid model. Earlier messages will be dropped until the prompt is within the limit.
+# CHATGPT_MAX_PROMPT_TOKENS=3097
 # Set data store settings
 KEYV_BACKEND=file
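How the two limits relate: the prompt (instructions plus conversation history) is trimmed to CHATGPT_MAX_PROMPT_TOKENS, and whatever remains of CHATGPT_MAX_CONTEXT_TOKENS is left for the model's reply. A minimal sketch of that arithmetic, assuming the node-chatgpt-api semantics these options feed into:

// Assumed semantics: the prompt is capped at maxPromptTokens, leaving the
// rest of the context window for the completion.
const maxContextTokens = 4097; // full context window (davinci-class default)
const maxPromptTokens = 3097;  // budget for instructions + history
const maxResponseTokens = maxContextTokens - maxPromptTokens;
console.log(`Completion budget: ${maxResponseTokens} tokens`); // 1000

With the defaults above, replies are capped at roughly 1000 tokens; raising CHATGPT_MAX_PROMPT_TOKENS keeps more history in the prompt but leaves less room for the answer.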

View File

@@ -38,6 +38,8 @@ export const {
   CHATGPT_IGNORE_MEDIA,
   CHATGPT_REVERSE_PROXY,
   CHATGPT_TEMPERATURE,
+  CHATGPT_MAX_CONTEXT_TOKENS,
+  CHATGPT_MAX_PROMPT_TOKENS,
 } = parseEnv(process.env, {
   DATA_PATH: { schema: z.string().default("./storage"), description: "Set to /storage/ if using docker, ./storage if running without" },
   KEYV_BACKEND: { schema: z.enum(["file", "other"]).default("file"), description: "Set the Keyv backend to 'file' or 'other'; if 'other', set KEYV_URL" },
@@ -72,5 +74,7 @@ export const {
   CHATGPT_PROMPT_PREFIX: { schema: z.string().default('Instructions:\nYou are ChatGPT, a large language model trained by OpenAI.'), description: "Instructions to feed to ChatGPT on startup" },
   CHATGPT_IGNORE_MEDIA: { schema: z.boolean().default(false), description: "Whether or not the bot should react to non-text messages" },
   CHATGPT_REVERSE_PROXY: { schema: z.string().default(""), description: "Change the API URL to use another (OpenAI-compatible) API endpoint" },
-  CHATGPT_TEMPERATURE: { schema: z.number().default(0.8), description: "Set the temperature for the model" }
+  CHATGPT_TEMPERATURE: { schema: z.number().default(0.8), description: "Set the temperature for the model" },
+  CHATGPT_MAX_CONTEXT_TOKENS: { schema: z.number().default(4097), description: "Davinci models have a max context length of 4097 tokens, but you may need to change this for other models." },
+  CHATGPT_MAX_PROMPT_TOKENS: { schema: z.number().default(3097), description: "You might want to lower this to save money if using a paid model. Earlier messages will be dropped until the prompt is within the limit." },
 });
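Worth noting for anyone extending this file: process.env values are plain strings, and znv's parseEnv coerces them to match the zod schema, so CHATGPT_MAX_CONTEXT_TOKENS=8192 in .env arrives in code as the number 8192. A reduced, self-contained sketch of the same pattern, limited to the two new variables (the surrounding schema entries are omitted):

import { parseEnv } from "znv";
import { z } from "zod";

// znv coerces the raw env strings so they satisfy the zod number schemas;
// unset variables fall back to the defaults declared here.
const { CHATGPT_MAX_CONTEXT_TOKENS, CHATGPT_MAX_PROMPT_TOKENS } = parseEnv(process.env, {
  CHATGPT_MAX_CONTEXT_TOKENS: { schema: z.number().default(4097) },
  CHATGPT_MAX_PROMPT_TOKENS: { schema: z.number().default(3097) },
});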

View File

@@ -11,7 +11,7 @@ import {
   DATA_PATH, KEYV_URL, OPENAI_AZURE, OPENAI_API_KEY, MATRIX_HOMESERVER_URL, MATRIX_ACCESS_TOKEN, MATRIX_AUTOJOIN,
   MATRIX_BOT_PASSWORD, MATRIX_BOT_USERNAME, MATRIX_ENCRYPTION, MATRIX_THREADS, CHATGPT_CONTEXT,
   CHATGPT_API_MODEL, KEYV_BOT_STORAGE, KEYV_BACKEND, CHATGPT_PROMPT_PREFIX, MATRIX_WELCOME,
-  CHATGPT_REVERSE_PROXY, CHATGPT_TEMPERATURE
+  CHATGPT_REVERSE_PROXY, CHATGPT_TEMPERATURE, CHATGPT_MAX_CONTEXT_TOKENS, CHATGPT_MAX_PROMPT_TOKENS
 } from './env.js'
 import CommandHandler from "./handlers.js"
 import { KeyvStorageProvider } from './storage.js'
@@ -66,6 +66,8 @@ async function main() {
     debug: false,
     azure: OPENAI_AZURE,
     reverseProxyUrl: CHATGPT_REVERSE_PROXY,
+    maxContextTokens: CHATGPT_MAX_CONTEXT_TOKENS,
+    maxPromptTokens: CHATGPT_MAX_PROMPT_TOKENS
   };
   const chatgpt = new ChatGPTClient(OPENAI_API_KEY, clientOptions, cacheOptions);
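The two values end up as top-level fields on clientOptions, which node-chatgpt-api's ChatGPTClient reads to decide how much history to keep and how large a reply to request. A reduced sketch of the setup after this change; the import path, the modelOptions block, and the empty cacheOptions are assumptions for illustration, not part of the diff:

import { ChatGPTClient } from "@waylaidwanderer/chatgpt-api"; // assumed import path
import {
  OPENAI_API_KEY, OPENAI_AZURE, CHATGPT_API_MODEL, CHATGPT_TEMPERATURE,
  CHATGPT_REVERSE_PROXY, CHATGPT_MAX_CONTEXT_TOKENS, CHATGPT_MAX_PROMPT_TOKENS,
} from './env.js'

const cacheOptions = {}; // cache config omitted; see the repo's keyv setup
const clientOptions = {
  modelOptions: { model: CHATGPT_API_MODEL, temperature: CHATGPT_TEMPERATURE }, // assumed shape
  azure: OPENAI_AZURE,
  reverseProxyUrl: CHATGPT_REVERSE_PROXY,
  maxContextTokens: CHATGPT_MAX_CONTEXT_TOKENS, // whole window: prompt + reply
  maxPromptTokens: CHATGPT_MAX_PROMPT_TOKENS,   // prompt share; earlier messages drop first
  debug: false,
};
const chatgpt = new ChatGPTClient(OPENAI_API_KEY, clientOptions, cacheOptions);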