Compare commits
88 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 5b28362282 |  |
|  | 8d563d61f1 |  |
|  | c9d3e0ab6a |  |
|  | 7c2134fb12 |  |
|  | 0c326797dd |  |
|  | 676f133545 |  |
|  | 2dfade1c42 |  |
|  | 509b1e5c63 |  |
|  | 0958db3825 |  |
|  | 072a7e5f05 |  |
|  | ff59a2e41d |  |
|  | 561ce8e86a |  |
|  | d259431316 |  |
|  | ea1dd59ef4 |  |
|  | 49571ac635 |  |
|  | 1f5cb71a64 |  |
|  | bff365785a |  |
|  | f2fc47e741 |  |
|  | 44755c964f |  |
|  | fac2580a19 |  |
|  | 6829d66c1f |  |
|  | 4df6a261d3 |  |
|  | e69644d7b4 |  |
|  | 9db3d792cc |  |
|  | df1dfa7d46 |  |
|  | d4c846b543 |  |
|  | 968b8ccdbd |  |
|  | 583e978a82 |  |
|  | 8a1968b2f8 |  |
|  | 34d2da1ffc |  |
|  | 427b05b891 |  |
|  | c2d8ae8616 |  |
|  | 9041fe7472 |  |
|  | 20b93ad065 |  |
|  | 10ace5fa75 |  |
|  | b822cd48d2 |  |
|  | 4d528efaf6 |  |
|  | 0bae503a0a |  |
|  | 9b2359fc27 |  |
|  | 2e390596ea |  |
|  | ca64efec1b |  |
|  | fdb65366d7 |  |
|  | 1706886a64 |  |
|  | 00b6af8c74 |  |
|  | f6118879e5 |  |
|  | 270031c783 |  |
|  | f1bc711cd7 |  |
|  | 076a9b9b9c |  |
|  | 329aa6d164 |  |
|  | 9d21d1c5b9 |  |
|  | 25f460f454 |  |
|  | 4674a54c70 |  |
|  | ebd23f7295 |  |
|  | 1d24f39830 |  |
|  | 3e7a29c9dd |  |
|  | 98827440eb |  |
|  | 2bcfb04a72 |  |
|  | d327c8f5d2 |  |
|  | 690acf1c93 |  |
|  | 53d0ffcd11 |  |
|  | 94df631c44 |  |
|  | 166a4fa44f |  |
|  | e13b146d6d |  |
|  | ae03267d9b |  |
|  | 3838ff4617 |  |
|  | 822914d521 |  |
|  | f5f5b2bbdb |  |
|  | d7ef4590ea |  |
|  | 4b289640f2 |  |
|  | 12209fe0dd |  |
|  | 4dab094855 |  |
|  | ebe62ad250 |  |
|  | cc39074e0a |  |
|  | 650759306d |  |
|  | 398687fad0 |  |
|  | 55cdd2eec6 |  |
|  | 5e6f8cbce7 |  |
|  | f3402401f1 |  |
|  | f05f6826f5 |  |
|  | 317cdd3f77 |  |
|  | 345f4b2e85 |  |
|  | d043a849a9 |  |
|  | b7dcc4264d |  |
|  | ab5c81d063 |  |
|  | 1fc896d0bd |  |
|  | 1ba8d4ffa9 |  |
|  | c64970525b |  |
|  | bac1fb67d2 |  |
.devcontainer/devcontainer.json

```diff
@@ -1,58 +1,17 @@
-// {
-//   "name": "LibreChat_dev",
-//   // Update the 'dockerComposeFile' list if you have more compose files or use different names.
-//   "dockerComposeFile": "docker-compose.yml",
-//   // The 'service' property is the name of the service for the container that VS Code should
-//   // use. Update this value and .devcontainer/docker-compose.yml to the real service name.
-//   "service": "librechat",
-//   // The 'workspaceFolder' property is the path VS Code should open by default when
-//   // connected. Corresponds to a volume mount in .devcontainer/docker-compose.yml
-//   "workspaceFolder": "/workspace"
-//   //,
-//   // // Set *default* container specific settings.json values on container create.
-//   // "settings": {},
-//   // // Add the IDs of extensions you want installed when the container is created.
-//   // "extensions": [],
-//   // Uncomment the next line if you want to keep your containers running after VS Code shuts down.
-//   // "shutdownAction": "none",
-//   // Uncomment the next line to use 'postCreateCommand' to run commands after the container is created.
-//   // "postCreateCommand": "uname -a",
-//   // Comment out to connect as root instead. To add a non-root user, see: https://aka.ms/vscode-remote/containers/non-root.
-//   // "remoteUser": "vscode"
-// }
-{
-  // "name": "LibreChat_dev",
-  "dockerComposeFile": "docker-compose.yml",
-  "service": "app",
-  // "image": "node:19-alpine",
-  // "workspaceFolder": "/workspaces",
-  "workspaceFolder": "/workspace",
-  // Set *default* container specific settings.json values on container create.
-  // "overrideCommand": true,
-  "customizations": {
-    "vscode": {
-      "extensions": [],
-      "settings": {
-        "terminal.integrated.profiles.linux": {
-          "bash": null
-        }
-      }
-    }
-  },
-  "postCreateCommand": "",
-  // "workspaceMount": "src=${localWorkspaceFolder},dst=/code,type=bind,consistency=cached"
-
-  // "runArgs": [
-  //   "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined",
-  //   "-v", "/tmp/.X11-unix:/tmp/.X11-unix",
-  //   "-v", "${env:XAUTHORITY}:/root/.Xauthority:rw",
-  //   "-v", "/home/${env:USER}/.cdh:/root/.cdh",
-  //   "-e", "DISPLAY=${env:DISPLAY}",
-  //   "--name=tgw_assistant_backend_dev",
-  //   "--network=host"
-  // ],
-  // "settings": {
-  //   "terminal.integrated.shell.linux": "/bin/bash"
-  // },
-  "features": {"ghcr.io/devcontainers/features/git:1": {}}
-}
+{
+  "dockerComposeFile": "docker-compose.yml",
+  "service": "app",
+  "workspaceFolder": "/workspaces",
+  "customizations": {
+    "vscode": {
+      "extensions": [],
+      "settings": {
+        "terminal.integrated.profiles.linux": {
+          "bash": null
+        }
+      }
+    }
+  },
+  "postCreateCommand": "",
+  "features": { "ghcr.io/devcontainers/features/git:1": {} }
+}
```
.devcontainer/docker-compose.yml

```diff
@@ -1,17 +1,9 @@
-version: '3.4'
+version: "3.8"
 
 services:
   app:
-    # container_name: LibreChat_dev
     image: node:19-bullseye
-    # Using a Dockerfile is optional, but included for completeness.
-    # build:
-    #   context: .
-    #   dockerfile: Dockerfile
-    #   # [Optional] You can use build args to set options. e.g. 'VARIANT' below affects the image in the Dockerfile
-    #   args:
-    #     VARIANT: buster
-    # network_mode: "host"
-    # restart: always
     links:
       - mongodb
       - meilisearch
@@ -21,17 +13,16 @@ services:
       - "host.docker.internal:host-gateway"
 
     volumes:
-      # # This is where VS Code should expect to find your project's source code and the value of "workspaceFolder" in .devcontainer/devcontainer.json
-      - ..:/workspace:cached
-      # # - /app/client/node_modules
-      # # - ./api:/app/api
-      # # - ./.env:/app/.env
-      # # - ./.env.development:/app/.env.development
-      # # - ./.env.production:/app/.env.production
-      # # - /app/api/node_modules
-
-      # # Uncomment the next line to use Docker from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker-compose for details.
-      # # - /var/run/docker.sock:/var/run/docker.sock
+      # This is where VS Code should expect to find your project's source code and the value of "workspaceFolder" in .devcontainer/devcontainer.json
+      - ..:/workspaces:cached
+      # Uncomment the next line to use Docker from inside the container. See https://aka.ms/vscode-remote/samples/docker-from-docker-compose for details.
+      # - /var/run/docker.sock:/var/run/docker.sock
     environment:
       - HOST=0.0.0.0
       - MONGO_URI=mongodb://mongodb:27017/LibreChat
       # - CHATGPT_REVERSE_PROXY=http://host.docker.internal:8080/api/conversation # if you are hosting your own chatgpt reverse proxy with docker
       # - OPENAI_REVERSE_PROXY=http://host.docker.internal:8070/v1/chat/completions # if you are hosting your own chatgpt reverse proxy with docker
       - MEILI_HOST=http://meilisearch:7700
 
     # Runs app on the same network as the service container, allows "forwardPorts" in devcontainer.json function.
     # network_mode: service:another-service
@@ -42,42 +33,32 @@ services:
     # Uncomment the next line to use a non-root user for all processes - See https://aka.ms/vscode-remote/containers/non-root for details.
     # user: vscode
 
     # Uncomment the next four lines if you will use a ptrace-based debugger like C++, Go, and Rust.
     # cap_add:
     #   - SYS_PTRACE
     # security_opt:
     #   - seccomp:unconfined
 
     # Overrides default command so things don't shut down after the process ends.
     command: /bin/sh -c "while sleep 1000; do :; done"
 
   mongodb:
     container_name: chat-mongodb
-    # network_mode: "host"
     expose:
       - 27017
-    # ports:
-    #   - 27018:27017
     image: mongo
     # restart: always
     volumes:
       - ./data-node:/data/db
     command: mongod --noauth
   meilisearch:
     container_name: chat-meilisearch
-    image: getmeili/meilisearch:v1.0
-    # network_mode: "host"
+    image: getmeili/meilisearch:v1.5
     # restart: always
     expose:
       - 7700
-    # ports:
-    #   - 7700:7700
-    # env_file:
-    #   - .env
+    # Uncomment this to access meilisearch from outside docker
+    # ports:
+    #   - 7700:7700 # if exposing these ports, make sure your master key is not the default value
    environment:
       - SEARCH=false
-      - MEILI_HOST=http://0.0.0.0:7700
-      - MEILI_HTTP_ADDR=0.0.0.0:7700
+      - MEILI_HTTP_ADDR=meilisearch:7700
       - MEILI_NO_ANALYTICS=true
       - MEILI_MASTER_KEY=5c71cf56d672d009e36070b5bc5e47b743535ae55c818ae3b735bb6ebfb4ba63
     volumes:
-      - ./meili_data:/meili_data
+      - ./meili_data_v1.5:/meili_data
```
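The compose file relies on Docker's service-name DNS: the app container reaches the databases at `mongodb:27017` and `http://meilisearch:7700`, matching the `MONGO_URI` and `MEILI_HOST` values above. As a quick sanity check after bringing the stack up, here is a minimal sketch you could run from inside the dev container; it is not part of the repo, and it assumes Node 18+ (which the `node:19-bullseye` image satisfies):

```js
// check-services.js — hypothetical helper, run inside the dev container
const net = require('net');

// Meilisearch exposes a GET /health endpoint that reports its status.
fetch((process.env.MEILI_HOST || 'http://meilisearch:7700') + '/health')
  .then((res) => res.json())
  .then((body) => console.log('meilisearch:', body.status))
  .catch((err) => console.error('meilisearch unreachable:', err.message));

// For MongoDB, just confirm the port is open without pulling in the driver.
const socket = net.createConnection({ host: 'mongodb', port: 27017 }, () => {
  console.log('mongodb: port 27017 reachable');
  socket.end();
});
socket.on('error', (err) => console.error('mongodb unreachable:', err.message));
```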
.env.example (626 changes)
```diff
@@ -1,48 +1,188 @@
-##########################
-# Server configuration:
-##########################
+#=============================================================#
+# LibreChat Configuration                                     #
+#=============================================================#
+# Please refer to the reference documentation for assistance  #
+# with configuring your LibreChat environment. The guide is   #
+# available both online and within your local LibreChat       #
+# directory:                                                  #
+# Online: https://docs.librechat.ai/install/dotenv.html       #
+# Locally: ./docs/install/dotenv.md                           #
+#=============================================================#
+
+#==================================================#
+#               Server Configuration               #
+#==================================================#
+
 APP_TITLE=LibreChat
 
+# Uncomment to add a custom footer.
+# Uncomment and make empty "" to remove the footer.
+# CUSTOM_FOOTER="My custom footer"
+
-# The server will listen to localhost:3080 by default. You can change the target IP as you want.
-# If you want to make this server available externally, for example to share the server with others
-# or expose this from a Docker container, set host to 0.0.0.0 or your external IP interface.
-# Tips: Setting host to 0.0.0.0 means listening on all interfaces. It's not a real IP.
-# Use localhost:port rather than 0.0.0.0:port to access the server.
-# Set Node env to development if running in dev mode.
+DEBUG_LOGGING=true
+
 HOST=localhost
 PORT=3080
 
-# Note: the following enables user balances, which you can add manually
-# or you will need to build out a balance accruing system for users.
-# For more info, see https://docs.librechat.ai/features/token_usage.html
+MONGO_URI=mongodb://127.0.0.1:27017/LibreChat
 
-# To manually add balances, run the following command:
-# `npm run add-balance`
+DOMAIN_CLIENT=http://localhost:3080
+DOMAIN_SERVER=http://localhost:3080
 
-# You can also specify the email and token credit amount to add, e.g.:
-# `npm run add-balance example@example.com 1000`
+#=============#
+# Permissions #
+#=============#
 
-# This works well to track your own usage for personal use; 1000 credits = $0.001 (1 mill USD)
+# UID=1000
+# GID=1000
 
-# Set to true to enable token credit balances for the OpenAI/Plugins endpoints
-CHECK_BALANCE=false
+#===================================================#
+#                     Endpoints                     #
+#===================================================#
 
-# Automated Moderation System
-# The Automated Moderation System uses a scoring mechanism to track user violations. As users commit actions
-# like excessive logins, registrations, or messaging, they accumulate violation scores. Upon reaching
-# a set threshold, the user and their IP are temporarily banned. This system ensures platform security
-# by monitoring and penalizing rapid or suspicious activities.
+# ENDPOINTS=openAI,azureOpenAI,bingAI,chatGPTBrowser,google,gptPlugins,anthropic
 
-BAN_VIOLATIONS=true # Whether or not to enable banning users for violations (they will still be logged)
-BAN_DURATION=1000 * 60 * 60 * 2 # how long the user and associated IP are banned for
-BAN_INTERVAL=20 # a user will be banned everytime their score reaches/crosses over the interval threshold
+PROXY=
 
-# The score for each violation
+#============#
+# Anthropic  #
+#============#
+
+ANTHROPIC_API_KEY=user_provided
+ANTHROPIC_MODELS=claude-1,claude-instant-1,claude-2
+# ANTHROPIC_REVERSE_PROXY=
+
+#============#
+# Azure      #
+#============#
+
+# AZURE_API_KEY=
+AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4
+# AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo
+# PLUGINS_USE_AZURE="true"
+
+AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE
+
+# AZURE_OPENAI_API_INSTANCE_NAME=
+# AZURE_OPENAI_API_DEPLOYMENT_NAME=
+# AZURE_OPENAI_API_VERSION=
+# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME=
+# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME=
+
+#============#
+# BingAI     #
+#============#
+
+BINGAI_TOKEN=user_provided
+# BINGAI_HOST=https://cn.bing.com
+
+#============#
+# ChatGPT    #
+#============#
+
+CHATGPT_TOKEN=
+CHATGPT_MODELS=text-davinci-002-render-sha
+# CHATGPT_REVERSE_PROXY=<YOUR REVERSE PROXY>
+
+#============#
+# Google     #
+#============#
+
+GOOGLE_KEY=user_provided
+# GOOGLE_REVERSE_PROXY=
+
+#============#
+# OpenAI     #
+#============#
+
+OPENAI_API_KEY=user_provided
+# OPENAI_MODELS=gpt-3.5-turbo-1106,gpt-4-1106-preview,gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,text-davinci-003,gpt-4,gpt-4-0314,gpt-4-0613
+
+DEBUG_OPENAI=false
+
+# TITLE_CONVO=false
+# OPENAI_TITLE_MODEL=gpt-3.5-turbo
+
+# OPENAI_SUMMARIZE=true
+# OPENAI_SUMMARY_MODEL=gpt-3.5-turbo
+
+# OPENAI_FORCE_PROMPT=true
+
+# OPENAI_REVERSE_PROXY=
+
+#============#
+# OpenRouter #
+#============#
+
+# OPENROUTER_API_KEY=
+
+#============#
+# Plugins    #
+#============#
+
+# PLUGIN_MODELS=gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,gpt-4,gpt-4-0314,gpt-4-0613
+
+DEBUG_PLUGINS=true
+
+CREDS_KEY=f34be427ebb29de8d88c107a71546019685ed8b241d8f2ed00c3df97ad2566f0
+CREDS_IV=e2341419ec3dd3d19b13a1a87fafcbfb
+
+# Azure AI Search
+#-----------------
+AZURE_AI_SEARCH_SERVICE_ENDPOINT=
+AZURE_AI_SEARCH_INDEX_NAME=
+AZURE_AI_SEARCH_API_KEY=
+
+AZURE_AI_SEARCH_API_VERSION=
+AZURE_AI_SEARCH_SEARCH_OPTION_QUERY_TYPE=
+AZURE_AI_SEARCH_SEARCH_OPTION_TOP=
+AZURE_AI_SEARCH_SEARCH_OPTION_SELECT=
+
+# DALL·E 3
+#----------------
+# DALLE_API_KEY=
+# DALLE3_SYSTEM_PROMPT="Your System Prompt here"
+# DALLE_REVERSE_PROXY=
+
+# Google
+#-----------------
+GOOGLE_API_KEY=
+GOOGLE_CSE_ID=
+
+# SerpAPI
+#-----------------
+SERPAPI_API_KEY=
+
+# Stable Diffusion
+#-----------------
+SD_WEBUI_URL=http://host.docker.internal:7860
+
+# WolframAlpha
+#-----------------
+WOLFRAM_APP_ID=
+
+# Zapier
+#-----------------
+ZAPIER_NLA_API_KEY=
+
+#==================================================#
+#                      Search                      #
+#==================================================#
+
+SEARCH=true
+MEILI_NO_ANALYTICS=true
+MEILI_HOST=http://0.0.0.0:7700
+MEILI_HTTP_ADDR=0.0.0.0:7700
+MEILI_MASTER_KEY=DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFCt
+
+#===================================================#
+#                    User System                    #
+#===================================================#
+
+#========================#
+#       Moderation       #
+#========================#
+
+BAN_VIOLATIONS=true
+BAN_DURATION=1000 * 60 * 60 * 2
+BAN_INTERVAL=20
 
 LOGIN_VIOLATION_SCORE=1
 REGISTRATION_VIOLATION_SCORE=1
```
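The removed comments describe the Automated Moderation System: each violation adds to a per-user/IP score, and when the score reaches or crosses `BAN_INTERVAL`, the user and their IP are banned for `BAN_DURATION` milliseconds. A minimal illustrative sketch of that scoring logic follows; the names `recordViolation` and `isBanned` are hypothetical, not LibreChat's actual API:

```js
const scores = new Map(); // keyed by user id or IP
const bans = new Map();   // key -> timestamp the ban expires

const BAN_INTERVAL = 20;                 // ban each time the score crosses this threshold
const BAN_DURATION = 1000 * 60 * 60 * 2; // 7,200,000 ms = 2 hours

function recordViolation(key, score) {
  const total = (scores.get(key) || 0) + score;
  if (total >= BAN_INTERVAL) {
    bans.set(key, Date.now() + BAN_DURATION);
    scores.set(key, 0); // reset the score once a ban is issued
  } else {
    scores.set(key, total);
  }
}

function isBanned(key) {
  const until = bans.get(key);
  return until !== undefined && Date.now() < until;
}

recordViolation('203.0.113.7', 20); // e.g. NON_BROWSER_VIOLATION_SCORE crosses the threshold at once
console.log(isBanned('203.0.113.7')); // true
```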
```diff
@@ -50,398 +190,98 @@ CONCURRENT_VIOLATION_SCORE=1
 MESSAGE_VIOLATION_SCORE=1
 NON_BROWSER_VIOLATION_SCORE=20
 
 # Login and registration rate limiting.
+LOGIN_MAX=7
+LOGIN_WINDOW=5
+REGISTER_MAX=5
+REGISTER_WINDOW=60
 
-LOGIN_MAX=7 # The max amount of logins allowed per IP per LOGIN_WINDOW
-LOGIN_WINDOW=5 # in minutes, determines the window of time for LOGIN_MAX logins
-REGISTER_MAX=5 # The max amount of registrations allowed per IP per REGISTER_WINDOW
-REGISTER_WINDOW=60 # in minutes, determines the window of time for REGISTER_MAX registrations
+LIMIT_CONCURRENT_MESSAGES=true
+CONCURRENT_MESSAGE_MAX=2
 
+# Message rate limiting (per user & IP)
+LIMIT_MESSAGE_IP=true
+MESSAGE_IP_MAX=40
+MESSAGE_IP_WINDOW=1
 
-LIMIT_CONCURRENT_MESSAGES=true # Whether to limit the amount of messages a user can send per request
-CONCURRENT_MESSAGE_MAX=2 # The max amount of messages a user can send per request
+LIMIT_MESSAGE_USER=false
+MESSAGE_USER_MAX=40
+MESSAGE_USER_WINDOW=1
 
-LIMIT_MESSAGE_IP=true # Whether to limit the amount of messages an IP can send per MESSAGE_IP_WINDOW
-MESSAGE_IP_MAX=40 # The max amount of messages an IP can send per MESSAGE_IP_WINDOW
-MESSAGE_IP_WINDOW=1 # in minutes, determines the window of time for MESSAGE_IP_MAX messages
+#========================#
+#        Balance         #
+#========================#
 
-# Note: You can utilize both limiters, but default is to limit by IP only.
-LIMIT_MESSAGE_USER=false # Whether to limit the amount of messages an IP can send per MESSAGE_USER_WINDOW
-MESSAGE_USER_MAX=40 # The max amount of messages an IP can send per MESSAGE_USER_WINDOW
-MESSAGE_USER_WINDOW=1 # in minutes, determines the window of time for MESSAGE_USER_MAX messages
+CHECK_BALANCE=false
 
-# If you have permission problems, set here the UID and GID of the user running
-# the docker compose command. The applications in the container will run with these uid/gid.
-# UID=1000
-# GID=1000
+#========================#
+# Registration and Login #
+#========================#
 
-# Change this to proxy any API request.
-# It's useful if your machine has difficulty calling the original API server.
-# PROXY=
-
-# Change this to your MongoDB URI if different. I recommend appending LibreChat.
-MONGO_URI=mongodb://127.0.0.1:27018/LibreChat
-
-##########################
-# OpenAI Endpoint:
-##########################
-
-# Access key from OpenAI platform.
-# Leave it blank to disable this feature.
-# Set to "user_provided" to allow the user to provide their API key from the UI.
-OPENAI_API_KEY=user_provided
-
-DEBUG_OPENAI=false # Set to true to enable debug mode for the OpenAI endpoint
-
-# Identify the available models, separated by commas *without spaces*.
-# The first will be default.
-# Leave it blank to use internal settings.
-# OPENAI_MODELS=gpt-3.5-turbo-1106,gpt-4-1106-preview,gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,text-davinci-003,gpt-4,gpt-4-0314,gpt-4-0613
-
-# Titling is enabled by default when initiating a conversation.
-# Uncomment the following variable to disable this feature.
-# TITLE_CONVO=false
-
-# (Optional) The default model used for titling by is gpt-3.5-turbo-0613
-# You can change it by uncommenting the following and setting the desired model
-# Must be compatible with the OpenAI Endpoint.
-# OPENAI_TITLE_MODEL=gpt-3.5-turbo
-
-# (Optional/Experimental) Enable message summarization by uncommenting the following:
-# Note: this may affect response time when a summary is being generated.
-# OPENAI_SUMMARIZE=true
-
-# Not yet implemented: this will be a conversation option enabled by default to save users on tokens
-# We are using the ConversationSummaryBufferMemory method to summarize messages.
-# To learn more about this, see this article:
-# https://www.pinecone.io/learn/series/langchain/langchain-conversational-memory/
-
-# (Optional) The default model used for summarizing is gpt-3.5-turbo
-# You can change it by uncommenting the following and setting the desired model
-# Must be compatible with the OpenAI Endpoint.
-# OPENAI_SUMMARY_MODEL=gpt-3.5-turbo
-
-# Reverse proxy settings for OpenAI:
-# https://github.com/waylaidwanderer/node-chatgpt-api#using-a-reverse-proxy
-# The URL must match the "url/v1," pattern, the "openai" suffix is also allowed.
-# Examples:
-#   - https://open.ai/v1
-#   - https://open.ai/v1/ACCOUNT/GATEWAY/openai
-#   - https://open.ai/v1/hi/openai
-
-# OPENAI_REVERSE_PROXY=
-
-# (Advanced) Sometimes when using Local LLM APIs, you may need to force the API
-# to be called with a `prompt` payload instead of a `messages` payload; to mimic the
-# a `/v1/completions` request instead of `/v1/chat/completions`
-# This may be the case for LocalAI with some models. To do so, uncomment the following:
-# OPENAI_FORCE_PROMPT=true
-
-# (Advanced) For customization of the DALL-E-3 System prompt,
-# uncomment the following, and provide your own prompt:
-# See official prompt for reference:
-# https://github.com/spdustin/ChatGPT-AutoExpert/blob/main/_system-prompts/dall-e.md
-# DALLE3_SYSTEM_PROMPT="Your System Prompt here"
-
-# (Advanced) DALL-E Proxy settings
-# This is separate from its OpenAI counterpart for customization purposes
-
-# Reverse proxy settings, changes the baseURL for the DALL-E-3 API Calls
-# The URL must match the "url/v1," pattern, the "openai" suffix is also allowed.
-# Examples:
-#   - https://open.ai/v1
-#   - https://open.ai/v1/ACCOUNT/GATEWAY/openai
-#   - https://open.ai/v1/hi/openai
-
-# DALLE_REVERSE_PROXY=
-
-# Note: if you have PROXY set, it will be used for DALLE calls also, which is universal for the app
-
-##########################
-# OpenRouter (overrides OpenAI and Plugins Endpoints):
-##########################
-
-# OpenRouter is a legitimate proxy service to a multitude of LLMs, both closed and open source, including:
-# OpenAI models, Anthropic models, Meta's Llama models, pygmalionai/mythalion-13b
-# and many more open source models. Newer integrations are usually discounted, too!
-
-# Note: this overrides the OpenAI and Plugins Endpoints.
-# See ./docs/install/free_ai_apis.md for more info.
-
-# OPENROUTER_API_KEY=
-
-##########################
-# AZURE Endpoint:
-##########################
-
-# To use Azure with this project, set the following variables. These will be used to build the API URL.
-# Chat completion:
-# `https://{AZURE_OPENAI_API_INSTANCE_NAME}.openai.azure.com/openai/deployments/{AZURE_OPENAI_API_DEPLOYMENT_NAME}/chat/completions?api-version={AZURE_OPENAI_API_VERSION}`;
-# You should also consider changing the `OPENAI_MODELS` variable above to the models available in your instance/deployment.
-# Note: I've noticed that the Azure API is much faster than the OpenAI API, so the streaming looks almost instantaneous.
-# Note "AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME" and "AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME" are optional but might be used in the future
-
-# AZURE_API_KEY=
-# AZURE_OPENAI_API_INSTANCE_NAME=
-# AZURE_OPENAI_API_DEPLOYMENT_NAME=
-# AZURE_OPENAI_API_VERSION=
-# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME=
-# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME=
-
-# NOTE: As of 2023-11-10, the Azure API only allows one model per deployment,
-# It's recommended to name your deployments after the model name, e.g. "gpt-35-turbo,"
-# which allows for fast deployment switching and AZURE_USE_MODEL_AS_DEPLOYMENT_NAME enabled.
-# However, you can use non-model deployment names and setting the AZURE_OPENAI_DEFAULT_MODEL to ensure it works as expected.
-
-# Identify the available models, separated by commas *without spaces*.
-# The first will be default. Leave it blank or as is to use internal settings.
-# NOTE: as deployment names can't have periods, they will be removed when the endpoint is generated.
-AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4
-
-# (Advanced) this enables the use of the model name as the deployment name, e.g. "gpt-3.5-turbo" as the deployment name
-AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE
-
-# (Advanced) this overrides the model setting for Azure, in case you want to use your custom deployment names
-# as the values for AZURE_OPENAI_MODELS
-# AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo
-
-# To use Azure with the Plugins endpoint, you need the variables above, and uncomment the following variable:
-# NOTE: This may not work as expected and Azure OpenAI may not support OpenAI Functions yet
-# Omit/leave it commented to use the default OpenAI API
-
-# PLUGINS_USE_AZURE="true"
```
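The deleted comments quote the exact URL template Azure requests are built from. As a sketch of how those three variables compose into the chat-completions endpoint (illustrative only, not LibreChat's implementation):

```js
// Assumes the three AZURE_OPENAI_* variables from the section above are set.
const {
  AZURE_OPENAI_API_INSTANCE_NAME: instance,
  AZURE_OPENAI_API_DEPLOYMENT_NAME: deployment,
  AZURE_OPENAI_API_VERSION: version,
} = process.env;

const url =
  `https://${instance}.openai.azure.com/openai/deployments/` +
  `${deployment}/chat/completions?api-version=${version}`;
console.log(url);
```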
```diff
-##########################
-# ChatGPT Endpoint:
-##########################
-
-# ChatGPT Browser Client (free but use at your own risk)
-# Access token from https://chat.openai.com/api/auth/session
-# Exposes your access token to `CHATGPT_REVERSE_PROXY`
-# Set to "user_provided" to allow the user to provide its token from the UI.
-# Leave it blank to disable this endpoint
-CHATGPT_TOKEN=user_provided
-
-# Identify the available models, separated by commas. The first will be default.
-# Leave it blank to use internal settings.
-CHATGPT_MODELS=text-davinci-002-render-sha,gpt-4
-# NOTE: you can add gpt-4-plugins, gpt-4-code-interpreter, and gpt-4-browsing to the list above and use the models for these features;
-# however, the view/display portion of these features are not supported, but you can use the underlying models, which have higher token context
-# Also: text-davinci-002-render-paid is deprecated as of May 2023
-
-# Reverse proxy setting for OpenAI
-# https://github.com/waylaidwanderer/node-chatgpt-api#using-a-reverse-proxy
-# By default it will use the node-chatgpt-api recommended proxy, (it's a third party server)
-# CHATGPT_REVERSE_PROXY=<YOUR REVERSE PROXY>
-
-##########################
-# BingAI Endpoint:
-##########################
-
-# Also used for Sydney and jailbreak
-# To get your Access token for Bing, login to https://www.bing.com
-# Use dev tools or an extension while logged into the site to copy the content of the _U cookie.
-# If this fails, follow these instructions https://github.com/danny-avila/LibreChat/issues/370#issuecomment-1560382302 to provide the full cookie strings
-# or check out our discord https://discord.com/channels/1086345563026489514/1143941308684177429
-# Set to "user_provided" to allow the user to provide its token from the UI.
-# Leave it blank to disable this endpoint.
-BINGAI_TOKEN=user_provided
-
-# BingAI Host:
-# Necessary for some people in different countries, e.g. China (https://cn.bing.com)
-# Leave it blank to use default server.
-# BINGAI_HOST=https://cn.bing.com
-
-#############################
-# Plugins:
-#############################
-
-# Identify the available models, separated by commas *without spaces*.
-# The first will be default.
-# Leave it blank to use internal settings.
-# PLUGIN_MODELS=gpt-3.5-turbo,gpt-3.5-turbo-16k,gpt-3.5-turbo-0301,gpt-4,gpt-4-0314,gpt-4-0613
-
-DEBUG_PLUGINS=true # Set to false or comment out to disable debug mode for plugins
-
-# For securely storing credentials, you need a fixed key and IV. You can set them here for prod and dev environments
-# If you don't set them, the app will crash on startup.
-# You need a 32-byte key (64 characters in hex) and 16-byte IV (32 characters in hex)
-# Use this replit to generate some quickly: https://replit.com/@daavila/crypto#index.js
-# Here are some examples (THESE ARE NOT SECURE!)
-CREDS_KEY=f34be427ebb29de8d88c107a71546019685ed8b241d8f2ed00c3df97ad2566f0
-CREDS_IV=e2341419ec3dd3d19b13a1a87fafcbfb
```
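The Plugins comments spell out the requirement: a fixed 32-byte key (64 hex characters) and a 16-byte IV (32 hex characters), and the committed example values are explicitly insecure. Instead of the linked Replit, fresh values can be generated locally with Node's built-in crypto module; this is a sketch, and the same approach yields the 32-byte JWT secrets mentioned further down:

```js
const crypto = require('crypto');

console.log('CREDS_KEY=' + crypto.randomBytes(32).toString('hex')); // 64 hex chars
console.log('CREDS_IV=' + crypto.randomBytes(16).toString('hex'));  // 32 hex chars
console.log('JWT_SECRET=' + crypto.randomBytes(32).toString('hex'));
console.log('JWT_REFRESH_SECRET=' + crypto.randomBytes(32).toString('hex'));
```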
```diff
-# AI-Assisted Google Search
-# This bot supports searching google for answers to your questions with assistance from GPT!
-# See detailed instructions here: https://github.com/danny-avila/LibreChat/blob/main/docs/features/plugins/google_search.md
-GOOGLE_API_KEY=
-GOOGLE_CSE_ID=
-
-# StableDiffusion WebUI
-# This bot supports StableDiffusion WebUI, using it's API to generated requested images.
-# See detailed instructions here: https://github.com/danny-avila/LibreChat/blob/main/docs/features/plugins/stable_diffusion.md
-# Use "http://127.0.0.1:7860" with local install and "http://host.docker.internal:7860" for docker
-SD_WEBUI_URL=http://host.docker.internal:7860
-
-# Azure Cognitive Search
-# This plugin supports searching Azure Cognitive Search for answers to your questions.
-# See detailed instructions here: https://github.com/danny-avila/LibreChat/blob/main/docs/features/plugins/azure_cognitive_search.md
-AZURE_COGNITIVE_SEARCH_SERVICE_ENDPOINT=
-AZURE_COGNITIVE_SEARCH_INDEX_NAME=
-AZURE_COGNITIVE_SEARCH_API_KEY=
-
-AZURE_COGNITIVE_SEARCH_API_VERSION=
-AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_QUERY_TYPE=
-AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_TOP=
-AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_SELECT=
-
-##########################
-# PaLM (Google) Endpoint:
-##########################
-
-# Follow the instruction here to setup:
-# https://github.com/danny-avila/LibreChat/blob/main/docs/install/apis_and_tokens.md
-
-PALM_KEY=user_provided
-
-# In case you need a reverse proxy for this endpoint:
-# GOOGLE_REVERSE_PROXY=
-
-##########################
-# Anthropic Endpoint:
-##########################
-# Access key from https://console.anthropic.com/
-# Leave it blank to disable this feature.
-# Set to "user_provided" to allow the user to provide their API key from the UI.
-# Note that access to claude-1 may potentially become unavailable with the release of claude-2.
-ANTHROPIC_API_KEY=user_provided
-ANTHROPIC_MODELS=claude-1,claude-instant-1,claude-2
-
-##########################
-# Proxy: To be Used by all endpoints
-##########################
-
-PROXY=
-
-##########################
-# Search:
-##########################
-
-# ENABLING SEARCH MESSAGES/CONVOS
-# Requires the installation of the free self-hosted Meilisearch or a paid Remote Plan (Remote not tested)
-# The easiest setup for this is through docker-compose, which takes care of it for you.
-SEARCH=true
-
-# HIGHLY RECOMMENDED: Disable anonymized telemetry analytics for MeiliSearch for absolute privacy.
-MEILI_NO_ANALYTICS=true
-
-# REQUIRED FOR SEARCH: MeiliSearch Host, mainly for the API server to connect to the search server.
-# Replace '0.0.0.0' with 'meilisearch' if serving MeiliSearch with docker-compose.
-MEILI_HOST=http://0.0.0.0:7700
-
-# REQUIRED FOR SEARCH: MeiliSearch HTTP Address, mainly for docker-compose to expose the search server.
-# Replace '0.0.0.0' with 'meilisearch' if serving MeiliSearch with docker-compose.
-MEILI_HTTP_ADDR=0.0.0.0:7700
-
-# REQUIRED FOR SEARCH: In production env., a secure key is needed. You can generate your own.
-# This master key must be at least 16 bytes, composed of valid UTF-8 characters.
-# MeiliSearch will throw an error and refuse to launch if no master key is provided,
-# or if it is under 16 bytes. MeiliSearch will suggest a secure autogenerated master key.
-# Using docker, it seems recognized as production so use a secure key.
-# This is a ready made secure key for docker-compose, you can replace it with your own.
-MEILI_MASTER_KEY=DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFCt
-
-##########################
-# User System:
-##########################
 
 # Allow Public Registration
 ALLOW_EMAIL_LOGIN=true
 ALLOW_REGISTRATION=true
 
 # Allow Social Registration
 ALLOW_SOCIAL_LOGIN=false
 
 # Allow Social Registration (WORKS ONLY for Google, Github, Discord)
 ALLOW_SOCIAL_REGISTRATION=false
 
-# JWT Secrets
-# You should use secure values. The examples given are 32-byte keys (64 characters in hex)
-# Use this replit to generate some quickly: https://replit.com/@daavila/crypto#index.js
+SESSION_EXPIRY=1000 * 60 * 15
+REFRESH_TOKEN_EXPIRY=(1000 * 60 * 60 * 24) * 7
 
 JWT_SECRET=16f8c0ef4a5d391b26034086c628469d3f9f497f08163ab9b40137092f2909ef
 JWT_REFRESH_SECRET=eaa5191f2914e30b9387fd84e254e4ba6fc51b4654968a9b0803b456a54b8418
 
-# Google:
-# Add your Google Client ID and Secret here, you must register an app with Google Cloud to get these values
-# https://cloud.google.com/
-GOOGLE_CLIENT_ID=
-GOOGLE_CLIENT_SECRET=
-GOOGLE_CALLBACK_URL=/oauth/google/callback
+# Discord
+DISCORD_CLIENT_ID=
+DISCORD_CLIENT_SECRET=
+DISCORD_CALLBACK_URL=/oauth/discord/callback
 
-# Facebook:
-# Add your Facebook Client ID and Secret here, you must register an app with Facebook to get these values
-# https://developers.facebook.com/
+# Facebook
 FACEBOOK_CLIENT_ID=
 FACEBOOK_CLIENT_SECRET=
 FACEBOOK_CALLBACK_URL=/oauth/facebook/callback
 
-# OpenID:
-# See OpenID provider to get the below values
-# Create random string for OPENID_SESSION_SECRET
-# For Azure AD
-# ISSUER: https://login.microsoftonline.com/(tenant id)/v2.0/
-# SCOPE: openid profile email
+# GitHub
+GITHUB_CLIENT_ID=
+GITHUB_CLIENT_SECRET=
+GITHUB_CALLBACK_URL=/oauth/github/callback
+
+# Google
+GOOGLE_CLIENT_ID=
+GOOGLE_CLIENT_SECRET=
+GOOGLE_CALLBACK_URL=/oauth/google/callback
+
+# OpenID
 OPENID_CLIENT_ID=
 OPENID_CLIENT_SECRET=
 OPENID_ISSUER=
 OPENID_SESSION_SECRET=
 OPENID_SCOPE="openid profile email"
 OPENID_CALLBACK_URL=/oauth/openid/callback
 # If LABEL and URL are left empty, then the default OpenID label and logo are used.
 
 OPENID_BUTTON_LABEL=
 OPENID_IMAGE_URL=
 
-# Set the expiration delay for the secure cookie with the JWT token
-# Recommend session expiry to be 15 minutes
-# Delay is in millisecond e.g. 7 days is 1000*60*60*24*7
-SESSION_EXPIRY=1000 * 60 * 15
-REFRESH_TOKEN_EXPIRY=(1000 * 60 * 60 * 24) * 7
```
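As the removed comment notes, these expiry values are plain millisecond arithmetic:

```js
console.log(1000 * 60 * 15);            // 900000    -> 15 minutes (SESSION_EXPIRY)
console.log((1000 * 60 * 60 * 24) * 7); // 604800000 -> 7 days (REFRESH_TOKEN_EXPIRY)
```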
```diff
+#========================#
+#  Email Password Reset  #
+#========================#
 
-# Github:
-# Get the Client ID and Secret from your Discord Application
-# Add your Discord Client ID and Client Secret here:
+EMAIL_SERVICE=
+EMAIL_HOST=
+EMAIL_PORT=25
+EMAIL_ENCRYPTION=
+EMAIL_ENCRYPTION_HOSTNAME=
+EMAIL_ALLOW_SELFSIGNED=
+EMAIL_USERNAME=
+EMAIL_PASSWORD=
+EMAIL_FROM_NAME=
+EMAIL_FROM=noreply@librechat.ai
 
-GITHUB_CLIENT_ID=your_client_id
-GITHUB_CLIENT_SECRET=your_client_secret
-GITHUB_CALLBACK_URL=/oauth/github/callback # this should be the same for everyone
+#==================================================#
+#                      Others                      #
+#==================================================#
+#   You should leave the following commented out   #
 
-# Discord:
-# Get the Client ID and Secret from your Discord Application
-# Add your Github Client ID and Client Secret here:
+# NODE_ENV=
 
-DISCORD_CLIENT_ID=your_client_id
-DISCORD_CLIENT_SECRET=your_client_secret
-DISCORD_CALLBACK_URL=/oauth/discord/callback # this should be the same for everyone
+# REDIS_URI=
+# USE_REDIS=
 
-###########################
-# Application Domains
-###########################
-
-# Note:
-# Server = Backend
-# Client = Public (the client is the url you visit)
-# For the Google login to work in dev mode, you will need to change DOMAIN_SERVER to localhost:3090 or place it in .env.development
-
-DOMAIN_CLIENT=http://localhost:3080
-DOMAIN_SERVER=http://localhost:3080
-
-###########################
-# Email
-###########################
-
-# Email is used for password reset. Note that all 4 values must be set for email to work.
-# Failing to set the 4 values will result in LibreChat using the unsecured password reset!
-EMAIL_SERVICE= # eg. gmail
-EMAIL_USERNAME= # eg. your email address if using gmail
-EMAIL_PASSWORD= # eg. this is the "app password" if using gmail
-EMAIL_FROM=noreply@librechat.ai # email address for from field, it is required to set a value here even in the cases where it's not porperly working.
+# E2E_USER_EMAIL=
+# E2E_USER_PASSWORD=
```
```diff
@@ -19,6 +19,9 @@ module.exports = {
     'e2e/playwright-report/**/*',
     'packages/data-provider/types/**/*',
     'packages/data-provider/dist/**/*',
+    'data-node/**/*',
+    'meili_data/**/*',
+    'node_modules/**/*',
   ],
   parser: '@typescript-eslint/parser',
   parserOptions: {
```
.github/ISSUE_TEMPLATE/BUG-REPORT.yml (8 changes)

```diff
@@ -7,14 +7,6 @@ body:
     attributes:
       value: |
         Thanks for taking the time to fill out this bug report!
-  - type: input
-    id: contact
-    attributes:
-      label: Contact Details
-      description: How can we get in touch with you if we need more info?
-      placeholder: ex. email@example.com
-    validations:
-      required: false
   - type: textarea
     id: what-happened
     attributes:
```
.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml (8 changes)

```diff
@@ -7,14 +7,6 @@ body:
     attributes:
       value: |
         Thank you for taking the time to fill this out!
-  - type: input
-    id: contact
-    attributes:
-      label: Contact Details
-      description: How can we contact you if we need more information?
-      placeholder: ex. email@example.com
-    validations:
-      required: false
   - type: textarea
     id: what
     attributes:
```
.github/ISSUE_TEMPLATE/QUESTION.yml (8 changes)

```diff
@@ -7,14 +7,6 @@ body:
     attributes:
       value: |
         Thanks for taking the time to fill this!
-  - type: input
-    id: contact
-    attributes:
-      label: Contact Details
-      description: How can we get in touch with you if we need more info?
-      placeholder: ex. email@example.com
-    validations:
-      required: false
   - type: textarea
     id: what-is-your-question
     attributes:
```
.github/playwright.yml (new file, 72 lines)

```diff
@@ -0,0 +1,72 @@
+# name: Playwright Tests
+# on:
+#   pull_request:
+#     branches:
+#       - main
+#       - dev
+#       - release/*
+#     paths:
+#       - 'api/**'
+#       - 'client/**'
+#       - 'packages/**'
+#       - 'e2e/**'
+# jobs:
+#   tests_e2e:
+#     name: Run Playwright tests
+#     if: github.event.pull_request.head.repo.full_name == 'danny-avila/LibreChat'
+#     timeout-minutes: 60
+#     runs-on: ubuntu-latest
+#     env:
+#       NODE_ENV: CI
+#       CI: true
+#       SEARCH: false
+#       BINGAI_TOKEN: user_provided
+#       CHATGPT_TOKEN: user_provided
+#       MONGO_URI: ${{ secrets.MONGO_URI }}
+#       OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+#       E2E_USER_EMAIL: ${{ secrets.E2E_USER_EMAIL }}
+#       E2E_USER_PASSWORD: ${{ secrets.E2E_USER_PASSWORD }}
+#       JWT_SECRET: ${{ secrets.JWT_SECRET }}
+#       JWT_REFRESH_SECRET: ${{ secrets.JWT_REFRESH_SECRET }}
+#       CREDS_KEY: ${{ secrets.CREDS_KEY }}
+#       CREDS_IV: ${{ secrets.CREDS_IV }}
+#       DOMAIN_CLIENT: ${{ secrets.DOMAIN_CLIENT }}
+#       DOMAIN_SERVER: ${{ secrets.DOMAIN_SERVER }}
+#       PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: 1 # Skip downloading during npm install
+#       PLAYWRIGHT_BROWSERS_PATH: 0 # Places binaries to node_modules/@playwright/test
+#       TITLE_CONVO: false
+#     steps:
+#       - uses: actions/checkout@v4
+#       - uses: actions/setup-node@v4
+#         with:
+#           node-version: 18
+#           cache: 'npm'
+
+#       - name: Install global dependencies
+#         run: npm ci
+
+#       # - name: Remove sharp dependency
+#       #   run: rm -rf node_modules/sharp
+
+#       # - name: Install sharp with linux dependencies
+#       #   run: cd api && SHARP_IGNORE_GLOBAL_LIBVIPS=1 npm install --arch=x64 --platform=linux --libc=glibc sharp
+
+#       - name: Build Client
+#         run: npm run frontend
+
+#       - name: Install Playwright
+#         run: |
+#           npx playwright install-deps
+#           npm install -D @playwright/test@latest
+#           npx playwright install chromium
+
+#       - name: Run Playwright tests
+#         run: npm run e2e:ci
+
+#       - name: Upload playwright report
+#         uses: actions/upload-artifact@v3
+#         if: always()
+#         with:
+#           name: playwright-report
+#           path: e2e/playwright-report/
+#           retention-days: 30
```
.github/workflows/backend-review.yml (4 changes)

```diff
@@ -23,9 +23,9 @@ jobs:
       BAN_INTERVAL: ${{ secrets.BAN_INTERVAL }}
       NODE_ENV: CI
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
       - name: Use Node.js 20.x
-        uses: actions/setup-node@v3
+        uses: actions/setup-node@v4
         with:
           node-version: 20
           cache: 'npm'
```
.github/workflows/container.yml (4 changes)

```diff
@@ -13,11 +13,11 @@ jobs:
     steps:
       # Check out the repository
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
 
       # Set up Docker
       - name: Set up Docker
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v3
 
       # Log in to GitHub Container Registry
       - name: Log in to GitHub Container Registry
```
.github/workflows/data-provider.yml (8 changes)

```diff
@@ -11,8 +11,8 @@ jobs:
   build:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-node@v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-node@v4
         with:
           node-version: 16
       - run: cd packages/data-provider && npm ci
@@ -22,8 +22,8 @@ jobs:
     needs: build
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-node@v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-node@v4
         with:
           node-version: 16
           registry-url: 'https://registry.npmjs.org'
```
.github/workflows/deploy.yml (2 changes)

```diff
@@ -17,7 +17,7 @@ jobs:
     steps:
       # checkout the repo
       - name: 'Checkout GitHub Action'
-        uses: actions/checkout@main
+        uses: actions/checkout@v4
 
       - name: 'Login via Azure CLI'
         uses: azure/login@v1
```
.github/workflows/dev-images.yml (4 changes)

```diff
@@ -16,11 +16,11 @@ jobs:
     steps:
       # Check out the repository
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
 
      # Set up Docker
       - name: Set up Docker
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v3
 
       # Log in to GitHub Container Registry
       - name: Log in to GitHub Container Registry
```
.github/workflows/frontend-review.yml (4 changes)

```diff
@@ -20,9 +20,9 @@ jobs:
     timeout-minutes: 60
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
       - name: Use Node.js 20.x
-        uses: actions/setup-node@v3
+        uses: actions/setup-node@v4
         with:
           node-version: 20
           cache: 'npm'
```
.github/workflows/latest-images-main.yml (4 changes)

```diff
@@ -10,11 +10,11 @@ jobs:
     steps:
       # Check out the repository
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
 
       # Set up Docker
       - name: Set up Docker
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v3
 
       # Log in to GitHub Container Registry
       - name: Log in to GitHub Container Registry
```
.github/workflows/mkdocs.yaml (2 changes)

```diff
@@ -9,7 +9,7 @@ jobs:
   deploy:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: actions/setup-python@v4
         with:
           python-version: 3.x
```
.github/workflows/playwright.yml (deleted, 72 lines)

```diff
@@ -1,72 +0,0 @@
-name: Playwright Tests
-on:
-  pull_request:
-    branches:
-      - main
-      - dev
-      - release/*
-    paths:
-      - 'api/**'
-      - 'client/**'
-      - 'packages/**'
-      - 'e2e/**'
-jobs:
-  tests_e2e:
-    name: Run Playwright tests
-    if: github.event.pull_request.head.repo.full_name == 'danny-avila/LibreChat'
-    timeout-minutes: 60
-    runs-on: ubuntu-latest
-    env:
-      NODE_ENV: CI
-      CI: true
-      SEARCH: false
-      BINGAI_TOKEN: user_provided
-      CHATGPT_TOKEN: user_provided
-      MONGO_URI: ${{ secrets.MONGO_URI }}
-      OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
-      E2E_USER_EMAIL: ${{ secrets.E2E_USER_EMAIL }}
-      E2E_USER_PASSWORD: ${{ secrets.E2E_USER_PASSWORD }}
-      JWT_SECRET: ${{ secrets.JWT_SECRET }}
-      JWT_REFRESH_SECRET: ${{ secrets.JWT_REFRESH_SECRET }}
-      CREDS_KEY: ${{ secrets.CREDS_KEY }}
-      CREDS_IV: ${{ secrets.CREDS_IV }}
-      DOMAIN_CLIENT: ${{ secrets.DOMAIN_CLIENT }}
-      DOMAIN_SERVER: ${{ secrets.DOMAIN_SERVER }}
-      PLAYWRIGHT_SKIP_BROWSER_DOWNLOAD: 1 # Skip downloading during npm install
-      PLAYWRIGHT_BROWSERS_PATH: 0 # Places binaries to node_modules/@playwright/test
-      TITLE_CONVO: false
-    steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-node@v3
-        with:
-          node-version: 18
-          cache: 'npm'
-
-      - name: Install global dependencies
-        run: npm ci
-
-      # - name: Remove sharp dependency
-      #   run: rm -rf node_modules/sharp
-
-      # - name: Install sharp with linux dependencies
-      #   run: cd api && SHARP_IGNORE_GLOBAL_LIBVIPS=1 npm install --arch=x64 --platform=linux --libc=glibc sharp
-
-      - name: Build Client
-        run: npm run frontend
-
-      - name: Install Playwright
-        run: |
-          npx playwright install-deps
-          npm install -D @playwright/test@latest
-          npx playwright install chromium
-
-      - name: Run Playwright tests
-        run: npm run e2e:ci
-
-      - name: Upload playwright report
-        uses: actions/upload-artifact@v3
-        if: always()
-        with:
-          name: playwright-report
-          path: e2e/playwright-report/
-          retention-days: 30
```
.gitignore (4 changes)

```diff
@@ -2,7 +2,7 @@
 
 # Logs
 data-node
-meili_data
+meili_data*
 data/
 logs
 *.log
@@ -40,7 +40,7 @@ meili_data/
 api/node_modules/
 client/node_modules/
 bower_components/
-types/
+*.d.ts
 
 # Floobits
 .floo
```
```diff
@@ -7,7 +7,7 @@ WORKDIR /app
 # Install call deps - Install curl for health check
 RUN apk --no-cache add curl && \
     # We want to inherit env from the container, not the file
-    # This will preserve any existing env file if it's already in souce
+    # This will preserve any existing env file if it's already in source
     # otherwise it will create a new one
     touch .env && \
     # Build deps in seperate
```
```diff
@@ -1,34 +1,33 @@
 # Build API, Client and Data Provider
-FROM node:19-alpine AS base
-
-WORKDIR /app
-COPY config/loader.js ./config/
-RUN npm install dotenv
-
-WORKDIR /app/api
-COPY api/package*.json ./
-COPY api/ ./
-RUN npm install
-
-# React client build
-FROM base AS client-build
-WORKDIR /app/client
-COPY ./client/ ./
+FROM node:20-alpine AS base
 
 # Build data-provider
 FROM base AS data-provider-build
 WORKDIR /app/packages/data-provider
 COPY ./packages/data-provider ./
 RUN npm install
 RUN npm run build
 
+# React client build
+FROM data-provider-build AS client-build
+WORKDIR /app/client
+COPY ./client/ ./
+# Copy data-provider to client's node_modules
+RUN mkdir -p /app/client/node_modules/librechat-data-provider/
+RUN cp -R /app/packages/data-provider/* /app/client/node_modules/librechat-data-provider/
+
+WORKDIR /app/client
 RUN npm install
 ENV NODE_OPTIONS="--max-old-space-size=2048"
 RUN npm run build
 
 # Node API setup
-FROM base AS api-build
+FROM data-provider-build AS api-build
 WORKDIR /app/api
 COPY api/package*.json ./
 COPY api/ ./
+# Copy data-provider to API's node_modules
+RUN mkdir -p /app/api/node_modules/librechat-data-provider/
+RUN cp -R /app/packages/data-provider/* /app/api/node_modules/librechat-data-provider/
 RUN npm install
 COPY --from=client-build /app/client/dist /app/client/dist
 EXPOSE 3080
 ENV HOST=0.0.0.0
```
README.md (51 changes)

```diff
@@ -20,13 +20,33 @@
     <img
       src="https://img.shields.io/badge/DOCS-blue.svg?style=for-the-badge&logo=read-the-docs&logoColor=white&labelColor=000000&logoWidth=20">
   </a>
-  <a aria-label="Sponsors" href="#sponsors">
+  <a aria-label="Sponsors" href="https://github.com/sponsors/danny-avila">
     <img
       src="https://img.shields.io/badge/SPONSORS-brightgreen.svg?style=for-the-badge&logo=github-sponsors&logoColor=white&labelColor=000000&logoWidth=20">
   </a>
 </p>
 
-## All-In-One AI Conversations with LibreChat ##
+# Features
+- 🖥️ UI matching ChatGPT, including Dark mode, Streaming, and 11-2023 updates
+- 💬 Multimodal Chat:
+  - Upload and analyze images with GPT-4 and Gemini Vision 📸
+  - More filetypes and Assistants API integration in Active Development 🚧
+- 🌎 Multilingual UI:
+  - English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro,
+  - Русский, 日本語, Svenska, 한국어, Tiếng Việt, 繁體中文, العربية, Türkçe, Nederlands
+- 🤖 AI model selection: OpenAI API, Azure, BingAI, ChatGPT, Google Vertex AI, Anthropic (Claude), Plugins
+- 💾 Create, Save, & Share Custom Presets
+- 🔄 Edit, Resubmit, and Continue messages with conversation branching
+- 📤 Export conversations as screenshots, markdown, text, json.
+- 🔍 Search all messages/conversations
+- 🔌 Plugins, including web access, image generation with DALL-E-3 and more
+- 👥 Multi-User, Secure Authentication with Moderation and Token spend tools
+- ⚙️ Configure Proxy, Reverse Proxy, Docker, many Deployment options, and completely Open-Source
+
+[For a thorough review of our features, see our docs here](https://docs.librechat.ai/features/plugins/introduction.html) 📚
+
+## All-In-One AI Conversations with LibreChat
 LibreChat brings together the future of assistant AIs with the revolutionary technology of OpenAI's ChatGPT. Celebrating the original styling, LibreChat gives you the ability to integrate multiple AI models. It also integrates and enhances original client features such as conversation and message search, prompt templates and plugins.
 
 With LibreChat, you no longer need to opt for ChatGPT Plus and can instead use free or pay-per-call APIs. We welcome contributions, cloning, and forking to enhance the capabilities of this advanced chatbot platform.
@@ -36,15 +56,6 @@ With LibreChat, you no longer need to opt for ChatGPT Plus and can instead use f
 [](https://youtu.be/pNIOs1ovsXw)
 Click on the thumbnail to open the video☝️
 
-# Features
-- Response streaming identical to ChatGPT through server-sent events
-- UI from original ChatGPT, including Dark mode
-- AI model selection: OpenAI API, BingAI, ChatGPT Browser, PaLM2, Anthropic (Claude), Plugins
-- Create, Save, & Share custom presets - [More info on prompt presets here](https://github.com/danny-avila/LibreChat/releases/tag/v0.3.0)
-- Edit and Resubmit messages with conversation branching
-- Search all messages/conversations - [More info here](https://github.com/danny-avila/LibreChat/releases/tag/v0.1.0)
-- Plugins now available (including web access, image generation and more)
-
 ---
 
 ## ⚠️ [Breaking Changes](docs/general_info/breaking_changes.md) ⚠️
@@ -69,10 +80,12 @@ Keep up with the latest updates by visiting the releases page - [Releases](https
   * [Mac Install🍎](docs/install/mac_install.md)
   * [Windows Install💙](docs/install/windows_install.md)
   * Configuration
-    * [APIs and Tokens](docs/install/apis_and_tokens.md)
+    * [.env Configuration](./docs/install/dotenv.md)
+    * [AI Setup](docs/install/ai_setup.md)
     * [User Auth System](docs/install/user_auth_system.md)
     * [Online MongoDB Database](docs/install/mongodb.md)
     * [Default Language](docs/install/default_language.md)
+    * [LiteLLM Proxy: Load Balance LLMs + Spend Tracking](docs/install/litellm.md)
 </details>
 
 <details>
@@ -97,10 +110,13 @@ Keep up with the latest updates by visiting the releases page - [Releases](https
 
 
   * [Automated Moderation](docs/features/mod_system.md)
+  * [Token Usage](docs/features/token_usage.md)
+  * [Manage Your Database](docs/features/manage_your_database.md)
+  * [PandoraNext Deployment Guide](docs/features/pandoranext.md)
   * [Third-Party Tools](docs/features/third_party.md)
   * [Proxy](docs/features/proxy.md)
+  * [Bing Jailbreak](docs/features/bing_jailbreak.md)
-  * [Token Usage](docs/features/token_usage.md)
 
 </details>
 
 <details>
@@ -130,7 +146,6 @@ Keep up with the latest updates by visiting the releases page - [Releases](https
   * [Project Roadmap](https://github.com/users/danny-avila/projects/2)
 </details>
 
-
 ---
 
 ## Star History
@@ -141,18 +156,10 @@ Keep up with the latest updates by visiting the releases page - [Releases](https
 
 ---
 
-## Sponsors
-
-Sponsored by <a href="https://github.com/mjtechguy"><b>@mjtechguy</b></a>, <a href="https://github.com/SphaeroX"><b>@SphaeroX</b></a>, <a href="https://github.com/DavidDev1334"><b>@DavidDev1334</b></a>, <a href="https://github.com/fuegovic"><b>@fuegovic</b></a>, <a href="https://github.com/Pharrcyde"><b>@Pharrcyde</b></a>
-
----
-
 ## Contributors
 Contributions and suggestions bug reports and fixes are welcome!
 Please read the documentation before you do!
 
----
-
 For new features, components, or extensions, please open an issue and discuss before sending a PR.
 
 - Join the [Discord community](https://discord.gg/uDyZ5Tzhct)
```
@@ -1,6 +1,7 @@
require('dotenv').config();
const { KeyvFile } = require('keyv-file');
const { getUserKey, checkUserKeyExpiry } = require('../server/services/UserService');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const { logger } = require('~/config');

const askBing = async ({
  text,
@@ -100,7 +101,7 @@ const askBing = async ({
    }
  }

  console.log('bing options', options);
  logger.debug('bing options', options);

  const res = await bingAIClient.sendMessage(text, options);
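A recurring change across every file in this diff is swapping ad-hoc `console.log`/`console.debug` calls for a shared `logger` imported from `~/config`. For context, such a module is typically a thin wrapper around a logging library; a minimal sketch with winston (an assumption here — the actual implementation may differ) could look like:

```js
// config/index.js — hypothetical sketch of a centralized logger module.
const winston = require('winston');

const logger = winston.createLogger({
  // The log level can be tuned per environment instead of sprinkling `if (debug)` checks.
  level: process.env.DEBUG_LOGGING === 'true' ? 'debug' : 'info',
  format: winston.format.combine(winston.format.timestamp(), winston.format.json()),
  transports: [new winston.transports.Console()],
});

module.exports = { logger };
```

This is why the diff can delete the `if (this.options.debug)` guards outright: filtering by level becomes the logger's job.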
@@ -1,7 +1,9 @@
// const { Agent, ProxyAgent } = require('undici');
const BaseClient = require('./BaseClient');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const Anthropic = require('@anthropic-ai/sdk');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const { getResponseSender, EModelEndpoint } = require('librechat-data-provider');
const { getModelMaxTokens } = require('~/utils');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');

const HUMAN_PROMPT = '\n\nHuman:';
const AI_PROMPT = '\n\nAssistant:';
@@ -9,10 +11,9 @@ const AI_PROMPT = '\n\nAssistant:';
const tokenizersCache = {};

class AnthropicClient extends BaseClient {
  constructor(apiKey, options = {}, cacheOptions = {}) {
    super(apiKey, options, cacheOptions);
  constructor(apiKey, options = {}) {
    super(apiKey, options);
    this.apiKey = apiKey || process.env.ANTHROPIC_API_KEY;
    this.sender = 'Anthropic';
    this.userLabel = HUMAN_PROMPT;
    this.assistantLabel = AI_PROMPT;
    this.setOptions(options);
@@ -40,13 +41,14 @@ class AnthropicClient extends BaseClient {
      ...modelOptions,
      // set some good defaults (check for undefined in some cases because they may be 0)
      model: modelOptions.model || 'claude-1',
      temperature: typeof modelOptions.temperature === 'undefined' ? 0.7 : modelOptions.temperature, // 0 - 1, 0.7 is recommended
      temperature: typeof modelOptions.temperature === 'undefined' ? 1 : modelOptions.temperature, // 0 - 1, 1 is default
      topP: typeof modelOptions.topP === 'undefined' ? 0.7 : modelOptions.topP, // 0 - 1, default: 0.7
      topK: typeof modelOptions.topK === 'undefined' ? 40 : modelOptions.topK, // 1-40, default: 40
      stop: modelOptions.stop, // no stop method for now
    };

    this.maxContextTokens = this.options.maxContextTokens || 99999;
    this.maxContextTokens =
      getModelMaxTokens(this.modelOptions.model, EModelEndpoint.anthropic) ?? 100000;
    this.maxResponseTokens = this.modelOptions.maxOutputTokens || 1500;
    this.maxPromptTokens =
      this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;
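The "check for undefined" comment above is worth unpacking: a plain `||` fallback would silently discard legitimate falsy values such as 0. A quick illustration:

```js
// Why `typeof x === 'undefined'` instead of `x || default`:
const opts = { temperature: 0 }; // user explicitly wants deterministic output

const wrong = opts.temperature || 1; // 1 — the user's 0 is lost, because 0 is falsy
const right = typeof opts.temperature === 'undefined' ? 1 : opts.temperature; // 0 — preserved
```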
@@ -59,6 +61,14 @@ class AnthropicClient extends BaseClient {
      );
    }

    this.sender =
      this.options.sender ??
      getResponseSender({
        model: this.modelOptions.model,
        endpoint: EModelEndpoint.anthropic,
        modelLabel: this.options.modelLabel,
      });

    this.startToken = '||>';
    this.endToken = '';
    this.gptEncoder = this.constructor.getTokenizer('cl100k_base');
@@ -78,16 +88,15 @@ class AnthropicClient extends BaseClient {
  }

  getClient() {
    const options = {
      apiKey: this.apiKey,
    };

    if (this.options.reverseProxyUrl) {
      return new Anthropic({
        apiKey: this.apiKey,
        baseURL: this.options.reverseProxyUrl,
      });
    } else {
      return new Anthropic({
        apiKey: this.apiKey,
      });
      options.baseURL = this.options.reverseProxyUrl;
    }

    return new Anthropic(options);
  }

  async buildMessages(messages, parentMessageId) {
@@ -95,9 +104,8 @@ class AnthropicClient extends BaseClient {
      messages,
      parentMessageId,
    });
    if (this.options.debug) {
      console.debug('AnthropicClient: orderedMessages', orderedMessages, parentMessageId);
    }

    logger.debug('[AnthropicClient] orderedMessages', { orderedMessages, parentMessageId });

    const formattedMessages = orderedMessages.map((message) => ({
      author: message.isCreatedByUser ? this.userLabel : this.assistantLabel,
@@ -239,7 +247,7 @@ class AnthropicClient extends BaseClient {
  }

  getCompletion() {
    console.log('AnthropicClient doesn\'t use getCompletion (all handled in sendCompletion)');
    logger.debug('AnthropicClient doesn\'t use getCompletion (all handled in sendCompletion)');
  }

  async sendCompletion(payload, { onProgress, abortController }) {
@@ -254,12 +262,7 @@ class AnthropicClient extends BaseClient {
      modelOptions.stream = true;
    }

    const { debug } = this.options;
    if (debug) {
      console.debug();
      console.debug(modelOptions);
      console.debug();
    }
    logger.debug('modelOptions', { modelOptions });

    const client = this.getClient();
    const metadata = {
@@ -287,32 +290,23 @@ class AnthropicClient extends BaseClient {
      top_p,
      top_k,
    };
    if (this.options.debug) {
      console.log('AnthropicClient: requestOptions');
      console.dir(requestOptions, { depth: null });
    }
    logger.debug('[AnthropicClient]', { ...requestOptions });
    const response = await client.completions.create(requestOptions);

    signal.addEventListener('abort', () => {
      if (this.options.debug) {
        console.log('AnthropicClient: message aborted!');
      }
      logger.debug('[AnthropicClient] message aborted!');
      response.controller.abort();
    });

    for await (const completion of response) {
      if (this.options.debug) {
        // Uncomment to debug message stream
        // console.debug(completion);
      }
      // Uncomment to debug message stream
      // logger.debug(completion);
      text += completion.completion;
      onProgress(completion.completion);
    }

    signal.removeEventListener('abort', () => {
      if (this.options.debug) {
        console.log('AnthropicClient: message aborted!');
      }
      logger.debug('[AnthropicClient] message aborted!');
      response.controller.abort();
    });

@@ -328,9 +322,7 @@ class AnthropicClient extends BaseClient {
  }

  getBuildMessagesOptions() {
    if (this.options.debug) {
      console.log('AnthropicClient doesn\'t use getBuildMessagesOptions');
    }
    logger.debug('AnthropicClient doesn\'t use getBuildMessagesOptions');
  }

  static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {
@@ -1,8 +1,10 @@
const crypto = require('crypto');
const { supportsBalanceCheck } = require('librechat-data-provider');
const { getConvo, getMessages, saveMessage, updateMessage, saveConvo } = require('~/models');
const { addSpaceIfNeeded, isEnabled } = require('~/server/utils');
const checkBalance = require('~/models/checkBalance');
const TextStream = require('./TextStream');
const { getConvo, getMessages, saveMessage, updateMessage, saveConvo } = require('../../models');
const { addSpaceIfNeeded, isEnabled } = require('../../server/utils');
const checkBalance = require('../../models/checkBalance');
const { logger } = require('~/config');

class BaseClient {
  constructor(apiKey, options = {}) {
@@ -41,15 +43,14 @@ class BaseClient {
  }

  async getTokenCountForResponse(response) {
    if (this.options.debug) {
      console.debug('`recordTokenUsage` not implemented.', response);
    }
    logger.debug('`[BaseClient] recordTokenUsage` not implemented.', response);
  }

  async recordTokenUsage({ promptTokens, completionTokens }) {
    if (this.options.debug) {
      console.debug('`recordTokenUsage` not implemented.', { promptTokens, completionTokens });
    }
    logger.debug('`[BaseClient] recordTokenUsage` not implemented.', {
      promptTokens,
      completionTokens,
    });
  }

  getBuildMessagesOptions() {
@@ -62,7 +63,7 @@ class BaseClient {
  }

  async setMessageOptions(opts = {}) {
    if (opts && typeof opts === 'object') {
    if (opts && opts.replaceOptions) {
      this.setOptions(opts);
    }

@@ -194,14 +195,14 @@ class BaseClient {
      const update = {};

      if (messageId === tokenCountMap.summaryMessage?.messageId) {
        this.options.debug && console.debug(`Adding summary props to ${messageId}.`);
        logger.debug(`[BaseClient] Adding summary props to ${messageId}.`);

        update.summary = tokenCountMap.summaryMessage.content;
        update.summaryTokenCount = tokenCountMap.summaryMessage.tokenCount;
      }

      if (message.tokenCount && !update.summaryTokenCount) {
        this.options.debug && console.debug(`Skipping ${messageId}: already had a token count.`);
        logger.debug(`[BaseClient] Skipping ${messageId}: already had a token count.`);
        continue;
      }

@@ -278,19 +279,17 @@ class BaseClient {
    if (instructions) {
      ({ tokenCount, ..._instructions } = instructions);
    }
    this.options.debug && _instructions && console.debug('instructions tokenCount', tokenCount);
    _instructions && logger.debug('[BaseClient] instructions tokenCount: ' + tokenCount);
    let payload = this.addInstructions(formattedMessages, _instructions);
    let orderedWithInstructions = this.addInstructions(orderedMessages, instructions);

    let { context, remainingContextTokens, messagesToRefine, summaryIndex } =
      await this.getMessagesWithinTokenLimit(orderedWithInstructions);

    this.options.debug &&
      console.debug(
        'remainingContextTokens, this.maxContextTokens (1/2)',
        remainingContextTokens,
        this.maxContextTokens,
      );
    logger.debug('[BaseClient] Context Count (1/2)', {
      remainingContextTokens,
      maxContextTokens: this.maxContextTokens,
    });

    let summaryMessage;
    let summaryTokenCount;
@@ -308,10 +307,9 @@ class BaseClient {

    if (diff > 0) {
      payload = payload.slice(diff);
      this.options.debug &&
        console.debug(
          `Difference between original payload (${length}) and context (${context.length}): ${diff}`,
        );
      logger.debug(
        `[BaseClient] Difference between original payload (${length}) and context (${context.length}): ${diff}`,
      );
    }

    const latestMessage = orderedWithInstructions[orderedWithInstructions.length - 1];
@@ -338,12 +336,10 @@ class BaseClient {
    // Make sure to only continue summarization logic if the summary message was generated
    shouldSummarize = summaryMessage && shouldSummarize;

    this.options.debug &&
      console.debug(
        'remainingContextTokens, this.maxContextTokens (2/2)',
        remainingContextTokens,
        this.maxContextTokens,
      );
    logger.debug('[BaseClient] Context Count (2/2)', {
      remainingContextTokens,
      maxContextTokens: this.maxContextTokens,
    });

    let tokenCountMap = orderedWithInstructions.reduce((map, message, index) => {
      const { messageId } = message;
@@ -361,19 +357,13 @@ class BaseClient {

    const promptTokens = this.maxContextTokens - remainingContextTokens;

    if (this.options.debug) {
      console.debug('<-------------------------PAYLOAD/TOKEN COUNT MAP------------------------->');
      console.debug('Payload:', payload);
      console.debug('Token Count Map:', tokenCountMap);
      console.debug(
        'Prompt Tokens',
        promptTokens,
        'remainingContextTokens',
        remainingContextTokens,
        'this.maxContextTokens',
        this.maxContextTokens,
      );
    }
    logger.debug('[BaseClient] tokenCountMap:', tokenCountMap);
    logger.debug('[BaseClient]', {
      promptTokens,
      remainingContextTokens,
      payloadSize: payload.length,
      maxContextTokens: this.maxContextTokens,
    });

    return { payload, tokenCountMap, promptTokens, messages: orderedWithInstructions };
  }
@@ -417,14 +407,14 @@ class BaseClient {
      // this only matters when buildMessages is utilizing the parentMessageId, and may vary on implementation
      isEdited ? head : userMessage.messageId,
      this.getBuildMessagesOptions(opts),
      opts,
    );

    if (tokenCountMap) {
      console.dir(tokenCountMap, { depth: null });
      logger.debug('[BaseClient] tokenCountMap', tokenCountMap);
      if (tokenCountMap[userMessage.messageId]) {
        userMessage.tokenCount = tokenCountMap[userMessage.messageId];
        console.log('userMessage.tokenCount', userMessage.tokenCount);
        console.log('userMessage', userMessage);
        logger.debug('[BaseClient] userMessage', userMessage);
      }

      this.handleTokenCountMap(tokenCountMap);
@@ -434,7 +424,7 @@ class BaseClient {
      await this.saveMessageToDatabase(userMessage, saveOptions, user);
    }

    if (isEnabled(process.env.CHECK_BALANCE)) {
    if (isEnabled(process.env.CHECK_BALANCE) && supportsBalanceCheck[this.options.endpoint]) {
      await checkBalance({
        req: this.options.req,
        res: this.options.res,
@@ -442,8 +432,8 @@ class BaseClient {
          user: this.user,
          tokenType: 'prompt',
          amount: promptTokens,
          debug: this.options.debug,
          model: this.modelOptions.model,
          endpoint: this.options.endpoint,
        },
      });
    }
@@ -481,9 +471,7 @@ class BaseClient {
  }

  async loadHistory(conversationId, parentMessageId = null) {
    if (this.options.debug) {
      console.debug('Loading history for conversation', conversationId, parentMessageId);
    }
    logger.debug('[BaseClient] Loading history:', { conversationId, parentMessageId });

    const messages = (await getMessages({ conversationId })) ?? [];

@@ -514,9 +502,14 @@
      }
    }

    if (this.options.debug && this.previous_summary) {
    if (this.previous_summary) {
      const { messageId, summary, tokenCount, summaryTokenCount } = this.previous_summary;
      console.debug('Previous summary:', { messageId, summary, tokenCount, summaryTokenCount });
      logger.debug('[BaseClient] Previous summary:', {
        messageId,
        summary,
        tokenCount,
        summaryTokenCount,
      });
    }

    return orderedMessages;
@@ -636,14 +629,27 @@ class BaseClient {
      tokensPerName = -1;
    }

    const processValue = (value) => {
      if (typeof value === 'object' && value !== null) {
        for (let [nestedKey, nestedValue] of Object.entries(value)) {
          if (nestedKey === 'image_url' || nestedValue === 'image_url') {
            continue;
          }
          processValue(nestedValue);
        }
      } else {
        numTokens += this.getTokenCount(value);
      }
    };

    let numTokens = tokensPerMessage;
    for (let [key, value] of Object.entries(message)) {
      numTokens += this.getTokenCount(value);
      processValue(value);

      if (key === 'name') {
        numTokens += tokensPerName;
      }
    }

    return numTokens;
  }
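The new `processValue` walk above exists because message content is no longer always a plain string: vision-capable requests nest an array of parts, and base64 image URLs would massively inflate a naive token count. A message of the shape below (OpenAI's multimodal content format) is why the counter recurses but skips `image_url` entries:

```js
// Example multimodal message: the text part should be token-counted,
// while the (potentially huge base64) image_url part is skipped.
const message = {
  role: 'user',
  content: [
    { type: 'text', text: 'What is in this image?' },
    { type: 'image_url', image_url: { url: 'data:image/png;base64,iVBORw0KGgo...' } },
  ],
};
```

Note that the skip condition also matches `nestedValue === 'image_url'`, so the `type: 'image_url'` marker string itself is excluded from the count as well.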
@@ -166,6 +166,12 @@ class ChatGPTClient extends BaseClient {
      console.debug(modelOptions);
      console.debug();
    }

    if (this.azure || this.options.azure) {
      // Azure does not accept `model` in the body, so we need to remove it.
      delete modelOptions.model;
    }

    const opts = {
      method: 'POST',
      headers: {
@@ -548,7 +554,7 @@ ${botMessage.message}
    if (isChatGptModel) {
      return { prompt: [instructionsPayload, messagePayload], context };
    }
    return { prompt, context };
    return { prompt, context, promptTokens: currentTokenCount };
  }

  getTokenCount(text) {
@@ -1,23 +1,60 @@
const BaseClient = require('./BaseClient');
const { google } = require('googleapis');
const { Agent, ProxyAgent } = require('undici');
const { GoogleVertexAI } = require('langchain/llms/googlevertexai');
const { ChatGoogleGenerativeAI } = require('@langchain/google-genai');
const { ChatGoogleVertexAI } = require('langchain/chat_models/googlevertexai');
const { AIMessage, HumanMessage, SystemMessage } = require('langchain/schema');
const { encodeAndFormat, validateVisionModel } = require('~/server/services/Files/images');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
  getResponseSender,
  EModelEndpoint,
  endpointSettings,
  AuthKeys,
} = require('librechat-data-provider');
const { getModelMaxTokens } = require('~/utils');
const { formatMessage } = require('./prompts');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');

const loc = 'us-central1';
const publisher = 'google';
const endpointPrefix = `https://${loc}-aiplatform.googleapis.com`;
// const apiEndpoint = loc + '-aiplatform.googleapis.com';
const tokenizersCache = {};

const settings = endpointSettings[EModelEndpoint.google];

class GoogleClient extends BaseClient {
  constructor(credentials, options = {}) {
    super('apiKey', options);
    this.client_email = credentials.client_email;
    this.project_id = credentials.project_id;
    this.private_key = credentials.private_key;
    this.sender = 'PaLM2';
    let creds = {};

    if (typeof credentials === 'string') {
      creds = JSON.parse(credentials);
    } else if (credentials) {
      creds = credentials;
    }

    const serviceKey = creds[AuthKeys.GOOGLE_SERVICE_KEY] ?? {};
    this.serviceKey =
      serviceKey && typeof serviceKey === 'string' ? JSON.parse(serviceKey) : serviceKey ?? {};
    this.client_email = this.serviceKey.client_email;
    this.private_key = this.serviceKey.private_key;
    this.project_id = this.serviceKey.project_id;
    this.access_token = null;

    this.apiKey = creds[AuthKeys.GOOGLE_API_KEY];

    if (options.skipSetOptions) {
      return;
    }
    this.setOptions(options);
  }

  /* Google/PaLM2 specific methods */
  /* Google specific methods */
  constructUrl() {
    return `https://us-central1-aiplatform.googleapis.com/v1/projects/${this.project_id}/locations/us-central1/publishers/google/models/${this.modelOptions.model}:predict`;
    return `${endpointPrefix}/v1/projects/${this.project_id}/locations/${loc}/publishers/${publisher}/models/${this.modelOptions.model}:serverStreamingPredict`;
  }

  async getClient() {
@@ -26,8 +63,7 @@ class GoogleClient extends BaseClient {

    jwtClient.authorize((err) => {
      if (err) {
        console.error('Error: jwtClient failed to authorize');
        console.error(err.message);
        logger.error('jwtClient failed to authorize', err);
        throw err;
      }
    });
@@ -35,6 +71,22 @@ class GoogleClient extends BaseClient {
    return jwtClient;
  }

  async getAccessToken() {
    const scopes = ['https://www.googleapis.com/auth/cloud-platform'];
    const jwtClient = new google.auth.JWT(this.client_email, null, this.private_key, scopes);

    return new Promise((resolve, reject) => {
      jwtClient.authorize((err, tokens) => {
        if (err) {
          logger.error('jwtClient failed to authorize', err);
          reject(err);
        } else {
          resolve(tokens.access_token);
        }
      });
    });
  }
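The new getAccessToken helper wraps the JWT authorization in a Promise, so callers can fetch a bearer token for direct REST calls against Vertex AI. A hedged usage sketch (the request shape below is illustrative, not taken from this diff):

```js
// Hypothetical caller: exchange the service-account JWT for an access token,
// then use it as a standard OAuth bearer token against the Vertex AI endpoint.
const token = await client.getAccessToken();

const res = await fetch(client.constructUrl(), {
  method: 'POST',
  headers: {
    Authorization: `Bearer ${token}`,
    'Content-Type': 'application/json',
  },
  body: JSON.stringify(payload), // payload as built by buildMessages()
});
```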
  /* Required Client methods */
  setOptions(options) {
    if (this.options && !this.options.replaceOptions) {
@@ -53,30 +105,51 @@ class GoogleClient extends BaseClient {
      this.options = options;
    }

    this.options.examples = this.options.examples.filter(
      (obj) => obj.input.content !== '' && obj.output.content !== '',
    );
    this.options.examples = (this.options.examples ?? [])
      .filter((ex) => ex)
      .filter((obj) => obj.input.content !== '' && obj.output.content !== '');

    const modelOptions = this.options.modelOptions || {};
    this.modelOptions = {
      ...modelOptions,
      // set some good defaults (check for undefined in some cases because they may be 0)
      model: modelOptions.model || 'chat-bison',
      temperature: typeof modelOptions.temperature === 'undefined' ? 0.2 : modelOptions.temperature, // 0 - 1, 0.2 is recommended
      topP: typeof modelOptions.topP === 'undefined' ? 0.95 : modelOptions.topP, // 0 - 1, default: 0.95
      topK: typeof modelOptions.topK === 'undefined' ? 40 : modelOptions.topK, // 1-40, default: 40
      model: modelOptions.model || settings.model.default,
      temperature:
        typeof modelOptions.temperature === 'undefined'
          ? settings.temperature.default
          : modelOptions.temperature,
      topP: typeof modelOptions.topP === 'undefined' ? settings.topP.default : modelOptions.topP,
      topK: typeof modelOptions.topK === 'undefined' ? settings.topK.default : modelOptions.topK,
      // stop: modelOptions.stop // no stop method for now
    };

    this.isChatModel = this.modelOptions.model.startsWith('chat-');
    if (this.options.attachments) {
      this.modelOptions.model = 'gemini-pro-vision';
    }

    // TODO: as of 12/14/23, only gemini models are "Generative AI" models provided by Google
    this.isGenerativeModel = this.modelOptions.model.includes('gemini');
    this.isVisionModel = validateVisionModel(this.modelOptions.model);
    const { isGenerativeModel } = this;
    if (this.isVisionModel && !this.options.attachments) {
      this.modelOptions.model = 'gemini-pro';
      this.isVisionModel = false;
    }
    this.isChatModel = !isGenerativeModel && this.modelOptions.model.includes('chat');
    const { isChatModel } = this;
    this.isTextModel = this.modelOptions.model.startsWith('text-');
    this.isTextModel =
      !isGenerativeModel && !isChatModel && /code|text/.test(this.modelOptions.model);
    const { isTextModel } = this;

    this.maxContextTokens = this.options.maxContextTokens || (isTextModel ? 8000 : 4096);
    this.maxContextTokens = getModelMaxTokens(this.modelOptions.model, EModelEndpoint.google);
    // The max prompt tokens is determined by the max context tokens minus the max response tokens.
    // Earlier messages will be dropped until the prompt is within the limit.
    this.maxResponseTokens = this.modelOptions.maxOutputTokens || 1024;
    this.maxResponseTokens = this.modelOptions.maxOutputTokens || settings.maxOutputTokens.default;

    if (this.maxContextTokens > 32000) {
      this.maxContextTokens = this.maxContextTokens - this.maxResponseTokens;
    }

    this.maxPromptTokens =
      this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;

@@ -88,10 +161,18 @@ class GoogleClient extends BaseClient {
      );
    }

    this.sender =
      this.options.sender ??
      getResponseSender({
        model: this.modelOptions.model,
        endpoint: EModelEndpoint.google,
        modelLabel: this.options.modelLabel,
      });

    this.userLabel = this.options.userLabel || 'User';
    this.modelLabel = this.options.modelLabel || 'Assistant';

    if (isChatModel) {
    if (isChatModel || isGenerativeModel) {
      // Use these faux tokens to help the AI understand the context since we are building the chat log ourselves.
      // Trying to use "<|im_start|>" causes the AI to still generate "<" or "<|" at the end sometimes for some reason,
      // without tripping the stop sequences, so I'm using "||>" instead.
@@ -99,8 +180,8 @@ class GoogleClient extends BaseClient {
      this.endToken = '';
      this.gptEncoder = this.constructor.getTokenizer('cl100k_base');
    } else if (isTextModel) {
      this.startToken = '<|im_start|>';
      this.endToken = '<|im_end|>';
      this.startToken = '||>';
      this.endToken = '';
      this.gptEncoder = this.constructor.getTokenizer('text-davinci-003', true, {
        '<|im_start|>': 100264,
        '<|im_end|>': 100265,
@@ -138,22 +219,69 @@ class GoogleClient extends BaseClient {
    return this;
  }

  getMessageMapMethod() {
  formatMessages() {
    return ((message) => ({
      author: message?.author ?? (message.isCreatedByUser ? this.userLabel : this.modelLabel),
      content: message?.content ?? message.text,
    })).bind(this);
  }

  buildMessages(messages = []) {
    const formattedMessages = messages.map(this.getMessageMapMethod());
  async buildVisionMessages(messages = [], parentMessageId) {
    const { prompt } = await this.buildMessagesPrompt(messages, parentMessageId);
    const attachments = await this.options.attachments;
    const { files, image_urls } = await encodeAndFormat(
      this.options.req,
      attachments.filter((file) => file.type.includes('image')),
      EModelEndpoint.google,
    );

    const latestMessage = { ...messages[messages.length - 1] };

    latestMessage.image_urls = image_urls;
    this.options.attachments = files;

    latestMessage.text = prompt;

    const payload = {
      instances: [
        {
          messages: [new HumanMessage(formatMessage({ message: latestMessage }))],
        },
      ],
      parameters: this.modelOptions,
    };
    return { prompt: payload };
  }

  async buildMessages(messages = [], parentMessageId) {
    if (!this.isGenerativeModel && !this.project_id) {
      throw new Error(
        '[GoogleClient] a Service Account JSON Key is required for PaLM 2 and Codey models (Vertex AI)',
      );
    } else if (this.isGenerativeModel && (!this.apiKey || this.apiKey === 'user_provided')) {
      throw new Error(
        '[GoogleClient] an API Key is required for Gemini models (Generative Language API)',
      );
    }

    if (this.options.attachments) {
      return this.buildVisionMessages(messages, parentMessageId);
    }

    if (this.isTextModel) {
      return this.buildMessagesPrompt(messages, parentMessageId);
    }

    let payload = {
      instances: [
        {
          messages: formattedMessages,
          messages: messages
            .map(this.formatMessages())
            .map((msg) => ({ ...msg, role: msg.author === 'User' ? 'user' : 'assistant' }))
            .map((message) => formatMessage({ message, langChain: true })),
        },
      ],
      parameters: this.options.modelOptions,
      parameters: this.modelOptions,
    };

    if (this.options.promptPrefix) {
@@ -164,34 +292,171 @@ class GoogleClient extends BaseClient {
      payload.instances[0].examples = this.options.examples;
    }

    /* TO-DO: text model needs more context since it can't process an array of messages */
    if (this.isTextModel) {
      payload.instances = [
        {
          prompt: messages[messages.length - 1].content,
        },
      ];
    }

    if (this.options.debug) {
      console.debug('GoogleClient buildMessages');
      console.dir(payload, { depth: null });
    }
    logger.debug('[GoogleClient] buildMessages', payload);

    return { prompt: payload };
  }

  async getCompletion(payload, abortController = null) {
  async buildMessagesPrompt(messages, parentMessageId) {
    const orderedMessages = this.constructor.getMessagesForConversation({
      messages,
      parentMessageId,
    });

    logger.debug('[GoogleClient]', {
      orderedMessages,
      parentMessageId,
    });

    const formattedMessages = orderedMessages.map((message) => ({
      author: message.isCreatedByUser ? this.userLabel : this.modelLabel,
      content: message?.content ?? message.text,
    }));

    let lastAuthor = '';
    let groupedMessages = [];

    for (let message of formattedMessages) {
      // If last author is not same as current author, add to new group
      if (lastAuthor !== message.author) {
        groupedMessages.push({
          author: message.author,
          content: [message.content],
        });
        lastAuthor = message.author;
        // If same author, append content to the last group
      } else {
        groupedMessages[groupedMessages.length - 1].content.push(message.content);
      }
    }

    let identityPrefix = '';
    if (this.options.userLabel) {
      identityPrefix = `\nHuman's name: ${this.options.userLabel}`;
    }

    if (this.options.modelLabel) {
      identityPrefix = `${identityPrefix}\nYou are ${this.options.modelLabel}`;
    }

    let promptPrefix = (this.options.promptPrefix || '').trim();
    if (promptPrefix) {
      // If the prompt prefix doesn't end with the end token, add it.
      if (!promptPrefix.endsWith(`${this.endToken}`)) {
        promptPrefix = `${promptPrefix.trim()}${this.endToken}\n\n`;
      }
      promptPrefix = `\nContext:\n${promptPrefix}`;
    }

    if (identityPrefix) {
      promptPrefix = `${identityPrefix}${promptPrefix}`;
    }

    // Prompt AI to respond, empty if last message was from AI
    let isEdited = lastAuthor === this.modelLabel;
    const promptSuffix = isEdited ? '' : `${promptPrefix}\n\n${this.modelLabel}:\n`;
    let currentTokenCount = isEdited
      ? this.getTokenCount(promptPrefix)
      : this.getTokenCount(promptSuffix);

    let promptBody = '';
    const maxTokenCount = this.maxPromptTokens;

    const context = [];

    // Iterate backwards through the messages, adding them to the prompt until we reach the max token count.
    // Do this within a recursive async function so that it doesn't block the event loop for too long.
    // Also, remove the next message when the message that puts us over the token limit is created by the user.
    // Otherwise, remove only the exceeding message. This is due to Anthropic's strict payload rule to start with "Human:".
    const nextMessage = {
      remove: false,
      tokenCount: 0,
      messageString: '',
    };

    const buildPromptBody = async () => {
      if (currentTokenCount < maxTokenCount && groupedMessages.length > 0) {
        const message = groupedMessages.pop();
        const isCreatedByUser = message.author === this.userLabel;
        // Use promptPrefix if message is an edited assistant message
        const messagePrefix =
          isCreatedByUser || !isEdited
            ? `\n\n${message.author}:`
            : `${promptPrefix}\n\n${message.author}:`;
        const messageString = `${messagePrefix}\n${message.content}${this.endToken}\n`;
        let newPromptBody = `${messageString}${promptBody}`;

        context.unshift(message);

        const tokenCountForMessage = this.getTokenCount(messageString);
        const newTokenCount = currentTokenCount + tokenCountForMessage;

        if (!isCreatedByUser) {
          nextMessage.messageString = messageString;
          nextMessage.tokenCount = tokenCountForMessage;
        }

        if (newTokenCount > maxTokenCount) {
          if (!promptBody) {
            // This is the first message, so we can't add it. Just throw an error.
            throw new Error(
              `Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
            );
          }

          // Otherwise, this message would put us over the token limit, so don't add it.
          // If created by user, remove the next message; otherwise remove only this message.
          if (isCreatedByUser) {
            nextMessage.remove = true;
          }

          return false;
        }
        promptBody = newPromptBody;
        currentTokenCount = newTokenCount;

        // Switch off isEdited after using it for the first time
        if (isEdited) {
          isEdited = false;
        }

        // wait for next tick to avoid blocking the event loop
        await new Promise((resolve) => setImmediate(resolve));
        return buildPromptBody();
      }
      return true;
    };

    await buildPromptBody();

    if (nextMessage.remove) {
      promptBody = promptBody.replace(nextMessage.messageString, '');
      currentTokenCount -= nextMessage.tokenCount;
      context.shift();
    }

    let prompt = `${promptBody}${promptSuffix}`.trim();

    // Add 2 tokens for metadata after all messages have been counted.
    currentTokenCount += 2;

    // Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
    this.modelOptions.maxOutputTokens = Math.min(
      this.maxContextTokens - currentTokenCount,
      this.maxResponseTokens,
    );

    return { prompt, context };
  }

  async _getCompletion(payload, abortController = null) {
    if (!abortController) {
      abortController = new AbortController();
    }
    const { debug } = this.options;
    const url = this.completionsUrl;
    if (debug) {
      console.debug();
      console.debug(url);
      console.debug(this.modelOptions);
      console.debug();
      logger.debug('GoogleClient _getCompletion', { url, payload });
    }
    const opts = {
      method: 'POST',
@@ -208,10 +473,88 @@ class GoogleClient extends BaseClient {

    const client = await this.getClient();
    const res = await client.request({ url, method: 'POST', data: payload });
    console.dir(res.data, { depth: null });
    logger.debug('GoogleClient _getCompletion', { res });
    return res.data;
  }

  createLLM(clientOptions) {
    if (this.isGenerativeModel) {
      return new ChatGoogleGenerativeAI({ ...clientOptions, apiKey: this.apiKey });
    }

    return this.isTextModel
      ? new GoogleVertexAI(clientOptions)
      : new ChatGoogleVertexAI(clientOptions);
  }

  async getCompletion(_payload, options = {}) {
    const { onProgress, abortController } = options;
    const { parameters, instances } = _payload;
    const { messages: _messages, context, examples: _examples } = instances?.[0] ?? {};

    let examples;

    let clientOptions = { ...parameters, maxRetries: 2 };

    if (!this.isGenerativeModel) {
      clientOptions['authOptions'] = {
        credentials: {
          ...this.serviceKey,
        },
        projectId: this.project_id,
      };
    }

    if (!parameters) {
      clientOptions = { ...clientOptions, ...this.modelOptions };
    }

    if (this.isGenerativeModel) {
      clientOptions.modelName = clientOptions.model;
      delete clientOptions.model;
    }

    if (_examples && _examples.length) {
      examples = _examples
        .map((ex) => {
          const { input, output } = ex;
          if (!input || !output) {
            return undefined;
          }
          return {
            input: new HumanMessage(input.content),
            output: new AIMessage(output.content),
          };
        })
        .filter((ex) => ex);

      clientOptions.examples = examples;
    }

    const model = this.createLLM(clientOptions);

    let reply = '';
    const messages = this.isTextModel ? _payload.trim() : _messages;

    if (!this.isVisionModel && context && messages?.length > 0) {
      messages.unshift(new SystemMessage(context));
    }

    const stream = await model.stream(messages, {
      signal: abortController.signal,
      timeout: 7000,
    });

    for await (const chunk of stream) {
      await this.generateTextStream(chunk?.content ?? chunk, onProgress, {
        delay: this.isGenerativeModel ? 12 : 8,
      });
      reply += chunk?.content ?? chunk;
    }

    return reply;
  }

  getSaveOptions() {
    return {
      promptPrefix: this.options.promptPrefix,
@@ -221,38 +564,12 @@ class GoogleClient extends BaseClient {
  }

  getBuildMessagesOptions() {
    // console.log('GoogleClient doesn\'t use getBuildMessagesOptions');
    // logger.debug('GoogleClient doesn\'t use getBuildMessagesOptions');
  }

  async sendCompletion(payload, opts = {}) {
    console.log('GoogleClient: sendcompletion', payload, opts);
    let reply = '';
    let blocked = false;
    try {
      const result = await this.getCompletion(payload, opts.abortController);
      blocked = result?.predictions?.[0]?.safetyAttributes?.blocked;
      reply =
        result?.predictions?.[0]?.candidates?.[0]?.content ||
        result?.predictions?.[0]?.content ||
        '';
      if (blocked === true) {
        reply = `Google blocked a proper response to your message:\n${JSON.stringify(
          result.predictions[0].safetyAttributes,
        )}${reply.length > 0 ? `\nAI Response:\n${reply}` : ''}`;
      }
      if (this.options.debug) {
        console.debug('result');
        console.debug(result);
      }
    } catch (err) {
      console.error('Error: failed to send completion to Google');
      console.error(err.message);
    }

    if (!blocked) {
      await this.generateTextStream(reply, opts.onProgress, { delay: 0.5 });
    }

    reply = await this.getCompletion(payload, opts);
    return reply.trim();
  }
@@ -1,17 +1,20 @@
const OpenAI = require('openai');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { getResponseSender, EModelEndpoint } = require('librechat-data-provider');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const { getModelMaxTokens, genAzureChatCompletion, extractBaseURL } = require('../../utils');
const { encodeAndFormat, validateVisionModel } = require('~/server/services/Files/images');
const { getModelMaxTokens, genAzureChatCompletion, extractBaseURL } = require('~/utils');
const { truncateText, formatMessage, CUT_OFF_PROMPT } = require('./prompts');
const spendTokens = require('../../models/spendTokens');
const { handleOpenAIErrors } = require('./tools/util');
const { isEnabled } = require('../../server/utils');
const spendTokens = require('~/models/spendTokens');
const { createLLM, RunManager } = require('./llm');
const { isEnabled } = require('~/server/utils');
const ChatGPTClient = require('./ChatGPTClient');
const { summaryBuffer } = require('./memory');
const { runTitleChain } = require('./chains');
const { tokenSplit } = require('./document');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');

// Cache to store Tiktoken instances
const tokenizersCache = {};
@@ -24,7 +27,6 @@ class OpenAIClient extends BaseClient {
    this.ChatGPTClient = new ChatGPTClient();
    this.buildPrompt = this.ChatGPTClient.buildPrompt.bind(this);
    this.getCompletion = this.ChatGPTClient.getCompletion.bind(this);
    this.sender = options.sender ?? 'ChatGPT';
    this.contextStrategy = options.contextStrategy
      ? options.contextStrategy.toLowerCase()
      : 'discard';
@@ -33,6 +35,7 @@ class OpenAIClient extends BaseClient {
    this.setOptions(options);
  }

  // TODO: PluginsClient calls this 3x, unneeded
  setOptions(options) {
    if (this.options && !this.options.replaceOptions) {
      this.options.modelOptions = {
@@ -53,6 +56,7 @@ class OpenAIClient extends BaseClient {
    }

    const modelOptions = this.options.modelOptions || {};

    if (!this.modelOptions) {
      this.modelOptions = {
        ...modelOptions,
@@ -72,6 +76,17 @@ class OpenAIClient extends BaseClient {
      };
    }

    this.isVisionModel = validateVisionModel(this.modelOptions.model);

    if (this.options.attachments && !this.isVisionModel) {
      this.modelOptions.model = 'gpt-4-vision-preview';
      this.isVisionModel = true;
    }

    if (this.isVisionModel) {
      delete this.modelOptions.stop;
    }

    const { OPENROUTER_API_KEY, OPENAI_FORCE_PROMPT } = process.env ?? {};
    if (OPENROUTER_API_KEY && !this.azure) {
      this.apiKey = OPENROUTER_API_KEY;
@@ -92,7 +107,7 @@ class OpenAIClient extends BaseClient {

    const { model } = this.modelOptions;

    this.isChatCompletion = this.useOpenRouter || !!reverseProxy || model.includes('gpt-');
    this.isChatCompletion = this.useOpenRouter || !!reverseProxy || model.includes('gpt');
    this.isChatGptModel = this.isChatCompletion;
    if (
      model.includes('text-davinci') ||
@@ -112,7 +127,7 @@ class OpenAIClient extends BaseClient {
    }

    if (this.options.debug) {
      console.debug('maxContextTokens', this.maxContextTokens);
      logger.debug('[OpenAIClient] maxContextTokens', this.maxContextTokens);
    }

    this.maxResponseTokens = this.modelOptions.max_tokens || 1024;
@@ -127,12 +142,20 @@ class OpenAIClient extends BaseClient {
      );
    }

    this.sender =
      this.options.sender ??
      getResponseSender({
        model: this.modelOptions.model,
        endpoint: EModelEndpoint.openAI,
        chatGptLabel: this.options.chatGptLabel,
      });

    this.userLabel = this.options.userLabel || 'User';
    this.chatGptLabel = this.options.chatGptLabel || 'Assistant';

    this.setupTokens();

    if (!this.modelOptions.stop) {
    if (!this.modelOptions.stop && !this.isVisionModel) {
      const stopTokens = [this.startToken];
      if (this.endToken && this.endToken !== this.startToken) {
        stopTokens.push(this.endToken);
@@ -145,10 +168,6 @@ class OpenAIClient extends BaseClient {
    if (reverseProxy) {
      this.completionsUrl = reverseProxy;
      this.langchainProxy = extractBaseURL(reverseProxy);
      !this.langchainProxy &&
        console.warn(`The reverse proxy URL ${reverseProxy} is not valid for Plugins.
The url must follow OpenAI specs, for example: https://localhost:8080/v1/chat/completions
If your reverse proxy is compatible to OpenAI specs in every other way, it may still work without plugins enabled.`);
    } else if (isChatGptModel) {
      this.completionsUrl = 'https://api.openai.com/v1/chat/completions';
    } else {
@@ -160,7 +179,7 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
    }

    if (this.azureEndpoint && this.options.debug) {
      console.debug('Using Azure endpoint');
      logger.debug('Using Azure endpoint');
    }

    if (this.useOpenRouter) {
@@ -239,8 +258,7 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
      // Reset count
      tokenizerCallsCount = 1;
    } catch (error) {
      console.log('Free and reset encoders error');
      console.error(error);
      logger.error('[OpenAIClient] Free and reset encoders error', error);
    }
  }

@@ -248,7 +266,7 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
  resetTokenizersIfNecessary() {
    if (tokenizerCallsCount >= 25) {
      if (this.options.debug) {
        console.debug('freeAndResetAllEncoders: reached 25 encodings, resetting...');
        logger.debug('[OpenAIClient] freeAndResetAllEncoders: reached 25 encodings, resetting...');
      }
      this.constructor.freeAndResetAllEncoders();
    }
@@ -288,6 +306,7 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
    messages,
    parentMessageId,
    { isChatCompletion = false, promptPrefix = null },
    opts,
  ) {
    let orderedMessages = this.constructor.getMessagesForConversation({
      messages,
@@ -320,6 +339,17 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
      }
    }

    if (this.options.attachments) {
      const attachments = await this.options.attachments;
      const { files, image_urls } = await encodeAndFormat(
        this.options.req,
        attachments.filter((file) => file.type.includes('image')),
      );

      orderedMessages[orderedMessages.length - 1].image_urls = image_urls;
      this.options.attachments = files;
    }

    const formattedMessages = orderedMessages.map((message, i) => {
      const formattedMessage = formatMessage({
        message,
@@ -354,8 +384,8 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
      result.tokenCountMap = tokenCountMap;
    }

    if (promptTokens >= 0 && typeof this.options.getReqData === 'function') {
      this.options.getReqData({ promptTokens });
    if (promptTokens >= 0 && typeof opts?.getReqData === 'function') {
      opts.getReqData({ promptTokens });
    }

    return result;
@@ -367,7 +397,7 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
    let streamResult = null;
    this.modelOptions.user = this.user;
    const invalidBaseUrl = this.completionsUrl && extractBaseURL(this.completionsUrl) === null;
    const useOldMethod = !!(this.azure || invalidBaseUrl || !this.isChatCompletion);
    const useOldMethod = !!(invalidBaseUrl || !this.isChatCompletion);
    if (typeof opts.onProgress === 'function' && useOldMethod) {
      await this.getCompletion(
        payload,
@@ -376,11 +406,6 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
          return;
        }

        if (this.options.debug) {
          // console.debug('progressMessage');
          // console.dir(progressMessage, { depth: null });
        }

        if (progressMessage.choices) {
          streamResult = progressMessage;
        }
@@ -400,9 +425,7 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
        if (!token) {
          return;
        }
        if (this.options.debug) {
          // console.debug(token);
        }

        if (token === this.endToken) {
          return;
        }
@@ -424,9 +447,9 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
        null,
        opts.abortController || new AbortController(),
      );
      if (this.options.debug) {
        console.debug(JSON.stringify(result));
      }

      logger.debug('[OpenAIClient] sendCompletion: result', result);

      if (this.isChatCompletion) {
        reply = result.choices[0].message.content;
      } else {
@@ -530,11 +553,13 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
      title = await runTitleChain({ llm, text, convo, signal: this.abortController.signal });
    } catch (e) {
      if (e?.message?.toLowerCase()?.includes('abort')) {
        this.options.debug && console.debug('Aborted title generation');
        logger.debug('[OpenAIClient] Aborted title generation');
        return;
      }
      console.log('There was an issue generating title with LangChain, trying the old method...');
      this.options.debug && console.error(e.message, e);
      logger.error(
        '[OpenAIClient] There was an issue generating title with LangChain, trying the old method...',
        e,
      );
      modelOptions.model = OPENAI_TITLE_MODEL ?? 'gpt-3.5-turbo';
      if (this.azure) {
        modelOptions.model = process.env.AZURE_OPENAI_DEFAULT_MODEL ?? modelOptions.model;
@@ -555,17 +580,16 @@ ${convo}
      try {
        title = (await this.sendPayload(instructionsPayload, { modelOptions })).replaceAll('"', '');
      } catch (e) {
        console.error(e);
        console.log('There was another issue generating the title, see error above.');
        logger.error('[OpenAIClient] There was another issue generating the title', e);
      }
    }

    console.log('CONVERSATION TITLE', title);
    logger.debug('[OpenAIClient] Convo Title: ' + title);
    return title;
  }

  async summarizeMessages({ messagesToRefine, remainingContextTokens }) {
    this.options.debug && console.debug('Summarizing messages...');
    logger.debug('[OpenAIClient] Summarizing messages...');
    let context = messagesToRefine;
    let prompt;

@@ -588,8 +612,9 @@ ${convo}
    }

    if (context.length === 0) {
      this.options.debug &&
        console.debug('Summary context is empty, using latest message within token limit');
      logger.debug(
        '[OpenAIClient] Summary context is empty, using latest message within token limit',
      );

      promptBuffer = 32;
      const { text, ...latestMessage } = messagesToRefine[messagesToRefine.length - 1];
@@ -616,7 +641,7 @@ ${convo}
    // by recreating the summary prompt (single message) to avoid LangChain handling

    const initialPromptTokens = this.maxContextTokens - remainingContextTokens;
    this.options.debug && console.debug(`initialPromptTokens: ${initialPromptTokens}`);
    logger.debug('[OpenAIClient] initialPromptTokens', initialPromptTokens);

    const llm = this.initializeLLM({
      model: OPENAI_SUMMARY_MODEL,
@@ -642,9 +667,9 @@ ${convo}
      const summaryTokenCount = this.getTokenCountForMessage(summaryMessage);

      if (this.options.debug) {
        console.debug('summaryMessage:', summaryMessage);
        console.debug(
          `remainingContextTokens: ${remainingContextTokens}, after refining: ${
      logger.debug('[OpenAIClient] summaryTokenCount', summaryTokenCount);
      logger.debug(
        `[OpenAIClient] Summarization complete: remainingContextTokens: ${remainingContextTokens}, after refining: ${
          remainingContextTokens - summaryTokenCount
        }`,
      );
@@ -653,7 +678,7 @@ ${convo}
      return { summaryMessage, summaryTokenCount };
    } catch (e) {
      if (e?.message?.toLowerCase()?.includes('abort')) {
        this.options.debug && console.debug('Aborted summarization');
        logger.debug('[OpenAIClient] Aborted summarization');
        const { run, runId } = this.runManager.getRunByConversationId(this.conversationId);
        if (run && run.error) {
          const { error } = run;
@@ -661,17 +686,13 @@ ${convo}
          throw new Error(error);
        }
      }
      console.error('Error summarizing messages');
      this.options.debug && console.error(e);
      logger.error('[OpenAIClient] Error summarizing messages', e);
      return {};
    }
  }

  async recordTokenUsage({ promptTokens, completionTokens }) {
    if (this.options.debug) {
      console.debug('promptTokens', promptTokens);
      console.debug('completionTokens', completionTokens);
    }
    logger.debug('[OpenAIClient] recordTokenUsage:', { promptTokens, completionTokens });
    await spendTokens(
      {
        user: this.user,
@@ -709,14 +730,19 @@ ${convo}
      modelOptions.prompt = payload;
    }

    const { debug } = this.options;
    const url = extractBaseURL(this.completionsUrl);
    if (debug) {
      console.debug('baseURL', url);
      console.debug('modelOptions', modelOptions);
    }
    const baseURL = extractBaseURL(this.completionsUrl);
    // let { messages: _msgsToLog, ...modelOptionsToLog } = modelOptions;
    // if (modelOptionsToLog.messages) {
    //   _msgsToLog = modelOptionsToLog.messages.map((msg) => {
    //     let { content, ...rest } = msg;

    //     if (content)
    //       return { ...rest, content: truncateText(content) };
    //   });
    // }
    logger.debug('[OpenAIClient] chatCompletion', { baseURL, modelOptions });
    const opts = {
      baseURL: url,
      baseURL,
    };

    if (this.useOpenRouter) {
@@ -734,12 +760,26 @@ ${convo}
      opts.httpAgent = new HttpsProxyAgent(this.options.proxy);
    }

    if (this.isVisionModel) {
      modelOptions.max_tokens = 4000;
    }

    if (this.azure || this.options.azure) {
      // Azure does not accept `model` in the body, so we need to remove it.
      delete modelOptions.model;

      opts.baseURL = this.azureEndpoint.split('/chat')[0];
      opts.defaultQuery = { 'api-version': this.azure.azureOpenAIApiVersion };
      opts.defaultHeaders = { ...opts.defaultHeaders, 'api-key': this.apiKey };
    }
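For orientation, the Azure branch above works because Azure OpenAI encodes the deployment name in the URL path rather than accepting `model` in the request body, and passes the API version as a query parameter. A sketch of the endpoint shape with placeholder values (not taken from this diff):

```js
// Placeholder Azure OpenAI endpoint shape:
//   https://{resource}.openai.azure.com/openai/deployments/{deployment}/chat/completions?api-version={version}
// Splitting `this.azureEndpoint` on '/chat' keeps everything up through the deployment,
// which becomes the SDK's baseURL; the SDK then appends '/chat/completions' itself.
```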
    let chatCompletion;
    const openai = new OpenAI({
      apiKey: this.apiKey,
      ...opts,
    });

    let UnexpectedRoleError = false;
    if (modelOptions.stream) {
      const stream = await openai.beta.chat.completions
        .stream({
@@ -751,6 +791,12 @@ ${convo}
        })
        .on('error', (err) => {
          handleOpenAIErrors(err, errorCallback, 'stream');
        })
        .on('finalMessage', (message) => {
          if (message?.role !== 'assistant') {
            stream.messages.push({ role: 'assistant', content: intermediateReply });
            UnexpectedRoleError = true;
          }
        });

      for await (const chunk of stream) {
@@ -763,9 +809,11 @@ ${convo}
        }
      }

      chatCompletion = await stream.finalChatCompletion().catch((err) => {
        handleOpenAIErrors(err, errorCallback, 'finalChatCompletion');
      });
      if (!UnexpectedRoleError) {
        chatCompletion = await stream.finalChatCompletion().catch((err) => {
          handleOpenAIErrors(err, errorCallback, 'finalChatCompletion');
        });
      }
    }
    // regular completion
    else {
@@ -778,7 +826,11 @@ ${convo}
      });
    }

    if (!chatCompletion && error) {
    if (!chatCompletion && UnexpectedRoleError) {
      throw new Error(
        'OpenAI error: Invalid final message: OpenAI expects final message to include role=assistant',
      );
    } else if (!chatCompletion && error) {
      throw new Error(error);
    } else if (!chatCompletion) {
      throw new Error('Chat completion failed');
@@ -798,23 +850,25 @@ ${convo}
      return '';
    }
    if (
      err?.message?.includes(
        'OpenAI error: Invalid final message: OpenAI expects final message to include role=assistant',
      ) ||
      err?.message?.includes('The server had an error processing your request') ||
      err?.message?.includes('missing finish_reason') ||
      err?.message?.includes('missing role') ||
      (err instanceof OpenAI.OpenAIError && err?.message?.includes('missing finish_reason'))
    ) {
      logger.error('[OpenAIClient] Known OpenAI error:', err);
      await abortController.abortCompletion();
      return intermediateReply;
    } else if (err instanceof OpenAI.APIError) {
      console.log(err.name);
      console.log(err.status);
      console.log(err.headers);
      if (intermediateReply) {
        return intermediateReply;
      } else {
        throw err;
      }
    } else {
      console.warn('[OpenAIClient.chatCompletion] Unhandled error type');
      console.error(err);
      logger.error('[OpenAIClient.chatCompletion] Unhandled error type', err);
      throw err;
    }
  }
@@ -3,12 +3,14 @@ const { CallbackManager } = require('langchain/callbacks');
|
||||
const { BufferMemory, ChatMessageHistory } = require('langchain/memory');
|
||||
const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents');
|
||||
const { addImages, buildErrorInput, buildPromptPrefix } = require('./output_parsers');
|
||||
const checkBalance = require('../../models/checkBalance');
|
||||
const { EModelEndpoint } = require('librechat-data-provider');
|
||||
const { formatLangChainMessages } = require('./prompts');
|
||||
const { isEnabled } = require('../../server/utils');
|
||||
const { extractBaseURL } = require('../../utils');
|
||||
const checkBalance = require('~/models/checkBalance');
|
||||
const { SelfReflectionTool } = require('./tools');
|
||||
const { isEnabled } = require('~/server/utils');
|
||||
const { extractBaseURL } = require('~/utils');
|
||||
const { loadTools } = require('./tools/util');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
class PluginsClient extends OpenAIClient {
|
||||
constructor(apiKey, options = {}) {
|
||||
@@ -36,10 +38,6 @@ class PluginsClient extends OpenAIClient {
|
||||
|
||||
if (this.options.reverseProxyUrl) {
|
||||
this.langchainProxy = extractBaseURL(this.options.reverseProxyUrl);
|
||||
!this.langchainProxy &&
|
||||
console.warn(`The reverse proxy URL ${this.options.reverseProxyUrl} is not valid for Plugins.
|
||||
The url must follow OpenAI specs, for example: https://localhost:8080/v1/chat/completions
|
||||
If your reverse proxy is compatible to OpenAI specs in every other way, it may still work without plugins enabled.`);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -57,7 +55,9 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
|
||||
}
|
||||
|
||||
getFunctionModelName(input) {
|
||||
if (input.includes('gpt-3.5-turbo')) {
|
||||
if (/-(?!0314)\d{4}/.test(input)) {
|
||||
return input;
|
||||
} else if (input.includes('gpt-3.5-turbo')) {
|
||||
return 'gpt-3.5-turbo';
|
||||
} else if (input.includes('gpt-4')) {
|
||||
return 'gpt-4';
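The new version check above uses a negative lookahead: any model name containing a dash followed by four digits is returned unchanged, unless those digits are `0314` (the non-function-calling releases). A quick sketch of the behavior, which the `getFunctionModelName` tests further down confirm:

```
const pattern = /-(?!0314)\d{4}/;
pattern.test('gpt-4-0613');         // true  -> input returned as-is (function-capable)
pattern.test('gpt-3.5-turbo-1106'); // true  -> input returned as-is
pattern.test('gpt-4-0314');         // false -> falls through to the includes() checks -> 'gpt-4'
```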
@@ -86,17 +86,15 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
      initialMessageCount: this.currentMessages.length + 1,
    });

    if (this.options.debug) {
      console.debug(
        `<-----Agent Model: ${model.modelName} | Temp: ${model.temperature} | Functions: ${this.functionsAgent}----->`,
      );
    }
    logger.debug(
      `[PluginsClient] Agent Model: ${model.modelName} | Temp: ${model.temperature} | Functions: ${this.functionsAgent}`,
    );

    // Map Messages to Langchain format
    const pastMessages = formatLangChainMessages(this.currentMessages.slice(0, -1), {
      userName: this.options?.name,
    });
    this.options.debug && console.debug('pastMessages: ', pastMessages);
    logger.debug('[PluginsClient] pastMessages: ' + pastMessages.length);

    // TODO: use readOnly memory, TokenBufferMemory? (both unavailable in LangChainJS)
    const memory = new BufferMemory({
@@ -125,19 +123,16 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
      return;
    }

    if (this.options.debug) {
      console.debug('Requested Tools');
      console.debug(this.options.tools);
      console.debug('Loaded Tools');
      console.debug(this.tools.map((tool) => tool.name));
    }
    logger.debug('[PluginsClient] Requested Tools', this.options.tools);
    logger.debug(
      '[PluginsClient] Loaded Tools',
      this.tools.map((tool) => tool.name),
    );

    const handleAction = (action, runId, callback = null) => {
      this.saveLatestAction(action);

      if (this.options.debug) {
        console.debug('Latest Agent Action ', this.actions[this.actions.length - 1]);
      }
      logger.debug('[PluginsClient] Latest Agent Action ', this.actions[this.actions.length - 1]);

      if (typeof callback === 'function') {
        callback(action, runId);
@@ -166,9 +161,7 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
      }),
    });

    if (this.options.debug) {
      console.debug('Loaded agent.');
    }
    logger.debug('[PluginsClient] Loaded agent.');
  }

  async executorCall(message, { signal, stream, onToolStart, onToolEnd }) {
@@ -184,12 +177,10 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
      });
      const input = attempts > 1 ? errorInput : message;

      if (this.options.debug) {
        console.debug(`Attempt ${attempts} of ${maxAttempts}`);
      }
      logger.debug(`[PluginsClient] Attempt ${attempts} of ${maxAttempts}`);

      if (this.options.debug && errorMessage.length > 0) {
        console.debug('Caught error, input:', input);
      if (errorMessage.length > 0) {
        logger.debug('[PluginsClient] Caught error, input: ' + JSON.stringify(input));
      }

      try {
@@ -212,10 +203,10 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
        ]);
        break; // Exit the loop if the function call is successful
      } catch (err) {
        console.error(err);
        logger.error('[PluginsClient] executorCall error:', err);
        if (attempts === maxAttempts) {
          const { run } = this.runManager.getRunByConversationId(this.conversationId);
          const defaultOutput = `Encountered an error while attempting to respond. Error: ${err.message}`;
          const defaultOutput = `Encountered an error while attempting to respond: ${err.message}`;
          this.result.output = run && run.error ? run.error : defaultOutput;
          this.result.errorMessage = run && run.error ? run.error : err.message;
          this.result.intermediateSteps = this.actions;
@@ -227,8 +218,11 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s

  async handleResponseMessage(responseMessage, saveOptions, user) {
    const { output, errorMessage, ...result } = this.result;
    this.options.debug &&
      console.debug('[handleResponseMessage] Output:', { output, errorMessage, ...result });
    logger.debug('[PluginsClient][handleResponseMessage] Output:', {
      output,
      errorMessage,
      ...result,
    });
    const { error } = responseMessage;
    if (!error) {
      responseMessage.tokenCount = this.getTokenCountForResponse(responseMessage);
@@ -252,7 +246,7 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
      this.setOptions(opts);
      return super.sendMessage(message, opts);
    }
    this.options.debug && console.log('Plugins sendMessage', message, opts);
    logger.debug('[PluginsClient] sendMessage', { message, opts });
    const {
      user,
      isEdited,
@@ -282,10 +276,10 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
    );

    if (tokenCountMap) {
      console.dir(tokenCountMap, { depth: null });
      logger.debug('[PluginsClient] tokenCountMap', { tokenCountMap });
      if (tokenCountMap[userMessage.messageId]) {
        userMessage.tokenCount = tokenCountMap[userMessage.messageId];
        console.log('userMessage.tokenCount', userMessage.tokenCount);
        logger.debug('[PluginsClient] userMessage.tokenCount', userMessage.tokenCount);
      }
      this.handleTokenCountMap(tokenCountMap);
    }
@@ -306,6 +300,7 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
          amount: promptTokens,
          debug: this.options.debug,
          model: this.modelOptions.model,
          endpoint: EModelEndpoint.openAI,
        },
      });
    }
@@ -370,10 +365,7 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
      return await this.handleResponseMessage(responseMessage, saveOptions, user);
    }

    if (this.options.debug) {
      console.debug('Plugins completion phase: this.result');
      console.debug(this.result);
    }
    logger.debug('[PluginsClient] Completion phase: this.result', this.result);

    const promptPrefix = buildPromptPrefix({
      result: this.result,
@@ -381,28 +373,20 @@ If your reverse proxy is compatible to OpenAI specs in every other way, it may s
      functionsAgent: this.functionsAgent,
    });

    if (this.options.debug) {
      console.debug('Plugins: promptPrefix');
      console.debug(promptPrefix);
    }
    logger.debug('[PluginsClient]', { promptPrefix });

    payload = await this.buildCompletionPrompt({
      messages: this.currentMessages,
      promptPrefix,
    });

    if (this.options.debug) {
      console.debug('buildCompletionPrompt Payload');
      console.debug(payload);
    }
    logger.debug('[PluginsClient] buildCompletionPrompt Payload', payload);
    responseMessage.text = await this.sendCompletion(payload, opts);
    return await this.handleResponseMessage(responseMessage, saveOptions, user);
  }

  async buildCompletionPrompt({ messages, promptPrefix: _promptPrefix }) {
    if (this.options.debug) {
      console.debug('buildCompletionPrompt messages', messages);
    }
    logger.debug('[PluginsClient] buildCompletionPrompt messages', messages);

    const orderedMessages = messages;
    let promptPrefix = _promptPrefix.trim();

@@ -1,4 +1,5 @@
const { Readable } = require('stream');
const { logger } = require('~/config');

class TextStream extends Readable {
  constructor(text, options = {}) {
@@ -38,7 +39,7 @@ class TextStream extends Readable {
      });

      this.on('end', () => {
        // console.log('Stream ended');
        // logger.debug('[processTextStream] Stream ended');
        resolve();
      });

@@ -50,7 +51,7 @@ class TextStream extends Readable {
    try {
      await streamPromise;
    } catch (err) {
      console.error('Error processing text stream:', err);
      logger.error('[processTextStream] Error in text stream:', err);
      // Handle the error appropriately, e.g., return an error message or throw an error
    }
  }

@@ -1,4 +1,5 @@
const { ZeroShotAgentOutputParser } = require('langchain/agents');
const { logger } = require('~/config');

class CustomOutputParser extends ZeroShotAgentOutputParser {
  constructor(fields) {
@@ -64,9 +65,9 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
    const match = this.actionValues.exec(text); // old v2

    if (!match) {
      console.log(
        '\n\n<----------------------HIT NO MATCH PARSING ERROR---------------------->\n\n',
        match,
      logger.debug(
        '\n\n<----------------------[CustomOutputParser] HIT NO MATCH PARSING ERROR---------------------->\n\n' +
          match,
      );
      const thoughts = text.replace(/[tT]hought:/, '').split('\n');
      // return {
@@ -84,9 +85,9 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
    let selectedTool = match?.[1].trim().toLowerCase();

    if (match && selectedTool === 'n/a') {
      console.log(
        '\n\n<----------------------HIT N/A PARSING ERROR---------------------->\n\n',
        match,
      logger.debug(
        '\n\n<----------------------[CustomOutputParser] HIT N/A PARSING ERROR---------------------->\n\n' +
          match,
      );
      return {
        tool: 'self-reflection',
@@ -97,25 +98,25 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {

    let toolIsValid = this.checkIfValidTool(selectedTool);
    if (match && !toolIsValid) {
      console.log(
        '\n\n<----------------Tool invalid: Re-assigning Selected Tool---------------->\n\n',
        match,
      logger.debug(
        '\n\n<----------------[CustomOutputParser] Tool invalid: Re-assigning Selected Tool---------------->\n\n' +
          match,
      );
      selectedTool = this.getValidTool(selectedTool);
    }

    if (match && !selectedTool) {
      console.log(
        '\n\n<----------------------HIT INVALID TOOL PARSING ERROR---------------------->\n\n',
        match,
      logger.debug(
        '\n\n<----------------------[CustomOutputParser] HIT INVALID TOOL PARSING ERROR---------------------->\n\n' +
          match,
      );
      selectedTool = 'self-reflection';
    }

    if (match && !match[2]) {
      console.log(
        '\n\n<----------------------HIT NO ACTION INPUT PARSING ERROR---------------------->\n\n',
        match,
      logger.debug(
        '\n\n<----------------------[CustomOutputParser] HIT NO ACTION INPUT PARSING ERROR---------------------->\n\n' +
          match,
      );

      // In case there is no action input, let's double-check if there is an action input in 'text' variable
@@ -139,7 +140,9 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
    }

    if (match && selectedTool.length > this.longestToolName.length) {
      console.log('\n\n<----------------------HIT LONG PARSING ERROR---------------------->\n\n');
      logger.debug(
        '\n\n<----------------------[CustomOutputParser] HIT LONG PARSING ERROR---------------------->\n\n',
      );

      let action, input, thought;
      let firstIndex = Infinity;
@@ -156,9 +159,9 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
      // In case there is no action input, let's double-check if there is an action input in 'text' variable
      const actionInputMatch = this.actionInputRegex.exec(text);
      if (action && actionInputMatch) {
        console.log(
          '\n\n<------Matched Action Input in Long Parsing Error------>\n\n',
          actionInputMatch,
        logger.debug(
          '\n\n<------[CustomOutputParser] Matched Action Input in Long Parsing Error------>\n\n' +
            actionInputMatch,
        );
        return {
          tool: action,
@@ -185,15 +188,14 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {

      const inputMatch = this.actionValues.exec(returnValues.log); //new
      if (inputMatch) {
        console.log('inputMatch');
        console.dir(inputMatch, { depth: null });
        logger.debug('[CustomOutputParser] inputMatch', inputMatch);
        returnValues.toolInput = inputMatch[1].replaceAll('"', '').trim();
        returnValues.log = returnValues.log.replace(this.actionValues, '');
      }

      return returnValues;
    } else {
      console.log('No valid tool mentioned.', this.tools, text);
      logger.debug('[CustomOutputParser] No valid tool mentioned.', this.tools, text);
      return {
        tool: 'self-reflection',
        toolInput: 'Hypothetical actions: \n"' + text + '"\n',
@@ -202,8 +204,8 @@ class CustomOutputParser extends ZeroShotAgentOutputParser {
    }

    // if (action && input) {
    //   console.log('Action:', action);
    //   console.log('Input:', input);
    //   logger.debug('Action:', action);
    //   logger.debug('Input:', input);
    // }
  }

@@ -7,6 +7,8 @@ const {
  SystemMessagePromptTemplate,
  HumanMessagePromptTemplate,
} = require('langchain/prompts');
const { logger } = require('~/config');

const PREFIX = 'You are a helpful AI assistant.';

function parseOutput(message) {
@@ -112,7 +114,7 @@ class FunctionsAgent extends Agent {
      valuesForLLM,
      callbackManager,
    );
    console.log('message', message);
    logger.debug('[FunctionsAgent] plan message', message);
    return parseOutput(message);
  }
}

@@ -1,7 +1,9 @@
const { promptTokensEstimate } = require('openai-chat-tokens');
const checkBalance = require('../../../models/checkBalance');
const { isEnabled } = require('../../../server/utils');
const { formatFromLangChain } = require('../prompts');
const { EModelEndpoint, supportsBalanceCheck } = require('librechat-data-provider');
const { formatFromLangChain } = require('~/app/clients/prompts');
const checkBalance = require('~/models/checkBalance');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');

const createStartHandler = ({
  context,
@@ -15,9 +17,15 @@ const createStartHandler = ({
    const { model, functions, function_call } = invocation_params;
    const messages = _messages[0].map(formatFromLangChain);

    if (manager.debug) {
      console.log(`handleChatModelStart: ${context}`);
      console.dir({ model, functions, function_call }, { depth: null });
    logger.debug(`[createStartHandler] handleChatModelStart: ${context}`, {
      model,
      function_call,
    });

    if (context !== 'title') {
      logger.debug(`[createStartHandler] handleChatModelStart: ${context}`, {
        functions,
      });
    }

    const payload = { messages };
@@ -34,13 +42,15 @@ const createStartHandler = ({
    }

    prelimPromptTokens += promptTokensEstimate(payload);
    if (manager.debug) {
      console.log('Prelim Prompt Tokens & Token Buffer', prelimPromptTokens, tokenBuffer);
    }
    logger.debug('[createStartHandler]', {
      prelimPromptTokens,
      tokenBuffer,
    });
    prelimPromptTokens += tokenBuffer;

    try {
      if (isEnabled(process.env.CHECK_BALANCE)) {
      // TODO: if plugins extends to non-OpenAI models, this will need to be updated
      if (isEnabled(process.env.CHECK_BALANCE) && supportsBalanceCheck[EModelEndpoint.openAI]) {
        const generations =
          initialMessageCount && messages.length > initialMessageCount
            ? messages.slice(initialMessageCount)
@@ -55,11 +65,12 @@ const createStartHandler = ({
            debug: manager.debug,
            generations,
            model,
            endpoint: EModelEndpoint.openAI,
          },
        });
      }
    } catch (err) {
      console.error(`[${context}] checkBalance error`, err);
      logger.error(`[createStartHandler][${context}] checkBalance error`, err);
      manager.abortController.abort();
      if (context === 'summary' || context === 'plugins') {
        manager.addRun(runId, { conversationId, error: err.message });

@@ -1,6 +1,7 @@
const { z } = require('zod');
const { langPrompt, createTitlePrompt, escapeBraces, getSnippet } = require('../prompts');
const { createStructuredOutputChainFromZod } = require('langchain/chains/openai_functions');
const { logger } = require('~/config');

const langSchema = z.object({
  language: z.string().describe('The language of the input text (full noun, no abbreviations).'),
@@ -30,8 +31,7 @@ const runTitleChain = async ({ llm, text, convo, signal, callbacks }) => {
  try {
    snippet = getSnippet(text);
  } catch (e) {
    console.log('Error getting snippet of text for titleChain');
    console.log(e);
    logger.error('[runTitleChain] Error getting snippet of text for titleChain', e);
  }
  const languageChain = createLanguageChain({ llm, callbacks });
  const titleChain = createTitleChain({ llm, callbacks, convo: escapeBraces(convo) });

@@ -1,5 +1,6 @@
const { createStartHandler } = require('../callbacks');
const spendTokens = require('../../../models/spendTokens');
const { createStartHandler } = require('~/app/clients/callbacks');
const spendTokens = require('~/models/spendTokens');
const { logger } = require('~/config');

class RunManager {
  constructor(fields) {
@@ -35,7 +36,7 @@ class RunManager {
    if (this.runs.has(runId)) {
      this.runs.delete(runId);
    } else {
      console.error(`Run with ID ${runId} does not exist.`);
      logger.error(`[api/app/clients/llm/RunManager] Run with ID ${runId} does not exist.`);
    }
  }

@@ -57,10 +58,19 @@ class RunManager {
      {
        handleChatModelStart: createStartHandler({ ...metadata, manager: this }),
        handleLLMEnd: async (output, runId, _parentRunId) => {
          if (this.debug) {
            console.log(`handleLLMEnd: ${JSON.stringify(metadata)}`);
            console.dir({ output, runId, _parentRunId }, { depth: null });
          const { llmOutput, ..._output } = output;
          logger.debug(`[RunManager] handleLLMEnd: ${JSON.stringify(metadata)}`, {
            runId,
            _parentRunId,
            llmOutput,
          });

          if (metadata.context !== 'title') {
            logger.debug('[RunManager] handleLLMEnd:', {
              output: _output,
            });
          }

          const { tokenUsage } = output.llmOutput;
          const run = this.getRunById(runId);
          this.removeRun(runId);
@@ -74,8 +84,7 @@ class RunManager {
          await spendTokens(txData, tokenUsage);
        },
        handleLLMError: async (err) => {
          this.debug && console.log(`handleLLMError: ${JSON.stringify(metadata)}`);
          this.debug && console.error(err);
          logger.error(`[RunManager] handleLLMError: ${JSON.stringify(metadata)}`, err);
          if (metadata.context === 'title') {
            return;
          } else if (metadata.context === 'plugins') {

@@ -1,6 +1,7 @@
const { ConversationSummaryBufferMemory, ChatMessageHistory } = require('langchain/memory');
const { formatLangChainMessages, SUMMARY_PROMPT } = require('../prompts');
const { predictNewSummary } = require('../chains');
const { logger } = require('~/config');

const createSummaryBufferMemory = ({ llm, prompt, messages, ...rest }) => {
  const chatHistory = new ChatMessageHistory(messages);
@@ -22,9 +23,8 @@ const summaryBuffer = async ({
  prompt = SUMMARY_PROMPT,
  signal,
}) => {
  if (debug && previous_summary) {
    console.log('<-----------PREVIOUS SUMMARY----------->\n\n');
    console.log(previous_summary);
  if (previous_summary) {
    logger.debug('[summaryBuffer]', { previous_summary });
  }

  const formattedMessages = formatLangChainMessages(context, formatOptions);
@@ -46,8 +46,7 @@ const summaryBuffer = async ({
  const messages = await chatPromptMemory.chatHistory.getMessages();

  if (debug) {
    console.log('<-----------SUMMARY BUFFER MESSAGES----------->\n\n');
    console.log(JSON.stringify(messages));
    logger.debug('[summaryBuffer]', { summary_buffer_messages: messages.length });
  }

  const predictSummary = await predictNewSummary({
@@ -58,8 +57,7 @@ const summaryBuffer = async ({
  });

  if (debug) {
    console.log('<-----------SUMMARY----------->\n\n');
    console.log(JSON.stringify(predictSummary));
    logger.debug('[summaryBuffer]', { summary: predictSummary });
  }

  return { role: 'system', content: predictSummary };

@@ -1,3 +1,5 @@
const { logger } = require('~/config');

/**
 * The `addImages` function corrects any erroneous image URLs in the `responseMessage.text`
 * and appends image observations from `intermediateSteps` if they are not already present.
@@ -20,7 +22,7 @@
 *
 * addImages(intermediateSteps, responseMessage);
 *
 * console.log(responseMessage.text);
 * logger.debug(responseMessage.text);
 * // Outputs: 'Some text with \n'
 *
 * @returns {void}
@@ -62,7 +64,7 @@ function addImages(intermediateSteps, responseMessage) {
    if (observedImagePath && !responseMessage.text.includes(observedImagePath[0])) {
      responseMessage.text += '\n' + observation;
      if (process.env.DEBUG_PLUGINS) {
        console.debug('[addImages] added image from intermediateSteps');
        logger.debug('[addImages] added image from intermediateSteps:', observation);
      }
    }
  });
42
api/app/clients/prompts/formatGoogleInputs.js
Normal file
@@ -0,0 +1,42 @@
/**
 * Formats an object to match the struct_val, list_val, string_val, float_val, and int_val format.
 *
 * @param {Object} obj - The object to be formatted.
 * @returns {Object} The formatted object.
 *
 * Handles different types:
 * - Arrays are wrapped in list_val and each element is processed.
 * - Objects are recursively processed.
 * - Strings are wrapped in string_val.
 * - Numbers are wrapped in float_val or int_val depending on whether they are floating-point or integers.
 */
function formatGoogleInputs(obj) {
  const formattedObj = {};

  for (const key in obj) {
    if (Object.prototype.hasOwnProperty.call(obj, key)) {
      const value = obj[key];

      // Handle arrays
      if (Array.isArray(value)) {
        formattedObj[key] = { list_val: value.map((item) => formatGoogleInputs(item)) };
      }
      // Handle objects
      else if (typeof value === 'object' && value !== null) {
        formattedObj[key] = formatGoogleInputs(value);
      }
      // Handle numbers
      else if (typeof value === 'number') {
        formattedObj[key] = Number.isInteger(value) ? { int_val: value } : { float_val: value };
      }
      // Handle other types (e.g., strings)
      else {
        formattedObj[key] = { string_val: [value] };
      }
    }
  }

  return { struct_val: formattedObj };
}

module.exports = formatGoogleInputs;
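A small usage sketch of the new helper, following the conversion rules stated in its JSDoc (the input values here are illustrative only):

```
const formatGoogleInputs = require('./formatGoogleInputs');

formatGoogleInputs({ temperature: 0.2, topK: 40, context: 'hi' });
// => {
//   struct_val: {
//     temperature: { float_val: 0.2 },
//     topK: { int_val: 40 },
//     context: { string_val: ['hi'] },
//   },
// }
```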
274
api/app/clients/prompts/formatGoogleInputs.spec.js
Normal file
@@ -0,0 +1,274 @@
const formatGoogleInputs = require('./formatGoogleInputs');

describe('formatGoogleInputs', () => {
  it('formats message correctly', () => {
    const input = {
      messages: [
        {
          content: 'hi',
          author: 'user',
        },
      ],
      context: 'context',
      examples: [
        {
          input: {
            author: 'user',
            content: 'user input',
          },
          output: {
            author: 'bot',
            content: 'bot output',
          },
        },
      ],
      parameters: {
        temperature: 0.2,
        topP: 0.8,
        topK: 40,
        maxOutputTokens: 1024,
      },
    };

    const expectedOutput = {
      struct_val: {
        messages: {
          list_val: [
            {
              struct_val: {
                content: {
                  string_val: ['hi'],
                },
                author: {
                  string_val: ['user'],
                },
              },
            },
          ],
        },
        context: {
          string_val: ['context'],
        },
        examples: {
          list_val: [
            {
              struct_val: {
                input: {
                  struct_val: {
                    author: {
                      string_val: ['user'],
                    },
                    content: {
                      string_val: ['user input'],
                    },
                  },
                },
                output: {
                  struct_val: {
                    author: {
                      string_val: ['bot'],
                    },
                    content: {
                      string_val: ['bot output'],
                    },
                  },
                },
              },
            },
          ],
        },
        parameters: {
          struct_val: {
            temperature: {
              float_val: 0.2,
            },
            topP: {
              float_val: 0.8,
            },
            topK: {
              int_val: 40,
            },
            maxOutputTokens: {
              int_val: 1024,
            },
          },
        },
      },
    };

    const result = formatGoogleInputs(input);
    expect(JSON.stringify(result)).toEqual(JSON.stringify(expectedOutput));
  });

  it('formats real payload parts', () => {
    const input = {
      instances: [
        {
          context: 'context',
          examples: [
            {
              input: {
                author: 'user',
                content: 'user input',
              },
              output: {
                author: 'bot',
                content: 'user output',
              },
            },
          ],
          messages: [
            {
              author: 'user',
              content: 'hi',
            },
          ],
        },
      ],
      parameters: {
        candidateCount: 1,
        maxOutputTokens: 1024,
        temperature: 0.2,
        topP: 0.8,
        topK: 40,
      },
    };
    const expectedOutput = {
      struct_val: {
        instances: {
          list_val: [
            {
              struct_val: {
                context: { string_val: ['context'] },
                examples: {
                  list_val: [
                    {
                      struct_val: {
                        input: {
                          struct_val: {
                            author: { string_val: ['user'] },
                            content: { string_val: ['user input'] },
                          },
                        },
                        output: {
                          struct_val: {
                            author: { string_val: ['bot'] },
                            content: { string_val: ['user output'] },
                          },
                        },
                      },
                    },
                  ],
                },
                messages: {
                  list_val: [
                    {
                      struct_val: {
                        author: { string_val: ['user'] },
                        content: { string_val: ['hi'] },
                      },
                    },
                  ],
                },
              },
            },
          ],
        },
        parameters: {
          struct_val: {
            candidateCount: { int_val: 1 },
            maxOutputTokens: { int_val: 1024 },
            temperature: { float_val: 0.2 },
            topP: { float_val: 0.8 },
            topK: { int_val: 40 },
          },
        },
      },
    };

    const result = formatGoogleInputs(input);
    expect(JSON.stringify(result)).toEqual(JSON.stringify(expectedOutput));
  });

  it('helps create valid payload parts', () => {
    const instances = {
      context: 'context',
      examples: [
        {
          input: {
            author: 'user',
            content: 'user input',
          },
          output: {
            author: 'bot',
            content: 'user output',
          },
        },
      ],
      messages: [
        {
          author: 'user',
          content: 'hi',
        },
      ],
    };

    const expectedInstances = {
      struct_val: {
        context: { string_val: ['context'] },
        examples: {
          list_val: [
            {
              struct_val: {
                input: {
                  struct_val: {
                    author: { string_val: ['user'] },
                    content: { string_val: ['user input'] },
                  },
                },
                output: {
                  struct_val: {
                    author: { string_val: ['bot'] },
                    content: { string_val: ['user output'] },
                  },
                },
              },
            },
          ],
        },
        messages: {
          list_val: [
            {
              struct_val: {
                author: { string_val: ['user'] },
                content: { string_val: ['hi'] },
              },
            },
          ],
        },
      },
    };

    const parameters = {
      candidateCount: 1,
      maxOutputTokens: 1024,
      temperature: 0.2,
      topP: 0.8,
      topK: 40,
    };
    const expectedParameters = {
      struct_val: {
        candidateCount: { int_val: 1 },
        maxOutputTokens: { int_val: 1024 },
        temperature: { float_val: 0.2 },
        topP: { float_val: 0.8 },
        topK: { int_val: 40 },
      },
    };

    const instancesResult = formatGoogleInputs(instances);
    const parametersResult = formatGoogleInputs(parameters);
    expect(JSON.stringify(instancesResult)).toEqual(JSON.stringify(expectedInstances));
    expect(JSON.stringify(parametersResult)).toEqual(JSON.stringify(expectedParameters));
  });
});

@@ -1,5 +1,21 @@
const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema');

/**
 * Formats a message to OpenAI Vision API payload format.
 *
 * @param {Object} params - The parameters for formatting.
 * @param {Object} params.message - The message object to format.
 * @param {string} [params.message.role] - The role of the message sender (must be 'user').
 * @param {string} [params.message.content] - The text content of the message.
 * @param {Array<string>} [params.image_urls] - The image_urls to attach to the message.
 * @returns {(Object)} - The formatted message.
 */
const formatVisionMessage = ({ message, image_urls }) => {
  message.content = [{ type: 'text', text: message.content }, ...image_urls];

  return message;
};
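For reference, a sketch of the shape `formatVisionMessage` produces. The helper is module-internal and is shown standalone here for illustration; the `image_urls` entry assumes entries are already OpenAI vision content parts, which is not stated in this diff:

```
const message = { role: 'user', content: 'What is in this image?' };
const image_urls = [{ type: 'image_url', image_url: { url: 'https://example.com/cat.png' } }];

formatVisionMessage({ message, image_urls });
// => {
//   role: 'user',
//   content: [
//     { type: 'text', text: 'What is in this image?' },
//     { type: 'image_url', image_url: { url: 'https://example.com/cat.png' } },
//   ],
// }
```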
|
||||
|
||||
/**
|
||||
* Formats a message to OpenAI payload format based on the provided options.
|
||||
*
|
||||
@@ -10,6 +26,7 @@ const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema');
|
||||
* @param {string} [params.message.sender] - The sender of the message.
|
||||
* @param {string} [params.message.text] - The text content of the message.
|
||||
* @param {string} [params.message.content] - The content of the message.
|
||||
* @param {Array<string>} [params.message.image_urls] - The image_urls attached to the message for Vision API.
|
||||
* @param {string} [params.userName] - The name of the user.
|
||||
* @param {string} [params.assistantName] - The name of the assistant.
|
||||
* @param {boolean} [params.langChain=false] - Whether to return a LangChain message object.
|
||||
@@ -32,6 +49,11 @@ const formatMessage = ({ message, userName, assistantName, langChain = false })
|
||||
content,
|
||||
};
|
||||
|
||||
const { image_urls } = message;
|
||||
if (Array.isArray(image_urls) && image_urls.length > 0 && role === 'user') {
|
||||
return formatVisionMessage({ message: formattedMessage, image_urls: message.image_urls });
|
||||
}
|
||||
|
||||
if (_name) {
|
||||
formattedMessage.name = _name;
|
||||
}
|
||||
|
||||
@@ -1,19 +1,33 @@
|
||||
const { initializeFakeClient } = require('./FakeClient');
|
||||
|
||||
jest.mock('../../../lib/db/connectDb');
|
||||
jest.mock('../../../models', () => {
|
||||
return function () {
|
||||
return {
|
||||
save: jest.fn(),
|
||||
deleteConvos: jest.fn(),
|
||||
getConvo: jest.fn(),
|
||||
getMessages: jest.fn(),
|
||||
saveMessage: jest.fn(),
|
||||
updateMessage: jest.fn(),
|
||||
saveConvo: jest.fn(),
|
||||
};
|
||||
};
|
||||
});
|
||||
jest.mock('~/models', () => ({
|
||||
User: jest.fn(),
|
||||
Key: jest.fn(),
|
||||
Session: jest.fn(),
|
||||
Balance: jest.fn(),
|
||||
Transaction: jest.fn(),
|
||||
getMessages: jest.fn().mockResolvedValue([]),
|
||||
saveMessage: jest.fn(),
|
||||
updateMessage: jest.fn(),
|
||||
deleteMessagesSince: jest.fn(),
|
||||
deleteMessages: jest.fn(),
|
||||
getConvoTitle: jest.fn(),
|
||||
getConvo: jest.fn(),
|
||||
saveConvo: jest.fn(),
|
||||
deleteConvos: jest.fn(),
|
||||
getPreset: jest.fn(),
|
||||
getPresets: jest.fn(),
|
||||
savePreset: jest.fn(),
|
||||
deletePresets: jest.fn(),
|
||||
findFileById: jest.fn(),
|
||||
createFile: jest.fn(),
|
||||
updateFile: jest.fn(),
|
||||
deleteFile: jest.fn(),
|
||||
deleteFiles: jest.fn(),
|
||||
getFiles: jest.fn(),
|
||||
updateFileUsage: jest.fn(),
|
||||
}));
|
||||
|
||||
jest.mock('langchain/chat_models/openai', () => {
|
||||
return {
|
||||
@@ -529,9 +543,9 @@ describe('BaseClient', () => {
|
||||
);
|
||||
});
|
||||
|
||||
test('setOptions is called with the correct arguments', async () => {
|
||||
test('setOptions is called with the correct arguments only when replaceOptions is set to true', async () => {
|
||||
TestClient.setOptions = jest.fn();
|
||||
const opts = { conversationId: '123', parentMessageId: '456' };
|
||||
const opts = { conversationId: '123', parentMessageId: '456', replaceOptions: true };
|
||||
await TestClient.sendMessage('Hello, world!', opts);
|
||||
expect(TestClient.setOptions).toHaveBeenCalledWith(opts);
|
||||
TestClient.setOptions.mockClear();
|
||||
|
||||
@@ -42,7 +42,6 @@ class FakeClient extends BaseClient {
|
||||
|
||||
this.maxContextTokens = getModelMaxTokens(this.modelOptions.model) ?? 4097;
|
||||
}
|
||||
getCompletion() {}
|
||||
buildMessages() {}
|
||||
getTokenCount(str) {
|
||||
return str.length;
|
||||
@@ -86,6 +85,19 @@ const initializeFakeClient = (apiKey, options, fakeMessages) => {
|
||||
return 'Mock response text';
|
||||
});
|
||||
|
||||
// eslint-disable-next-line no-unused-vars
|
||||
TestClient.getCompletion = jest.fn().mockImplementation(async (..._args) => {
|
||||
return {
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: 'Mock response text',
|
||||
},
|
||||
},
|
||||
],
|
||||
};
|
||||
});
|
||||
|
||||
TestClient.buildMessages = jest.fn(async (messages, parentMessageId) => {
|
||||
const orderedMessages = TestClient.constructor.getMessagesForConversation({
|
||||
messages,
|
||||
|
||||
@@ -1,8 +1,138 @@
|
||||
require('dotenv').config();
|
||||
const OpenAI = require('openai');
|
||||
const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source');
|
||||
const { genAzureChatCompletion } = require('~/utils/azureUtils');
|
||||
const OpenAIClient = require('../OpenAIClient');
|
||||
|
||||
jest.mock('meilisearch');
|
||||
|
||||
jest.mock('~/lib/db/connectDb');
|
||||
jest.mock('~/models', () => ({
|
||||
User: jest.fn(),
|
||||
Key: jest.fn(),
|
||||
Session: jest.fn(),
|
||||
Balance: jest.fn(),
|
||||
Transaction: jest.fn(),
|
||||
getMessages: jest.fn().mockResolvedValue([]),
|
||||
saveMessage: jest.fn(),
|
||||
updateMessage: jest.fn(),
|
||||
deleteMessagesSince: jest.fn(),
|
||||
deleteMessages: jest.fn(),
|
||||
getConvoTitle: jest.fn(),
|
||||
getConvo: jest.fn(),
|
||||
saveConvo: jest.fn(),
|
||||
deleteConvos: jest.fn(),
|
||||
getPreset: jest.fn(),
|
||||
getPresets: jest.fn(),
|
||||
savePreset: jest.fn(),
|
||||
deletePresets: jest.fn(),
|
||||
findFileById: jest.fn(),
|
||||
createFile: jest.fn(),
|
||||
updateFile: jest.fn(),
|
||||
deleteFile: jest.fn(),
|
||||
deleteFiles: jest.fn(),
|
||||
getFiles: jest.fn(),
|
||||
updateFileUsage: jest.fn(),
|
||||
}));
|
||||
|
||||
jest.mock('langchain/chat_models/openai', () => {
|
||||
return {
|
||||
ChatOpenAI: jest.fn().mockImplementation(() => {
|
||||
return {};
|
||||
}),
|
||||
};
|
||||
});
|
||||
|
||||
jest.mock('openai');
|
||||
|
||||
jest.spyOn(OpenAI, 'constructor').mockImplementation(function (...options) {
|
||||
// We can add additional logic here if needed
|
||||
return new OpenAI(...options);
|
||||
});
|
||||
|
||||
const finalChatCompletion = jest.fn().mockResolvedValue({
|
||||
choices: [
|
||||
{
|
||||
message: { role: 'assistant', content: 'Mock message content' },
|
||||
finish_reason: 'Mock finish reason',
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
const stream = jest.fn().mockImplementation(() => {
|
||||
let isDone = false;
|
||||
let isError = false;
|
||||
let errorCallback = null;
|
||||
|
||||
const onEventHandlers = {
|
||||
abort: () => {
|
||||
// Mock abort behavior
|
||||
},
|
||||
error: (callback) => {
|
||||
errorCallback = callback; // Save the error callback for later use
|
||||
},
|
||||
finalMessage: (callback) => {
|
||||
callback({ role: 'assistant', content: 'Mock Response' });
|
||||
isDone = true; // Set stream to done
|
||||
},
|
||||
};
|
||||
|
||||
const mockStream = {
|
||||
on: jest.fn((event, callback) => {
|
||||
if (onEventHandlers[event]) {
|
||||
onEventHandlers[event](callback);
|
||||
}
|
||||
return mockStream;
|
||||
}),
|
||||
finalChatCompletion,
|
||||
controller: { abort: jest.fn() },
|
||||
triggerError: () => {
|
||||
isError = true;
|
||||
if (errorCallback) {
|
||||
errorCallback(new Error('Mock error'));
|
||||
}
|
||||
},
|
||||
[Symbol.asyncIterator]: () => {
|
||||
return {
|
||||
next: () => {
|
||||
if (isError) {
|
||||
return Promise.reject(new Error('Mock error'));
|
||||
}
|
||||
if (isDone) {
|
||||
return Promise.resolve({ done: true });
|
||||
}
|
||||
const chunk = { choices: [{ delta: { content: 'Mock chunk' } }] };
|
||||
return Promise.resolve({ value: chunk, done: false });
|
||||
},
|
||||
};
|
||||
},
|
||||
};
|
||||
return mockStream;
|
||||
});
|
||||
|
||||
const create = jest.fn().mockResolvedValue({
|
||||
choices: [
|
||||
{
|
||||
message: { content: 'Mock message content' },
|
||||
finish_reason: 'Mock finish reason',
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
OpenAI.mockImplementation(() => ({
|
||||
beta: {
|
||||
chat: {
|
||||
completions: {
|
||||
stream,
|
||||
},
|
||||
},
|
||||
},
|
||||
chat: {
|
||||
completions: {
|
||||
create,
|
||||
},
|
||||
},
|
||||
}));
|
||||
|
||||
describe('OpenAIClient', () => {
|
||||
let client, client2;
|
||||
const model = 'gpt-4';
|
||||
@@ -12,6 +142,21 @@ describe('OpenAIClient', () => {
|
||||
{ role: 'assistant', sender: 'Assistant', text: 'Hi', messageId: '2' },
|
||||
];
|
||||
|
||||
const defaultOptions = {
|
||||
// debug: true,
|
||||
openaiApiKey: 'new-api-key',
|
||||
modelOptions: {
|
||||
model,
|
||||
temperature: 0.7,
|
||||
},
|
||||
};
|
||||
|
||||
const defaultAzureOptions = {
|
||||
azureOpenAIApiInstanceName: 'your-instance-name',
|
||||
azureOpenAIApiDeploymentName: 'your-deployment-name',
|
||||
azureOpenAIApiVersion: '2020-07-01-preview',
|
||||
};
|
||||
|
||||
beforeAll(() => {
|
||||
jest.spyOn(console, 'warn').mockImplementation(() => {});
|
||||
});
|
||||
@@ -21,14 +166,7 @@ describe('OpenAIClient', () => {
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
const options = {
|
||||
// debug: true,
|
||||
openaiApiKey: 'new-api-key',
|
||||
modelOptions: {
|
||||
model,
|
||||
temperature: 0.7,
|
||||
},
|
||||
};
|
||||
const options = { ...defaultOptions };
|
||||
client = new OpenAIClient('test-api-key', options);
|
||||
client2 = new OpenAIClient('test-api-key', options);
|
||||
client.summarizeMessages = jest.fn().mockResolvedValue({
|
||||
@@ -40,6 +178,7 @@ describe('OpenAIClient', () => {
|
||||
.fn()
|
||||
.mockResolvedValue({ prompt: messages.map((m) => m.text).join('\n') });
|
||||
client.constructor.freeAndResetAllEncoders();
|
||||
client.getMessages = jest.fn().mockResolvedValue([]);
|
||||
});
|
||||
|
||||
describe('setOptions', () => {
|
||||
@@ -94,7 +233,7 @@ describe('OpenAIClient', () => {
|
||||
|
||||
client.setOptions({ reverseProxyUrl: 'https://example.com/completions' });
|
||||
expect(client.completionsUrl).toBe('https://example.com/completions');
|
||||
expect(client.langchainProxy).toBe(null);
|
||||
expect(client.langchainProxy).toBe('https://example.com/completions');
|
||||
});
|
||||
});
|
||||
|
||||
@@ -408,4 +547,86 @@ describe('OpenAIClient', () => {
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('sendMessage/getCompletion/chatCompletion', () => {
|
||||
afterEach(() => {
|
||||
delete process.env.AZURE_OPENAI_DEFAULT_MODEL;
|
||||
delete process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
|
||||
delete process.env.OPENROUTER_API_KEY;
|
||||
});
|
||||
|
||||
it('should call getCompletion and fetchEventSource when using a text/instruct model', async () => {
|
||||
const model = 'text-davinci-003';
|
||||
const onProgress = jest.fn().mockImplementation(() => ({}));
|
||||
|
||||
const testClient = new OpenAIClient('test-api-key', {
|
||||
...defaultOptions,
|
||||
modelOptions: { model },
|
||||
});
|
||||
|
||||
const getCompletion = jest.spyOn(testClient, 'getCompletion');
|
||||
await testClient.sendMessage('Hi mom!', { onProgress });
|
||||
|
||||
expect(getCompletion).toHaveBeenCalled();
|
||||
expect(getCompletion.mock.calls.length).toBe(1);
|
||||
|
||||
const currentDateString = new Date().toLocaleDateString('en-us', {
|
||||
year: 'numeric',
|
||||
month: 'long',
|
||||
day: 'numeric',
|
||||
});
|
||||
|
||||
expect(getCompletion.mock.calls[0][0]).toBe(
|
||||
`||>Instructions:\nYou are ChatGPT, a large language model trained by OpenAI. Respond conversationally.\nCurrent date: ${currentDateString}\n\n||>User:\nHi mom!\n||>Assistant:\n`,
|
||||
);
|
||||
|
||||
expect(fetchEventSource).toHaveBeenCalled();
|
||||
expect(fetchEventSource.mock.calls.length).toBe(1);
|
||||
|
||||
// Check if the first argument (url) is correct
|
||||
const firstCallArgs = fetchEventSource.mock.calls[0];
|
||||
|
||||
const expectedURL = 'https://api.openai.com/v1/completions';
|
||||
expect(firstCallArgs[0]).toBe(expectedURL);
|
||||
|
||||
const requestBody = JSON.parse(firstCallArgs[1].body);
|
||||
expect(requestBody).toHaveProperty('model');
|
||||
expect(requestBody.model).toBe(model);
|
||||
});
|
||||
|
||||
it('[Azure OpenAI] should call chatCompletion and OpenAI.stream with correct args', async () => {
|
||||
// Set a default model
|
||||
process.env.AZURE_OPENAI_DEFAULT_MODEL = 'gpt4-turbo';
|
||||
|
||||
const onProgress = jest.fn().mockImplementation(() => ({}));
|
||||
client.azure = defaultAzureOptions;
|
||||
const chatCompletion = jest.spyOn(client, 'chatCompletion');
|
||||
await client.sendMessage('Hi mom!', {
|
||||
replaceOptions: true,
|
||||
...defaultOptions,
|
||||
modelOptions: { model: 'gpt4-turbo', stream: true },
|
||||
onProgress,
|
||||
azure: defaultAzureOptions,
|
||||
});
|
||||
|
||||
expect(chatCompletion).toHaveBeenCalled();
|
||||
expect(chatCompletion.mock.calls.length).toBe(1);
|
||||
|
||||
const chatCompletionArgs = chatCompletion.mock.calls[0][0];
|
||||
const { payload } = chatCompletionArgs;
|
||||
|
||||
expect(payload[0].role).toBe('user');
|
||||
expect(payload[0].content).toBe('Hi mom!');
|
||||
|
||||
// Azure OpenAI does not use the model property, and will error if it's passed
|
||||
// This check ensures the model property is not present
|
||||
const streamArgs = stream.mock.calls[0][0];
|
||||
expect(streamArgs).not.toHaveProperty('model');
|
||||
|
||||
// Check if the baseURL is correct
|
||||
const constructorArgs = OpenAI.mock.calls[0][0];
|
||||
const expectedURL = genAzureChatCompletion(defaultAzureOptions).split('/chat')[0];
|
||||
expect(constructorArgs.baseURL).toBe(expectedURL);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -144,4 +144,47 @@ describe('PluginsClient', () => {
|
||||
expect(chatMessages[0].text).toEqual(userMessage);
|
||||
});
|
||||
});
|
||||
|
||||
describe('getFunctionModelName', () => {
|
||||
let client;
|
||||
|
||||
beforeEach(() => {
|
||||
client = new PluginsClient('dummy_api_key');
|
||||
});
|
||||
|
||||
test('should return the input when it includes a dash followed by four digits', () => {
|
||||
expect(client.getFunctionModelName('-1234')).toBe('-1234');
|
||||
expect(client.getFunctionModelName('gpt-4-5678-preview')).toBe('gpt-4-5678-preview');
|
||||
});
|
||||
|
||||
test('should return the input for all function-capable models (`0613` models and above)', () => {
|
||||
expect(client.getFunctionModelName('gpt-4-0613')).toBe('gpt-4-0613');
|
||||
expect(client.getFunctionModelName('gpt-4-32k-0613')).toBe('gpt-4-32k-0613');
|
||||
expect(client.getFunctionModelName('gpt-3.5-turbo-0613')).toBe('gpt-3.5-turbo-0613');
|
||||
expect(client.getFunctionModelName('gpt-3.5-turbo-16k-0613')).toBe('gpt-3.5-turbo-16k-0613');
|
||||
expect(client.getFunctionModelName('gpt-3.5-turbo-1106')).toBe('gpt-3.5-turbo-1106');
|
||||
expect(client.getFunctionModelName('gpt-4-1106-preview')).toBe('gpt-4-1106-preview');
|
||||
expect(client.getFunctionModelName('gpt-4-1106')).toBe('gpt-4-1106');
|
||||
});
|
||||
|
||||
test('should return the corresponding model if input is non-function capable (`0314` models)', () => {
|
||||
expect(client.getFunctionModelName('gpt-4-0314')).toBe('gpt-4');
|
||||
expect(client.getFunctionModelName('gpt-4-32k-0314')).toBe('gpt-4');
|
||||
expect(client.getFunctionModelName('gpt-3.5-turbo-0314')).toBe('gpt-3.5-turbo');
|
||||
expect(client.getFunctionModelName('gpt-3.5-turbo-16k-0314')).toBe('gpt-3.5-turbo');
|
||||
});
|
||||
|
||||
test('should return "gpt-3.5-turbo" when the input includes "gpt-3.5-turbo"', () => {
|
||||
expect(client.getFunctionModelName('test gpt-3.5-turbo model')).toBe('gpt-3.5-turbo');
|
||||
});
|
||||
|
||||
test('should return "gpt-4" when the input includes "gpt-4"', () => {
|
||||
expect(client.getFunctionModelName('testing gpt-4')).toBe('gpt-4');
|
||||
});
|
||||
|
||||
test('should return "gpt-3.5-turbo" for input that does not meet any specific condition', () => {
|
||||
expect(client.getFunctionModelName('random string')).toBe('gpt-3.5-turbo');
|
||||
expect(client.getFunctionModelName('')).toBe('gpt-3.5-turbo');
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,238 +0,0 @@
|
||||
const { Tool } = require('langchain/tools');
|
||||
const yaml = require('js-yaml');
|
||||
|
||||
/*
|
||||
export interface AIPluginToolParams {
|
||||
name: string;
|
||||
description: string;
|
||||
apiSpec: string;
|
||||
openaiSpec: string;
|
||||
model: BaseLanguageModel;
|
||||
}
|
||||
|
||||
export interface PathParameter {
|
||||
name: string;
|
||||
description: string;
|
||||
}
|
||||
|
||||
export interface Info {
|
||||
title: string;
|
||||
description: string;
|
||||
version: string;
|
||||
}
|
||||
export interface PathMethod {
|
||||
summary: string;
|
||||
operationId: string;
|
||||
parameters?: PathParameter[];
|
||||
}
|
||||
|
||||
interface ApiSpec {
|
||||
openapi: string;
|
||||
info: Info;
|
||||
paths: { [key: string]: { [key: string]: PathMethod } };
|
||||
}
|
||||
*/
|
||||
|
||||
function isJson(str) {
|
||||
try {
|
||||
JSON.parse(str);
|
||||
} catch (e) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
function convertJsonToYamlIfApplicable(spec) {
|
||||
if (isJson(spec)) {
|
||||
const jsonData = JSON.parse(spec);
|
||||
return yaml.dump(jsonData);
|
||||
}
|
||||
return spec;
|
||||
}
|
||||
|
||||
function extractShortVersion(openapiSpec) {
|
||||
openapiSpec = convertJsonToYamlIfApplicable(openapiSpec);
|
||||
try {
|
||||
const fullApiSpec = yaml.load(openapiSpec);
|
||||
const shortApiSpec = {
|
||||
openapi: fullApiSpec.openapi,
|
||||
info: fullApiSpec.info,
|
||||
paths: {},
|
||||
};
|
||||
|
||||
for (let path in fullApiSpec.paths) {
|
||||
shortApiSpec.paths[path] = {};
|
||||
for (let method in fullApiSpec.paths[path]) {
|
||||
shortApiSpec.paths[path][method] = {
|
||||
summary: fullApiSpec.paths[path][method].summary,
|
||||
operationId: fullApiSpec.paths[path][method].operationId,
|
||||
parameters: fullApiSpec.paths[path][method].parameters?.map((parameter) => ({
|
||||
name: parameter.name,
|
||||
description: parameter.description,
|
||||
})),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
return yaml.dump(shortApiSpec);
|
||||
} catch (e) {
|
||||
console.log(e);
|
||||
return '';
|
||||
}
|
||||
}
|
||||
function printOperationDetails(operationId, openapiSpec) {
|
||||
openapiSpec = convertJsonToYamlIfApplicable(openapiSpec);
|
||||
let returnText = '';
|
||||
try {
|
||||
let doc = yaml.load(openapiSpec);
|
||||
let servers = doc.servers;
|
||||
let paths = doc.paths;
|
||||
let components = doc.components;
|
||||
|
||||
for (let path in paths) {
|
||||
for (let method in paths[path]) {
|
||||
let operation = paths[path][method];
|
||||
if (operation.operationId === operationId) {
|
||||
returnText += `The API request to do for operationId "${operationId}" is:\n`;
|
||||
returnText += `Method: ${method.toUpperCase()}\n`;
|
||||
|
||||
let url = servers[0].url + path;
|
||||
returnText += `Path: ${url}\n`;
|
||||
|
||||
returnText += 'Parameters:\n';
|
||||
if (operation.parameters) {
|
||||
for (let param of operation.parameters) {
|
||||
let required = param.required ? '' : ' (optional),';
|
||||
returnText += `- ${param.name} (${param.in},${required} ${param.schema.type}): ${param.description}\n`;
|
||||
}
|
||||
} else {
|
||||
returnText += ' None\n';
|
||||
}
|
||||
returnText += '\n';
|
||||
|
||||
let responseSchema = operation.responses['200'].content['application/json'].schema;
|
||||
|
||||
// Check if schema is a reference
|
||||
if (responseSchema.$ref) {
|
||||
// Extract schema name from reference
|
||||
let schemaName = responseSchema.$ref.split('/').pop();
|
||||
// Look up schema in components
|
||||
responseSchema = components.schemas[schemaName];
|
||||
}
|
||||
|
||||
returnText += 'Response schema:\n';
|
||||
returnText += '- Type: ' + responseSchema.type + '\n';
|
||||
returnText += '- Additional properties:\n';
|
||||
returnText += ' - Type: ' + responseSchema.additionalProperties?.type + '\n';
|
||||
if (responseSchema.additionalProperties?.properties) {
|
||||
returnText += ' - Properties:\n';
|
||||
for (let prop in responseSchema.additionalProperties.properties) {
|
||||
returnText += ` - ${prop} (${responseSchema.additionalProperties.properties[prop].type}): Description not provided in OpenAPI spec\n`;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (returnText === '') {
|
||||
returnText += `No operation with operationId "${operationId}" found.`;
|
||||
}
|
||||
return returnText;
|
||||
} catch (e) {
|
||||
console.log(e);
|
||||
return '';
|
||||
}
|
||||
}
|
||||
|
||||
class AIPluginTool extends Tool {
  /*
  private _name: string;
  private _description: string;
  apiSpec: string;
  openaiSpec: string;
  model: BaseLanguageModel;
  */

  get name() {
    return this._name;
  }

  get description() {
    return this._description;
  }

  constructor(params) {
    super();
    this._name = params.name;
    this._description = params.description;
    this.apiSpec = params.apiSpec;
    this.openaiSpec = params.openaiSpec;
    this.model = params.model;
  }

  async _call(input) {
    let date = new Date();
    let fullDate = `Date: ${date.getDate()}/${
      date.getMonth() + 1
    }/${date.getFullYear()}, Time: ${date.getHours()}:${date.getMinutes()}:${date.getSeconds()}`;
    const prompt = `${fullDate}\nQuestion: ${input} \n${this.apiSpec}.`;
    console.log(prompt);
    const gptResponse = await this.model.predict(prompt);
    let operationId = gptResponse.match(/operationId: (.*)/)?.[1];
    if (!operationId) {
      return 'No operationId found in the response';
    }
    if (operationId == 'No API path found to answer the question') {
      return 'No API path found to answer the question';
    }

    let openApiData = printOperationDetails(operationId, this.openaiSpec);

    return openApiData;
  }

  static async fromPluginUrl(url, model) {
    const aiPluginRes = await fetch(url, {});
    if (!aiPluginRes.ok) {
      throw new Error(`Failed to fetch plugin from ${url} with status ${aiPluginRes.status}`);
    }
    const aiPluginJson = await aiPluginRes.json();
    const apiUrlRes = await fetch(aiPluginJson.api.url, {});
    if (!apiUrlRes.ok) {
      throw new Error(
        `Failed to fetch API spec from ${aiPluginJson.api.url} with status ${apiUrlRes.status}`,
      );
    }
    const apiUrlJson = await apiUrlRes.text();
    const shortApiSpec = extractShortVersion(apiUrlJson);
    return new AIPluginTool({
      name: aiPluginJson.name_for_model.toLowerCase(),
      description: `A \`tool\` to learn the API documentation for ${aiPluginJson.name_for_model.toLowerCase()}, after which you can use 'http_request' to make the actual API call. Short description of how to use the API's results: ${
        aiPluginJson.description_for_model
      })`,
      apiSpec: `
As an AI, your task is to identify the operationId of the relevant API path based on the condensed OpenAPI specifications provided.

Please note:

1. Do not imagine URLs. Only use the information provided in the condensed OpenAPI specifications.

2. Do not guess the operationId. Identify it strictly based on the API paths and their descriptions.

Your output should only include:
- operationId: The operationId of the relevant API path

If you cannot find a suitable API path based on the OpenAPI specifications, please answer only "operationId: No API path found to answer the question".

Now, based on the question above and the condensed OpenAPI specifications given below, identify the operationId:

\`\`\`
${shortApiSpec}
\`\`\`
`,
      openaiSpec: apiUrlJson,
      model: model,
    });
  }
}

module.exports = AIPluginTool;
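For orientation, a minimal sketch of how this class is meant to be used, mirroring the `plugins` loader removed from `handleTools.js` later in this diff; the Klarna manifest URL and model settings are illustrative, not prescribed:

// Sketch: pair AIPluginTool with HttpRequestTool so the agent can first read
// the API docs, then make the actual call with 'http_request'.
const { ChatOpenAI } = require('langchain/chat_models/openai');

async function loadExamplePlugin(openAIApiKey) {
  const model = new ChatOpenAI({ openAIApiKey, temperature: 0 });
  const pluginTool = await AIPluginTool.fromPluginUrl(
    'https://www.klarna.com/.well-known/ai-plugin.json',
    model,
  );
  return [new HttpRequestTool(), pluginTool];
}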
api/app/clients/tools/AzureAiSearch.js (new file, 104 lines)
@@ -0,0 +1,104 @@
const { z } = require('zod');
const { StructuredTool } = require('langchain/tools');
const { SearchClient, AzureKeyCredential } = require('@azure/search-documents');
const { logger } = require('~/config');

class AzureAISearch extends StructuredTool {
  // Constants for default values
  static DEFAULT_API_VERSION = '2023-11-01';
  static DEFAULT_QUERY_TYPE = 'simple';
  static DEFAULT_TOP = 5;

  // Helper function for initializing properties
  _initializeField(field, envVar, defaultValue) {
    return field || process.env[envVar] || defaultValue;
  }

  constructor(fields = {}) {
    super();

    // Initialize properties using helper function
    this.serviceEndpoint = this._initializeField(
      fields.AZURE_AI_SEARCH_SERVICE_ENDPOINT,
      'AZURE_AI_SEARCH_SERVICE_ENDPOINT',
    );
    this.indexName = this._initializeField(
      fields.AZURE_AI_SEARCH_INDEX_NAME,
      'AZURE_AI_SEARCH_INDEX_NAME',
    );
    this.apiKey = this._initializeField(fields.AZURE_AI_SEARCH_API_KEY, 'AZURE_AI_SEARCH_API_KEY');
    this.apiVersion = this._initializeField(
      fields.AZURE_AI_SEARCH_API_VERSION,
      'AZURE_AI_SEARCH_API_VERSION',
      AzureAISearch.DEFAULT_API_VERSION,
    );
    this.queryType = this._initializeField(
      fields.AZURE_AI_SEARCH_SEARCH_OPTION_QUERY_TYPE,
      'AZURE_AI_SEARCH_SEARCH_OPTION_QUERY_TYPE',
      AzureAISearch.DEFAULT_QUERY_TYPE,
    );
    this.top = this._initializeField(
      fields.AZURE_AI_SEARCH_SEARCH_OPTION_TOP,
      'AZURE_AI_SEARCH_SEARCH_OPTION_TOP',
      AzureAISearch.DEFAULT_TOP,
    );
    this.select = this._initializeField(
      fields.AZURE_AI_SEARCH_SEARCH_OPTION_SELECT,
      'AZURE_AI_SEARCH_SEARCH_OPTION_SELECT',
    );

    // Check for required fields
    if (!this.serviceEndpoint || !this.indexName || !this.apiKey) {
      throw new Error(
        'Missing AZURE_AI_SEARCH_SERVICE_ENDPOINT, AZURE_AI_SEARCH_INDEX_NAME, or AZURE_AI_SEARCH_API_KEY environment variable.',
      );
    }

    // Create SearchClient
    this.client = new SearchClient(
      this.serviceEndpoint,
      this.indexName,
      new AzureKeyCredential(this.apiKey),
      { apiVersion: this.apiVersion },
    );

    // Define schema
    this.schema = z.object({
      query: z.string().describe('Search word or phrase to Azure AI Search'),
    });
  }

  // Simplified getter methods
  get name() {
    return 'azure-ai-search';
  }

  get description() {
    return 'Use the \'azure-ai-search\' tool to retrieve search results relevant to your input';
  }

  // Improved error handling and logging
  async _call(data) {
    const { query } = data;
    try {
      const searchOption = {
        queryType: this.queryType,
        top: this.top,
      };
      if (this.select) {
        searchOption.select = this.select.split(',');
      }
      const searchResults = await this.client.search(query, searchOption);
      const resultDocuments = [];
      for await (const result of searchResults.results) {
        resultDocuments.push(result.document);
      }
      return JSON.stringify(resultDocuments);
    } catch (error) {
      logger.error('Azure AI Search request failed', error);
      return 'There was an error with Azure AI Search.';
    }
  }
}

module.exports = AzureAISearch;
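A brief usage sketch for the new tool; the endpoint, index name, and key below are placeholders (in practice they come from the `AZURE_AI_SEARCH_*` environment variables checked above):

// Sketch: instantiate directly with fields instead of environment variables.
const search = new AzureAISearch({
  AZURE_AI_SEARCH_SERVICE_ENDPOINT: 'https://example.search.windows.net',
  AZURE_AI_SEARCH_INDEX_NAME: 'my-index',
  AZURE_AI_SEARCH_API_KEY: 'placeholder-key',
});

// Inputs are validated against the zod schema defined in the constructor.
search._call({ query: 'release notes' }).then((json) => {
  console.log(JSON.parse(json)); // array of matching documents
});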
@@ -1,111 +0,0 @@
const { Tool } = require('langchain/tools');
const { SearchClient, AzureKeyCredential } = require('@azure/search-documents');

class AzureCognitiveSearch extends Tool {
  constructor(fields = {}) {
    super();
    this.serviceEndpoint =
      fields.AZURE_COGNITIVE_SEARCH_SERVICE_ENDPOINT || this.getServiceEndpoint();
    this.indexName = fields.AZURE_COGNITIVE_SEARCH_INDEX_NAME || this.getIndexName();
    this.apiKey = fields.AZURE_COGNITIVE_SEARCH_API_KEY || this.getApiKey();

    this.apiVersion = fields.AZURE_COGNITIVE_SEARCH_API_VERSION || this.getApiVersion();

    this.queryType = fields.AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_QUERY_TYPE || this.getQueryType();
    this.top = fields.AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_TOP || this.getTop();
    this.select = fields.AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_SELECT || this.getSelect();

    this.client = new SearchClient(
      this.serviceEndpoint,
      this.indexName,
      new AzureKeyCredential(this.apiKey),
      {
        apiVersion: this.apiVersion,
      },
    );
  }

  /**
   * The name of the tool.
   * @type {string}
   */
  name = 'azure-cognitive-search';

  /**
   * A description for the agent to use
   * @type {string}
   */
  description =
    'Use the \'azure-cognitive-search\' tool to retrieve search results relevant to your input';

  getServiceEndpoint() {
    const serviceEndpoint = process.env.AZURE_COGNITIVE_SEARCH_SERVICE_ENDPOINT || '';
    if (!serviceEndpoint) {
      throw new Error('Missing AZURE_COGNITIVE_SEARCH_SERVICE_ENDPOINT environment variable.');
    }
    return serviceEndpoint;
  }

  getIndexName() {
    const indexName = process.env.AZURE_COGNITIVE_SEARCH_INDEX_NAME || '';
    if (!indexName) {
      throw new Error('Missing AZURE_COGNITIVE_SEARCH_INDEX_NAME environment variable.');
    }
    return indexName;
  }

  getApiKey() {
    const apiKey = process.env.AZURE_COGNITIVE_SEARCH_API_KEY || '';
    if (!apiKey) {
      throw new Error('Missing AZURE_COGNITIVE_SEARCH_API_KEY environment variable.');
    }
    return apiKey;
  }

  getApiVersion() {
    return process.env.AZURE_COGNITIVE_SEARCH_API_VERSION || '2020-06-30';
  }

  getQueryType() {
    return process.env.AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_QUERY_TYPE || 'simple';
  }

  getTop() {
    if (process.env.AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_TOP) {
      return Number(process.env.AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_TOP);
    } else {
      return 5;
    }
  }

  getSelect() {
    if (process.env.AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_SELECT) {
      return process.env.AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_SELECT.split(',');
    } else {
      return null;
    }
  }

  async _call(query) {
    try {
      const searchOption = {
        queryType: this.queryType,
        top: this.top,
      };
      if (this.select) {
        searchOption.select = this.select;
      }
      const searchResults = await this.client.search(query, searchOption);
      const resultDocuments = [];
      for await (const result of searchResults.results) {
        resultDocuments.push(result.document);
      }
      return JSON.stringify(resultDocuments);
    } catch (error) {
      console.error(`Azure Cognitive Search request failed: ${error}`);
      return 'There was an error with Azure Cognitive Search.';
    }
  }
}

module.exports = AzureCognitiveSearch;
@@ -1,52 +0,0 @@
const { Tool } = require('langchain/tools');
const WebSocket = require('ws');
const { promisify } = require('util');
const fs = require('fs');

class CodeInterpreter extends Tool {
  constructor() {
    super();
    this.name = 'code-interpreter';
    this.description = `If there is plotting or any image related tasks, save the result as .png file.
No need show the image or plot. USE print(variable_name) if you need output.You can run python codes with this plugin.You have to use print function in python code to get any result from this plugin.
This does not support user input. Even if the code has input() function, change it to an appropriate value.
You can show the user the code with input() functions. But the code passed to the plug-in should not contain input().
You should provide properly formatted code to this plugin. If the code is executed successfully, the stdout will be returned to you. You have to print that to the user, and if the user had
asked for an explanation, you have to provide one. If the output is "Error From here" or any other error message,
tell the user "Python Engine Failed" and continue with whatever you are supposed to do.`;

    // Create a promisified version of fs.unlink
    this.unlinkAsync = promisify(fs.unlink);
  }

  async _call(input) {
    const websocket = new WebSocket('ws://localhost:3380'); // Update with your WebSocket server URL

    // Wait until the WebSocket connection is open
    await new Promise((resolve) => {
      websocket.onopen = resolve;
    });

    // Send the Python code to the server
    websocket.send(input);

    // Wait for the result from the server
    const result = await new Promise((resolve) => {
      websocket.onmessage = (event) => {
        resolve(event.data);
      };

      // Handle WebSocket connection closed
      websocket.onclose = () => {
        resolve('Python Engine Failed');
      };
    });

    // Close the WebSocket connection
    websocket.close();

    return result;
  }
}

module.exports = CodeInterpreter;
@@ -3,13 +3,14 @@
const fs = require('fs');
const path = require('path');
const OpenAI = require('openai');
// const { genAzureEndpoint } = require('../../../utils/genAzureEndpoints');
// const { genAzureEndpoint } = require('~/utils/genAzureEndpoints');
const { Tool } = require('langchain/tools');
const { HttpsProxyAgent } = require('https-proxy-agent');
const extractBaseURL = require('~/utils/extractBaseURL');
const saveImageFromUrl = require('./saveImageFromUrl');
const extractBaseURL = require('../../../utils/extractBaseURL');
const { DALLE_REVERSE_PROXY, PROXY } = process.env;
const { logger } = require('~/config');

const { DALLE_REVERSE_PROXY, PROXY } = process.env;
class OpenAICreateImage extends Tool {
  constructor(fields = {}) {
    super();
@@ -102,9 +103,12 @@ Guidelines:

    if (match) {
      imageName = match[0];
      console.log(imageName); // Output: img-lgCf7ppcbhqQrz6a5ear6FOb.png
      logger.debug('[DALL-E]', { imageName }); // Output: img-lgCf7ppcbhqQrz6a5ear6FOb.png
    } else {
      console.log('No image name found in the string.');
      logger.debug('[DALL-E] No image name found in the string.', {
        theImageUrl,
        data: resp.data[0],
      });
    }

    this.outputPath = path.resolve(__dirname, '..', '..', '..', '..', 'client', 'public', 'images');
@@ -120,7 +124,7 @@ Guidelines:
    await saveImageFromUrl(theImageUrl, this.outputPath, imageName);
    this.result = this.getMarkdownImageUrl(imageName);
  } catch (error) {
    console.error('Error while saving the image:', error);
    logger.error('Error while saving the DALL-E image:', error);
    this.result = theImageUrl;
  }

@@ -1,5 +1,6 @@
const { Tool } = require('langchain/tools');
const { google } = require('googleapis');
const { Tool } = require('langchain/tools');
const { logger } = require('~/config');

/**
 * Represents a tool that allows an agent to use the Google Custom Search API.
@@ -86,7 +87,7 @@ class GoogleSearchAPI extends Tool {
    });

    // return response.data;
    // console.log(response.data);
    // logger.debug(response.data);

    if (!response.data.items || response.data.items.length === 0) {
      return this.resultsToReadableFormat([
@@ -110,7 +111,7 @@ class GoogleSearchAPI extends Tool {

    return this.resultsToReadableFormat(metadataResults);
  } catch (error) {
    console.log(`Error searching Google: ${error}`);
    logger.error('[GoogleSearchAPI]', error);
    // throw error;
    return 'There was an error searching Google.';
  }

@@ -1,108 +0,0 @@
const { Tool } = require('langchain/tools');

// class RequestsGetTool extends Tool {
//   constructor(headers = {}, { maxOutputLength } = {}) {
//     super();
//     this.name = 'requests_get';
//     this.headers = headers;
//     this.maxOutputLength = maxOutputLength || 2000;
//     this.description = `A portal to the internet. Use this when you need to get specific content from a website.
//   - Input should be a url (i.e. https://www.google.com). The output will be the text response of the GET request.`;
//   }

//   async _call(input) {
//     const res = await fetch(input, {
//       headers: this.headers
//     });
//     const text = await res.text();
//     return text.slice(0, this.maxOutputLength);
//   }
// }

// class RequestsPostTool extends Tool {
//   constructor(headers = {}, { maxOutputLength } = {}) {
//     super();
//     this.name = 'requests_post';
//     this.headers = headers;
//     this.maxOutputLength = maxOutputLength || Infinity;
//     this.description = `Use this when you want to POST to a website.
//   - Input should be a json string with two keys: "url" and "data".
//   - The value of "url" should be a string, and the value of "data" should be a dictionary of
//   - key-value pairs you want to POST to the url as a JSON body.
//   - Be careful to always use double quotes for strings in the json string
//   - The output will be the text response of the POST request.`;
//   }

//   async _call(input) {
//     try {
//       const { url, data } = JSON.parse(input);
//       const res = await fetch(url, {
//         method: 'POST',
//         headers: this.headers,
//         body: JSON.stringify(data)
//       });
//       const text = await res.text();
//       return text.slice(0, this.maxOutputLength);
//     } catch (error) {
//       return `${error}`;
//     }
//   }
// }

class HttpRequestTool extends Tool {
  constructor(headers = {}, { maxOutputLength = Infinity } = {}) {
    super();
    this.headers = headers;
    this.name = 'http_request';
    this.maxOutputLength = maxOutputLength;
    this.description =
      'Executes HTTP methods (GET, POST, PUT, DELETE, etc.). The input is an object with three keys: "url", "method", and "data". Even for GET or DELETE, include "data" key as an empty string. "method" is the HTTP method, and "url" is the desired endpoint. If POST or PUT, "data" should contain a stringified JSON representing the body to send. Only one url per use.';
  }

  async _call(input) {
    try {
      const urlPattern = /"url":\s*"([^"]*)"/;
      const methodPattern = /"method":\s*"([^"]*)"/;
      const dataPattern = /"data":\s*"([^"]*)"/;

      const url = input.match(urlPattern)[1];
      const method = input.match(methodPattern)[1];
      let data = input.match(dataPattern)[1];

      // Parse 'data' back to JSON if possible
      try {
        data = JSON.parse(data);
      } catch (e) {
        // If it's not a JSON string, keep it as is
      }

      let options = {
        method: method,
        headers: this.headers,
      };

      if (['POST', 'PUT', 'PATCH'].includes(method.toUpperCase()) && data) {
        if (typeof data === 'object') {
          options.body = JSON.stringify(data);
        } else {
          options.body = data;
        }
        options.headers['Content-Type'] = 'application/json';
      }

      const res = await fetch(url, options);

      const text = await res.text();
      if (text.includes('<html')) {
        return 'This tool is not designed to browse web pages. Only use it for API calls.';
      }

      return text.slice(0, this.maxOutputLength);
    } catch (error) {
      console.log(error);
      return `${error}`;
    }
  }
}

module.exports = HttpRequestTool;
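For reference, the input contract the (now removed) `http_request` tool expected, per its description above; the URL and header values here are illustrative:

const http = new HttpRequestTool({ Authorization: 'Bearer placeholder-token' });

// "data" must be present even for GET, as an empty string, since the
// regex-based parser above extracts all three keys unconditionally.
const input = '{"url": "https://api.example.com/items", "method": "GET", "data": ""}';

http._call(input).then((text) => console.log(text));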
@@ -1,9 +1,10 @@
// Generates image using stable diffusion webui's api (automatic1111)
const fs = require('fs');
const { Tool } = require('langchain/tools');
const path = require('path');
const axios = require('axios');
const sharp = require('sharp');
const { Tool } = require('langchain/tools');
const { logger } = require('~/config');

class StableDiffusionAPI extends Tool {
  constructor(fields) {
@@ -81,7 +82,7 @@ Guidelines:
      .toFile(this.outputPath + '/' + imageName);
    this.result = this.getMarkdownImageUrl(imageName);
  } catch (error) {
    console.error('Error while saving the image:', error);
    logger.error('[StableDiffusion] Error while saving the image:', error);
    // this.result = theImageUrl;
  }

@@ -1,6 +1,7 @@
/* eslint-disable no-useless-escape */
const axios = require('axios');
const { Tool } = require('langchain/tools');
const { logger } = require('~/config');

class WolframAlphaAPI extends Tool {
  constructor(fields) {
@@ -38,7 +39,7 @@ General guidelines:
    const response = await axios.get(url, { responseType: 'text' });
    return response.data;
  } catch (error) {
    console.error(`Error fetching raw text: ${error}`);
    logger.error('[WolframAlphaAPI] Error fetching raw text:', error);
    throw error;
  }
}
@@ -68,11 +69,10 @@ General guidelines:
    return response;
  } catch (error) {
    if (error.response && error.response.data) {
      console.log('Error data:', error.response.data);
      logger.error('[WolframAlphaAPI] Error data:', error);
      return error.response.data;
    } else {
      console.log('Error querying Wolfram Alpha', error.message);
      // throw error;
      logger.error('[WolframAlphaAPI] Error querying Wolfram Alpha', error);
      return 'There was an error querying Wolfram Alpha.';
    }
  }

@@ -1,11 +1,12 @@
require('dotenv').config();
const { z } = require('zod');
const fs = require('fs');
const yaml = require('js-yaml');
const { z } = require('zod');
const path = require('path');
const { DynamicStructuredTool } = require('langchain/tools');
const yaml = require('js-yaml');
const { createOpenAPIChain } = require('langchain/chains');
const { DynamicStructuredTool } = require('langchain/tools');
const { ChatPromptTemplate, HumanMessagePromptTemplate } = require('langchain/prompts');
const { logger } = require('~/config');

function addLinePrefix(text, prefix = '// ') {
  return text
@@ -52,7 +53,7 @@ async function readSpecFile(filePath) {
    }
    return yaml.load(fileContents);
  } catch (e) {
    console.error(e);
    logger.error('[readSpecFile] error', e);
    return false;
  }
}
@@ -83,54 +84,51 @@ async function getSpec(url) {
  return ValidSpecPath.parse(url);
}

async function createOpenAPIPlugin({ data, llm, user, message, memory, signal, verbose = false }) {
async function createOpenAPIPlugin({ data, llm, user, message, memory, signal }) {
  let spec;
  try {
    spec = await getSpec(data.api.url, verbose);
    spec = await getSpec(data.api.url);
  } catch (error) {
    verbose && console.debug('getSpec error', error);
    logger.error('[createOpenAPIPlugin] getSpec error', error);
    return null;
  }

  if (!spec) {
    verbose && console.debug('No spec found');
    logger.warn('[createOpenAPIPlugin] No spec found');
    return null;
  }

  const headers = {};
  const { auth, name_for_model, description_for_model, description_for_human } = data;
  if (auth && AuthDefinition.parse(auth)) {
    verbose && console.debug('auth detected', auth);
    logger.debug('[createOpenAPIPlugin] auth detected', auth);
    const { openai } = auth.verification_tokens;
    if (AuthBearer.parse(auth)) {
      headers.authorization = `Bearer ${openai}`;
      verbose && console.debug('added auth bearer', headers);
      logger.debug('[createOpenAPIPlugin] added auth bearer', headers);
    }
  }

  const chainOptions = {
    llm,
    verbose,
  };
  const chainOptions = { llm };

  if (data.headers && data.headers['librechat_user_id']) {
    verbose && console.debug('id detected', headers);
    logger.debug('[createOpenAPIPlugin] id detected', headers);
    headers[data.headers['librechat_user_id']] = user;
  }

  if (Object.keys(headers).length > 0) {
    verbose && console.debug('headers detected', headers);
    logger.debug('[createOpenAPIPlugin] headers detected', headers);
    chainOptions.headers = headers;
  }

  if (data.params) {
    verbose && console.debug('params detected', data.params);
    logger.debug('[createOpenAPIPlugin] params detected', data.params);
    chainOptions.params = data.params;
  }

  let history = '';
  if (memory) {
    verbose && console.debug('openAPI chain: memory detected', memory);
    logger.debug('[createOpenAPIPlugin] openAPI chain: memory detected', memory);
    const { history: chat_history } = await memory.loadMemoryVariables({});
    history = chat_history?.length > 0 ? `\n\n## Chat History:\n${chat_history}\n` : '';
  }

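A hypothetical sketch of calling `createOpenAPIPlugin` with the refactored signature; here `json`, `llm`, `memory`, and `signal` are assumed to be supplied by `loadSpecs` (shown later in this diff), and the user/message values are illustrative:

// Sketch, inside an async loader:
const plugin = await createOpenAPIPlugin({
  data: json,                  // a validated plugin manifest
  llm,                         // a LangChain chat model
  user: 'user-id',             // mapped to the header named by data.headers['librechat_user_id']
  message: 'current question', // incorporated into the chain prompt
  memory,                      // optional; contributes chat history
  signal,                      // AbortSignal for cancellation
});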
@@ -1,6 +1,4 @@
const GoogleSearchAPI = require('./GoogleSearch');
const HttpRequestTool = require('./HttpRequestTool');
const AIPluginTool = require('./AIPluginTool');
const OpenAICreateImage = require('./DALL-E');
const DALLE3 = require('./structured/DALLE3');
const StructuredSD = require('./structured/StableDiffusion');
@@ -8,21 +6,18 @@ const StableDiffusionAPI = require('./StableDiffusion');
const WolframAlphaAPI = require('./Wolfram');
const StructuredWolfram = require('./structured/Wolfram');
const SelfReflectionTool = require('./SelfReflection');
const AzureCognitiveSearch = require('./AzureCognitiveSearch');
const StructuredACS = require('./structured/AzureCognitiveSearch');
const AzureAiSearch = require('./AzureAiSearch');
const StructuredACS = require('./structured/AzureAISearch');
const ChatTool = require('./structured/ChatTool');
const E2BTools = require('./structured/E2BTools');
const CodeSherpa = require('./structured/CodeSherpa');
const CodeSherpaTools = require('./structured/CodeSherpaTools');
const availableTools = require('./manifest.json');
const CodeInterpreter = require('./CodeInterpreter');
const CodeBrew = require('./CodeBrew');

module.exports = {
  availableTools,
  GoogleSearchAPI,
  HttpRequestTool,
  AIPluginTool,
  OpenAICreateImage,
  DALLE3,
  StableDiffusionAPI,
@@ -30,12 +25,11 @@ module.exports = {
  WolframAlphaAPI,
  StructuredWolfram,
  SelfReflectionTool,
  AzureCognitiveSearch,
  AzureAiSearch,
  StructuredACS,
  E2BTools,
  ChatTool,
  CodeSherpa,
  CodeSherpaTools,
  CodeInterpreter,
  CodeBrew,
};

@@ -143,38 +143,25 @@
    ]
  },
  {
    "name": "Azure Cognitive Search",
    "pluginKey": "azure-cognitive-search",
    "description": "Use Azure Cognitive Search to find information",
    "name": "Azure AI Search",
    "pluginKey": "azure-ai-search",
    "description": "Use Azure AI Search to find information",
    "icon": "https://i.imgur.com/E7crPze.png",
    "authConfig": [
      {
        "authField": "AZURE_COGNITIVE_SEARCH_SERVICE_ENDPOINT",
        "label": "Azur Cognitive Search Endpoint",
        "description": "You need to provide your Endpoint for Azure Cognitive Search."
        "authField": "AZURE_AI_SEARCH_SERVICE_ENDPOINT",
        "label": "Azure AI Search Endpoint",
        "description": "You need to provide your Endpoint for Azure AI Search."
      },
      {
        "authField": "AZURE_COGNITIVE_SEARCH_INDEX_NAME",
        "label": "Azur Cognitive Search Index Name",
        "description": "You need to provide your Index Name for Azure Cognitive Search."
        "authField": "AZURE_AI_SEARCH_INDEX_NAME",
        "label": "Azure AI Search Index Name",
        "description": "You need to provide your Index Name for Azure AI Search."
      },
      {
        "authField": "AZURE_COGNITIVE_SEARCH_API_KEY",
        "label": "Azur Cognitive Search API Key",
        "description": "You need to provideq your API Key for Azure Cognitive Search."
      }
    ]
  },
  {
    "name": "Code Interpreter",
    "pluginKey": "codeinterpreter",
    "description": "[Experimental] Analyze files and run code online with ease. Requires dockerized python server in /pyserver/",
    "icon": "/assets/code.png",
    "authConfig": [
      {
        "authField": "OPENAI_API_KEY",
        "label": "OpenAI API Key",
        "description": "Gets Code from Open AI API"
        "authField": "AZURE_AI_SEARCH_API_KEY",
        "label": "Azure AI Search API Key",
        "description": "You need to provide your API Key for Azure AI Search."
      }
    ]
  },

@@ -1,6 +1,7 @@
const axios = require('axios');
const fs = require('fs');
const path = require('path');
const axios = require('axios');
const { logger } = require('~/config');

async function saveImageFromUrl(url, outputPath, outputFilename) {
  try {
@@ -32,7 +33,7 @@ async function saveImageFromUrl(url, outputPath, outputFilename) {
      writer.on('error', reject);
    });
  } catch (error) {
    console.error('Error while saving the image:', error);
    logger.error('[saveImageFromUrl] Error while saving the image:', error);
  }
}

api/app/clients/tools/structured/AzureAISearch.js (new file, 104 lines)
@@ -0,0 +1,104 @@
const { z } = require('zod');
const { StructuredTool } = require('langchain/tools');
const { SearchClient, AzureKeyCredential } = require('@azure/search-documents');
const { logger } = require('~/config');

// [The remainder of this new file is identical, line for line, to
// api/app/clients/tools/AzureAiSearch.js, shown in full above.]

module.exports = AzureAISearch;
@@ -1,116 +0,0 @@
const { StructuredTool } = require('langchain/tools');
const { z } = require('zod');
const { SearchClient, AzureKeyCredential } = require('@azure/search-documents');

class AzureCognitiveSearch extends StructuredTool {
  constructor(fields = {}) {
    super();
    this.serviceEndpoint =
      fields.AZURE_COGNITIVE_SEARCH_SERVICE_ENDPOINT || this.getServiceEndpoint();
    this.indexName = fields.AZURE_COGNITIVE_SEARCH_INDEX_NAME || this.getIndexName();
    this.apiKey = fields.AZURE_COGNITIVE_SEARCH_API_KEY || this.getApiKey();

    this.apiVersion = fields.AZURE_COGNITIVE_SEARCH_API_VERSION || this.getApiVersion();

    this.queryType = fields.AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_QUERY_TYPE || this.getQueryType();
    this.top = fields.AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_TOP || this.getTop();
    this.select = fields.AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_SELECT || this.getSelect();

    this.client = new SearchClient(
      this.serviceEndpoint,
      this.indexName,
      new AzureKeyCredential(this.apiKey),
      {
        apiVersion: this.apiVersion,
      },
    );
    this.schema = z.object({
      query: z.string().describe('Search word or phrase to Azure Cognitive Search'),
    });
  }

  /**
   * The name of the tool.
   * @type {string}
   */
  name = 'azure-cognitive-search';

  /**
   * A description for the agent to use
   * @type {string}
   */
  description =
    'Use the \'azure-cognitive-search\' tool to retrieve search results relevant to your input';

  getServiceEndpoint() {
    const serviceEndpoint = process.env.AZURE_COGNITIVE_SEARCH_SERVICE_ENDPOINT || '';
    if (!serviceEndpoint) {
      throw new Error('Missing AZURE_COGNITIVE_SEARCH_SERVICE_ENDPOINT environment variable.');
    }
    return serviceEndpoint;
  }

  getIndexName() {
    const indexName = process.env.AZURE_COGNITIVE_SEARCH_INDEX_NAME || '';
    if (!indexName) {
      throw new Error('Missing AZURE_COGNITIVE_SEARCH_INDEX_NAME environment variable.');
    }
    return indexName;
  }

  getApiKey() {
    const apiKey = process.env.AZURE_COGNITIVE_SEARCH_API_KEY || '';
    if (!apiKey) {
      throw new Error('Missing AZURE_COGNITIVE_SEARCH_API_KEY environment variable.');
    }
    return apiKey;
  }

  getApiVersion() {
    return process.env.AZURE_COGNITIVE_SEARCH_API_VERSION || '2020-06-30';
  }

  getQueryType() {
    return process.env.AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_QUERY_TYPE || 'simple';
  }

  getTop() {
    if (process.env.AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_TOP) {
      return Number(process.env.AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_TOP);
    } else {
      return 5;
    }
  }

  getSelect() {
    if (process.env.AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_SELECT) {
      return process.env.AZURE_COGNITIVE_SEARCH_SEARCH_OPTION_SELECT.split(',');
    } else {
      return null;
    }
  }

  async _call(data) {
    const { query } = data;
    try {
      const searchOption = {
        queryType: this.queryType,
        top: this.top,
      };
      if (this.select) {
        searchOption.select = this.select;
      }
      const searchResults = await this.client.search(query, searchOption);
      const resultDocuments = [];
      for await (const result of searchResults.results) {
        resultDocuments.push(result.document);
      }
      return JSON.stringify(resultDocuments);
    } catch (error) {
      console.error(`Azure Cognitive Search request failed: ${error}`);
      return 'There was an error with Azure Cognitive Search.';
    }
  }
}

module.exports = AzureCognitiveSearch;
@@ -28,14 +28,14 @@ class RunCode extends StructuredTool {
  }

  async _call({ code, language = 'python' }) {
    // console.log('<--------------- Running Code --------------->', { code, language });
    // logger.debug('<--------------- Running Code --------------->', { code, language });
    const response = await axios({
      url: `${this.url}/repl`,
      method: 'post',
      headers: this.headers,
      data: { code, language },
    });
    // console.log('<--------------- Successfully ran Code --------------->', response.data);
    // logger.debug('<--------------- Successfully ran Code --------------->', response.data);
    return response.data.result;
  }
}

@@ -42,14 +42,14 @@ class RunCode extends StructuredTool {
  }

  async _call({ code, language = 'python' }) {
    // console.log('<--------------- Running Code --------------->', { code, language });
    // logger.debug('<--------------- Running Code --------------->', { code, language });
    const response = await axios({
      url: `${this.url}/repl`,
      method: 'post',
      headers: this.headers,
      data: { code, language },
    });
    // console.log('<--------------- Successfully ran Code --------------->', response.data);
    // logger.debug('<--------------- Successfully ran Code --------------->', response.data);
    return response.data.result;
  }
}

@@ -7,7 +7,9 @@ const OpenAI = require('openai');
const { Tool } = require('langchain/tools');
const { HttpsProxyAgent } = require('https-proxy-agent');
const saveImageFromUrl = require('../saveImageFromUrl');
const extractBaseURL = require('../../../../utils/extractBaseURL');
const extractBaseURL = require('~/utils/extractBaseURL');
const { logger } = require('~/config');

const { DALLE3_SYSTEM_PROMPT, DALLE_REVERSE_PROXY, PROXY } = process.env;
class DALLE3 extends Tool {
  constructor(fields = {}) {
@@ -126,9 +128,12 @@ Error Message: ${error.message}`;

    if (match) {
      imageName = match[0];
      console.log(imageName); // Output: img-lgCf7ppcbhqQrz6a5ear6FOb.png
      logger.debug('[DALL-E-3]', { imageName }); // Output: img-lgCf7ppcbhqQrz6a5ear6FOb.png
    } else {
      console.log('No image name found in the string.');
      logger.debug('[DALL-E-3] No image name found in the string.', {
        theImageUrl,
        data: resp.data[0],
      });
    }

    this.outputPath = path.resolve(
@@ -154,7 +159,7 @@ Error Message: ${error.message}`;
    await saveImageFromUrl(theImageUrl, this.outputPath, imageName);
    this.result = this.getMarkdownImageUrl(imageName);
  } catch (error) {
    console.error('Error while saving the image:', error);
    logger.error('Error while saving the image:', error);
    this.result = theImageUrl;
  }

@@ -1,9 +1,10 @@
const { z } = require('zod');
const axios = require('axios');
const { StructuredTool } = require('langchain/tools');
const { PromptTemplate } = require('langchain/prompts');
const { createExtractionChainFromZod } = require('./extractionChain');
// const { ChatOpenAI } = require('langchain/chat_models/openai');
const axios = require('axios');
const { z } = require('zod');
const { createExtractionChainFromZod } = require('./extractionChain');
const { logger } = require('~/config');

const envs = ['Nodejs', 'Go', 'Bash', 'Rust', 'Python3', 'PHP', 'Java', 'Perl', 'DotNET'];
const env = z.enum(envs);
@@ -34,8 +35,8 @@ async function extractEnvFromCode(code, model) {
  // const chatModel = new ChatOpenAI({ openAIApiKey, modelName: 'gpt-4-0613', temperature: 0 });
  const chain = createExtractionChainFromZod(zodSchema, model, { prompt, verbose: true });
  const result = await chain.run(code);
  console.log('<--------------- extractEnvFromCode --------------->');
  console.log(result);
  logger.debug('<--------------- extractEnvFromCode --------------->');
  logger.debug(result);
  return result.env;
}

@@ -69,7 +70,7 @@ class RunCommand extends StructuredTool {
  }

  async _call(data) {
    console.log(`<--------------- Running ${data} --------------->`);
    logger.debug(`<--------------- Running ${data} --------------->`);
    const response = await axios({
      url: `${this.url}/commands`,
      method: 'post',
@@ -96,7 +97,7 @@ class ReadFile extends StructuredTool {
  }

  async _call(data) {
    console.log(`<--------------- Reading ${data} --------------->`);
    logger.debug(`<--------------- Reading ${data} --------------->`);
    const response = await axios.get(`${this.url}/files`, { params: data, headers: this.headers });
    return response.data;
  }
@@ -121,12 +122,12 @@ class WriteFile extends StructuredTool {

  async _call(data) {
    let { env, path, content } = data;
    console.log(`<--------------- environment ${env} typeof ${typeof env}--------------->`);
    logger.debug(`<--------------- environment ${env} typeof ${typeof env}--------------->`);
    if (env && !envs.includes(env)) {
      console.log(`<--------------- Invalid environment ${env} --------------->`);
      logger.debug(`<--------------- Invalid environment ${env} --------------->`);
      env = await extractEnvFromCode(content, this.model);
    } else if (!env) {
      console.log('<--------------- Undefined environment --------------->');
      logger.debug('<--------------- Undefined environment --------------->');
      env = await extractEnvFromCode(content, this.model);
    }

@@ -139,7 +140,7 @@ class WriteFile extends StructuredTool {
        content,
      },
    };
    console.log('Writing to file', JSON.stringify(payload));
    logger.debug('Writing to file', JSON.stringify(payload));

    await axios({
      url: `${this.url}/files`,

@@ -1,10 +1,11 @@
// Generates image using stable diffusion webui's api (automatic1111)
const fs = require('fs');
const { StructuredTool } = require('langchain/tools');
const { z } = require('zod');
const path = require('path');
const axios = require('axios');
const sharp = require('sharp');
const { StructuredTool } = require('langchain/tools');
const { logger } = require('~/config');

class StableDiffusionAPI extends StructuredTool {
  constructor(fields) {
@@ -107,7 +108,7 @@ class StableDiffusionAPI extends StructuredTool {
      .toFile(this.outputPath + '/' + imageName);
    this.result = this.getMarkdownImageUrl(imageName);
  } catch (error) {
    console.error('Error while saving the image:', error);
    logger.error('[StableDiffusion] Error while saving the image:', error);
    // this.result = theImageUrl;
  }

@@ -1,7 +1,8 @@
/* eslint-disable no-useless-escape */
const axios = require('axios');
const { StructuredTool } = require('langchain/tools');
const { z } = require('zod');
const { StructuredTool } = require('langchain/tools');
const { logger } = require('~/config');

class WolframAlphaAPI extends StructuredTool {
  constructor(fields) {
@@ -47,7 +48,7 @@ class WolframAlphaAPI extends StructuredTool {
    const response = await axios.get(url, { responseType: 'text' });
    return response.data;
  } catch (error) {
    console.error(`Error fetching raw text: ${error}`);
    logger.error('[WolframAlphaAPI] Error fetching raw text:', error);
    throw error;
  }
}
@@ -78,11 +79,10 @@ class WolframAlphaAPI extends StructuredTool {
    return response;
  } catch (error) {
    if (error.response && error.response.data) {
      console.log('Error data:', error.response.data);
      logger.error('[WolframAlphaAPI] Error data:', error);
      return error.response.data;
    } else {
      console.log('Error querying Wolfram Alpha', error.message);
      // throw error;
      logger.error('[WolframAlphaAPI] Error querying Wolfram Alpha', error);
      return 'There was an error querying Wolfram Alpha.';
    }
  }
}

@@ -3,6 +3,7 @@ const path = require('path');
const OpenAI = require('openai');
const DALLE3 = require('../DALLE3');
const saveImageFromUrl = require('../../saveImageFromUrl');
const { logger } = require('~/config');

jest.mock('openai');

@@ -145,10 +146,13 @@ describe('DALLE3', () => {
        },
      ],
    };
    console.log = jest.fn(); // Mock console.log

    generate.mockResolvedValue(mockResponse);
    await dalle._call(mockData);
    expect(console.log).toHaveBeenCalledWith('No image name found in the string.');
    expect(logger.debug).toHaveBeenCalledWith('[DALL-E-3] No image name found in the string.', {
      data: { url: 'http://example.com/invalid-url' },
      theImageUrl: 'http://example.com/invalid-url',
    });
  });

  it('should create the directory if it does not exist', async () => {
@@ -182,9 +186,8 @@ describe('DALLE3', () => {
    const error = new Error('Error while saving the image');
    generate.mockResolvedValue(mockResponse);
    saveImageFromUrl.mockRejectedValue(error);
    console.error = jest.fn(); // Mock console.error
    const result = await dalle._call(mockData);
    expect(console.error).toHaveBeenCalledWith('Error while saving the image:', error);
    expect(logger.error).toHaveBeenCalledWith('Error while saving the image:', error);
    expect(result).toBe(mockResponse.data[0].url);
  });
});

@@ -1,4 +1,5 @@
const OpenAI = require('openai');
const { logger } = require('~/config');

/**
 * Handles errors that may occur when making requests to OpenAI's API.
@@ -12,14 +13,14 @@ const OpenAI = require('openai');
 */
async function handleOpenAIErrors(err, errorCallback, context = 'stream') {
  if (err instanceof OpenAI.APIError && err?.message?.includes('abort')) {
    console.warn(`[OpenAIClient.chatCompletion][${context}] Aborted Message`);
    logger.warn(`[OpenAIClient.chatCompletion][${context}] Aborted Message`);
  }
  if (err instanceof OpenAI.OpenAIError && err?.message?.includes('missing finish_reason')) {
    console.warn(`[OpenAIClient.chatCompletion][${context}] Missing finish_reason`);
    logger.warn(`[OpenAIClient.chatCompletion][${context}] Missing finish_reason`);
  } else if (err instanceof OpenAI.APIError) {
    console.warn(`[OpenAIClient.chatCompletion][${context}] API Error`);
    logger.warn(`[OpenAIClient.chatCompletion][${context}] API error`);
  } else {
    console.warn(`[OpenAIClient.chatCompletion][${context}] Unhandled error type`);
    logger.warn(`[OpenAIClient.chatCompletion][${context}] Unhandled error type`);
  }

  if (errorCallback) {

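A short sketch of how this handler is typically invoked from a streaming call site; the callback body is an assumption for illustration:

try {
  // ... stream a chat completion ...
} catch (err) {
  await handleOpenAIErrors(err, (e) => logger.error('[example callback]', e), 'stream');
}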
@@ -1,31 +1,28 @@
const { getUserPluginAuthValue } = require('../../../../server/services/PluginService');
const { OpenAIEmbeddings } = require('langchain/embeddings/openai');
const { ZapierToolKit } = require('langchain/agents');
const { SerpAPI, ZapierNLAWrapper } = require('langchain/tools');
const { ChatOpenAI } = require('langchain/chat_models/openai');
const { Calculator } = require('langchain/tools/calculator');
const { WebBrowser } = require('langchain/tools/webbrowser');
const { SerpAPI, ZapierNLAWrapper } = require('langchain/tools');
const { OpenAIEmbeddings } = require('langchain/embeddings/openai');
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const {
  availableTools,
  CodeInterpreter,
  AIPluginTool,
  GoogleSearchAPI,
  WolframAlphaAPI,
  StructuredWolfram,
  HttpRequestTool,
  OpenAICreateImage,
  StableDiffusionAPI,
  DALLE3,
  StructuredSD,
  AzureCognitiveSearch,
  AzureAISearch,
  StructuredACS,
  E2BTools,
  CodeSherpa,
  CodeSherpaTools,
  CodeBrew,
} = require('../');
const { loadSpecs } = require('./loadSpecs');
const { loadToolSuite } = require('./loadToolSuite');
const { loadSpecs } = require('./loadSpecs');
const { logger } = require('~/config');

const getOpenAIKey = async (options, user) => {
  let openAIApiKey = options.openAIApiKey ?? process.env.OPENAI_API_KEY;
@@ -65,7 +62,7 @@ const validateTools = async (user, tools = []) => {

    return Array.from(validToolsSet.values());
  } catch (err) {
    console.log('There was a problem validating tools', err);
    logger.error('[validateTools] There was a problem validating tools', err);
    throw new Error(err);
  }
};
@@ -96,12 +93,11 @@ const loadTools = async ({
}) => {
  const toolConstructors = {
    calculator: Calculator,
    codeinterpreter: CodeInterpreter,
    google: GoogleSearchAPI,
    wolfram: functions ? StructuredWolfram : WolframAlphaAPI,
    'dall-e': OpenAICreateImage,
    'stable-diffusion': functions ? StructuredSD : StableDiffusionAPI,
    'azure-cognitive-search': functions ? StructuredACS : AzureCognitiveSearch,
    'azure-ai-search': functions ? StructuredACS : AzureAISearch,
    CodeBrew: CodeBrew,
  };

@@ -163,15 +159,6 @@ const loadTools = async ({
      const zapier = new ZapierNLAWrapper({ apiKey });
      return ZapierToolKit.fromZapierNLAWrapper(zapier);
    },
    plugins: async () => {
      return [
        new HttpRequestTool(),
        await AIPluginTool.fromPluginUrl(
          'https://www.klarna.com/.well-known/ai-plugin.json',
          new ChatOpenAI({ openAIApiKey: options.openAIApiKey, temperature: 0 }),
        ),
      ];
    },
  };

  const requestedTools = {};

@@ -1,7 +1,8 @@
const fs = require('fs');
const path = require('path');
const { z } = require('zod');
const { createOpenAPIPlugin } = require('../dynamic/OpenAPIPlugin');
const { logger } = require('~/config');
const { createOpenAPIPlugin } = require('~/app/clients/tools/dynamic/OpenAPIPlugin');

// The minimum Manifest definition
const ManifestDefinition = z.object({
@@ -26,28 +27,17 @@ const ManifestDefinition = z.object({
  legal_info_url: z.string().optional(),
});

function validateJson(json, verbose = true) {
function validateJson(json) {
  try {
    return ManifestDefinition.parse(json);
  } catch (error) {
    if (verbose) {
      console.debug('validateJson error', error);
    }
    logger.debug('[validateJson] manifest parsing error', error);
    return false;
  }
}

// omit the LLM to return the well known jsons as objects
async function loadSpecs({
  llm,
  user,
  message,
  tools = [],
  map = false,
  memory,
  signal,
  verbose = false,
}) {
async function loadSpecs({ llm, user, message, tools = [], map = false, memory, signal }) {
  const directoryPath = path.join(__dirname, '..', '.well-known');
  let files = [];

@@ -60,7 +50,7 @@ async function loadSpecs({
      await fs.promises.access(filePath, fs.constants.F_OK);
      files.push(tools[i] + '.json');
    } catch (err) {
      console.error(`File ${tools[i] + '.json'} does not exist`);
      logger.error(`[loadSpecs] File ${tools[i] + '.json'} does not exist`, err);
    }
  }

@@ -73,9 +63,7 @@ async function loadSpecs({
  const validJsons = [];
  const constructorMap = {};

  if (verbose) {
    console.debug('files', files);
  }
  logger.debug('[validateJson] files', files);

  for (const file of files) {
    if (path.extname(file) === '.json') {
@@ -84,7 +72,7 @@ async function loadSpecs({
      const json = JSON.parse(fileContent);

      if (!validateJson(json)) {
        verbose && console.debug('Invalid json', json);
        logger.debug('[validateJson] Invalid json', json);
        continue;
      }

@@ -97,13 +85,12 @@ async function loadSpecs({
          memory,
          signal,
          user,
          verbose,
        });
        continue;
      }

      if (llm) {
        validJsons.push(createOpenAPIPlugin({ data: json, llm, verbose }));
        validJsons.push(createOpenAPIPlugin({ data: json, llm }));
        continue;
      }

@@ -117,10 +104,8 @@ async function loadSpecs({

  const plugins = (await Promise.all(validJsons)).filter((plugin) => plugin);

  // if (verbose) {
  //   console.debug('plugins', plugins);
  //   console.debug(plugins[0].name);
  // }
  // logger.debug('[validateJson] plugins', plugins);
  // logger.debug(plugins[0].name);

  return plugins;
}

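For context, a sketch of invoking `loadSpecs` with its trimmed signature; the tool name below is illustrative, and per the comment in the file, omitting `llm` returns the well-known JSON manifests as plain objects:

// Sketch, inside an async loader:
const plugins = await loadSpecs({
  llm,                   // optional; omit to get raw manifest objects back
  user: 'user-id',
  message: 'current user message',
  tools: ['example-plugin'], // matched against .well-known/*.json files
  memory,
  signal,
});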
@@ -1,5 +1,6 @@
const { isEnabled } = require('../server/utils');
const throttle = require('lodash/throttle');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');

const titleConvo = async ({ text, response }) => {
  let title = 'New Chat';
@@ -30,11 +31,10 @@ const titleConvo = async ({ text, response }) => {
    const res = await titleGenerator.sendMessage(titlePrompt, options);
    title = res.response.replace(/Title: /, '').replace(/[".]/g, '');
  } catch (e) {
    console.error(e);
    console.log('There was an issue generating title, see error above');
    logger.error('There was an issue generating title with BingAI', e);
  }

  console.log('CONVERSATION TITLE', title);
  logger.debug('[/ask/bingAI] CONVERSATION TITLE: ' + title);
  return title;
};

api/cache/banViolation.js (vendored, 8 changes)
@@ -1,6 +1,8 @@
const Session = require('../models/Session');
const Session = require('~/models/Session');
const getLogStores = require('./getLogStores');
const { isEnabled, math, removePorts } = require('../server/utils');
const { isEnabled, math, removePorts } = require('~/server/utils');
const { logger } = require('~/config');

const { BAN_VIOLATIONS, BAN_INTERVAL } = process.env ?? {};
const interval = math(BAN_INTERVAL, 20);

@@ -54,7 +56,7 @@ const banViolation = async (req, res, errorMessage) => {
  }

  req.ip = removePorts(req);
  console.log(
  logger.info(
    `[BAN] Banning user ${user_id} ${req.ip ? `@ ${req.ip} ` : ''}for ${
      duration / 1000 / 60
    } minutes`,

api/cache/getLogStores.js (vendored, 8 changes)
@@ -1,7 +1,8 @@
const Keyv = require('keyv');
const keyvMongo = require('./keyvMongo');
const keyvRedis = require('./keyvRedis');
const { math, isEnabled } = require('../server/utils');
const { CacheKeys } = require('~/common/enums');
const { math, isEnabled } = require('~/server/utils');
const { logFile, violationFile } = require('./keyvFiles');
const { BAN_DURATION, USE_REDIS } = process.env ?? {};

@@ -17,7 +18,12 @@ const pending_req = isEnabled(USE_REDIS)
  ? new Keyv({ store: keyvRedis })
  : new Keyv({ namespace: 'pending_req' });

const config = isEnabled(USE_REDIS)
  ? new Keyv({ store: keyvRedis })
  : new Keyv({ namespace: CacheKeys.CONFIG });

const namespaces = {
  config,
  pending_req,
  ban: new Keyv({ store: keyvMongo, namespace: 'bans', ttl: duration }),
  general: new Keyv({ store: logFile, namespace: 'violations' }),

api/cache/keyvMongo.js (vendored, 4 changes)
@@ -1,7 +1,9 @@
const KeyvMongo = require('@keyv/mongo');
const { logger } = require('~/config');

const { MONGO_URI } = process.env ?? {};

const keyvMongo = new KeyvMongo(MONGO_URI, { collection: 'logs' });
keyvMongo.on('error', (err) => console.error('KeyvMongo connection error:', err));
keyvMongo.on('error', (err) => logger.error('KeyvMongo connection error:', err));

module.exports = keyvMongo;

api/cache/keyvRedis.js (vendored, 13 changes)

@@ -1,14 +1,19 @@
 const KeyvRedis = require('@keyv/redis');
+const { logger } = require('~/config');
+const { isEnabled } = require('~/server/utils');

-const { REDIS_URI } = process.env;
+const { REDIS_URI, USE_REDIS } = process.env;

 let keyvRedis;

-if (REDIS_URI) {
+if (REDIS_URI && isEnabled(USE_REDIS)) {
   keyvRedis = new KeyvRedis(REDIS_URI, { useRedisSets: false });
-  keyvRedis.on('error', (err) => console.error('KeyvRedis connection error:', err));
+  keyvRedis.on('error', (err) => logger.error('KeyvRedis connection error:', err));
   keyvRedis.setMaxListeners(20);
 } else {
-  // console.log('REDIS_URI not provided. Redis module will not be initialized.');
+  logger.info(
+    '`REDIS_URI` not provided, or `USE_REDIS` not set. Redis module will not be initialized.',
+  );
 }

 module.exports = keyvRedis;
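With this change, setting REDIS_URI alone no longer activates Redis; USE_REDIS must also be truthy. A minimal sketch of the gate, assuming isEnabled (defined in ~/server/utils, not shown in this diff) simply normalizes an env string:

// Hypothetical stand-in for isEnabled from ~/server/utils (assumption, not the actual source)
const isEnabled = (value) => typeof value === 'string' && value.toLowerCase().trim() === 'true';

console.log(isEnabled('true')); // true
console.log(isEnabled('TRUE')); // true
console.log(isEnabled('')); // false: Redis stays off even if REDIS_URI is set
console.log(isEnabled(undefined)); // false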
api/common/enums.js (new file, 17 lines)

@@ -0,0 +1,17 @@
/**
 * @typedef {Object} CacheKeys
 * @property {'config'} CONFIG - Key for the config cache.
 * @property {'plugins'} PLUGINS - Key for the plugins cache.
 * @property {'modelsConfig'} MODELS_CONFIG - Key for the model config cache.
 * @property {'defaultConfig'} DEFAULT_CONFIG - Key for the default config cache.
 * @property {'overrideConfig'} OVERRIDE_CONFIG - Key for the override config cache.
 */
const CacheKeys = {
  CONFIG: 'config',
  PLUGINS: 'plugins',
  MODELS_CONFIG: 'modelsConfig',
  DEFAULT_CONFIG: 'defaultConfig',
  OVERRIDE_CONFIG: 'overrideConfig',
};

module.exports = { CacheKeys };
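A minimal sketch of how the new CacheKeys enum is meant to be consumed, mirroring the getLogStores change above; the Keyv construction comes from this diff, while the key name and payload in the get/set calls are made up for illustration:

const Keyv = require('keyv');
const { CacheKeys } = require('~/common/enums');

// Namespacing with the enum instead of a bare string keeps every
// consumer pointing at the same 'config' bucket.
const configCache = new Keyv({ namespace: CacheKeys.CONFIG });

async function demo() {
  await configCache.set('startupConfig', { appTitle: 'LibreChat' }); // hypothetical payload
  const cached = await configCache.get('startupConfig');
  console.log(cached); // { appTitle: 'LibreChat' }
}

demo();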
api/config/index.js (new file, 5 lines)

@@ -0,0 +1,5 @@
const logger = require('./winston');

module.exports = {
  logger,
};
api/config/meiliLogger.js (new file, 78 lines)

@@ -0,0 +1,78 @@
const path = require('path');
const winston = require('winston');
require('winston-daily-rotate-file');

const logDir = path.join(__dirname, '..', 'logs');

const { NODE_ENV } = process.env;

const levels = {
  error: 0,
  warn: 1,
  info: 2,
  http: 3,
  verbose: 4,
  debug: 5,
  activity: 6,
  silly: 7,
};

winston.addColors({
  info: 'green', // fontStyle color
  warn: 'italic yellow',
  error: 'red',
  debug: 'blue',
});

const level = () => {
  const env = NODE_ENV || 'development';
  const isDevelopment = env === 'development';
  return isDevelopment ? 'debug' : 'warn';
};

const fileFormat = winston.format.combine(
  winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }),
  winston.format.errors({ stack: true }),
  winston.format.splat(),
);

const transports = [
  new winston.transports.DailyRotateFile({
    level: 'debug',
    filename: `${logDir}/meiliSync-%DATE%.log`,
    datePattern: 'YYYY-MM-DD',
    zippedArchive: true,
    maxSize: '20m',
    maxFiles: '14d',
    format: fileFormat,
  }),
];

// if (NODE_ENV !== 'production') {
//   transports.push(
//     new winston.transports.Console({
//       format: winston.format.combine(winston.format.colorize(), winston.format.simple()),
//     }),
//   );
// }

const consoleFormat = winston.format.combine(
  winston.format.colorize({ all: true }),
  winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }),
  winston.format.printf((info) => `${info.timestamp} ${info.level}: ${info.message}`),
);

transports.push(
  new winston.transports.Console({
    level: 'info',
    format: consoleFormat,
  }),
);

const logger = winston.createLogger({
  level: level(),
  levels,
  transports,
});

module.exports = logger;
api/config/parsers.js (new file, 160 lines)

@@ -0,0 +1,160 @@
const winston = require('winston');
const traverse = require('traverse');
const { klona } = require('klona/full');

const SPLAT_SYMBOL = Symbol.for('splat');
const MESSAGE_SYMBOL = Symbol.for('message');

const sensitiveKeys = [/^(sk-)[^\s]+/, /(Bearer )[^\s]+/, /(api-key:? )[^\s]+/, /(key=)[^\s]+/];

/**
 * Determines if a given value string is sensitive and returns matching regex patterns.
 *
 * @param {string} valueStr - The value string to check.
 * @returns {Array<RegExp>} An array of regex patterns that match the value string.
 */
function getMatchingSensitivePatterns(valueStr) {
  if (valueStr) {
    // Filter and return all regex patterns that match the value string
    return sensitiveKeys.filter((regex) => regex.test(valueStr));
  }
  return [];
}

/**
 * Redacts sensitive information from a console message.
 *
 * @param {string} str - The console message to be redacted.
 * @returns {string} - The redacted console message.
 */
function redactMessage(str) {
  const patterns = getMatchingSensitivePatterns(str);

  if (patterns.length === 0) {
    return str;
  }

  patterns.forEach((pattern) => {
    str = str.replace(pattern, '$1[REDACTED]');
  });

  return str;
}

/**
 * Redacts sensitive information from log messages if the log level is 'error'.
 * Note: Intentionally mutates the object.
 * @param {Object} info - The log information object.
 * @returns {Object} - The modified log information object.
 */
const redactFormat = winston.format((info) => {
  if (info.level === 'error') {
    info.message = redactMessage(info.message);
    if (info[MESSAGE_SYMBOL]) {
      info[MESSAGE_SYMBOL] = redactMessage(info[MESSAGE_SYMBOL]);
    }
  }
  return info;
});

/**
 * Truncates long strings, especially base64 image data, within log messages.
 *
 * @param {any} value - The value to be inspected and potentially truncated.
 * @param {number} [length] - The length at which to truncate the value. Default: 100.
 * @returns {any} - The truncated or original value.
 */
const truncateLongStrings = (value, length = 100) => {
  if (typeof value === 'string') {
    return value.length > length ? value.substring(0, length) + '... [truncated]' : value;
  }

  return value;
};

/**
 * An array mapping function that truncates long strings (objects converted to JSON strings).
 * @param {any} item - The item to be condensed.
 * @returns {any} - The condensed item.
 */
const condenseArray = (item) => {
  if (typeof item === 'string') {
    return truncateLongStrings(JSON.stringify(item));
  } else if (typeof item === 'object') {
    return truncateLongStrings(JSON.stringify(item));
  }
  return item;
};

/**
 * Formats log messages for debugging purposes.
 * - Truncates long strings within log messages.
 * - Condenses arrays by truncating long strings and objects as strings within array items.
 * - Redacts sensitive information from log messages if the log level is 'error'.
 * - Converts log information object to a formatted string.
 *
 * @param {Object} options - The options for formatting log messages.
 * @param {string} options.level - The log level.
 * @param {string} options.message - The log message.
 * @param {string} options.timestamp - The timestamp of the log message.
 * @param {Object} options.metadata - Additional metadata associated with the log message.
 * @returns {string} - The formatted log message.
 */
const debugTraverse = winston.format.printf(({ level, message, timestamp, ...metadata }) => {
  let msg = `${timestamp} ${level}: ${truncateLongStrings(message?.trim(), 150)}`;

  if (level !== 'debug') {
    return msg;
  }

  if (!metadata) {
    return msg;
  }

  const debugValue = metadata[SPLAT_SYMBOL]?.[0];

  if (!debugValue) {
    return msg;
  }

  if (debugValue && Array.isArray(debugValue)) {
    msg += `\n${JSON.stringify(debugValue.map(condenseArray))}`;
    return msg;
  }

  if (typeof debugValue !== 'object') {
    return (msg += ` ${debugValue}`);
  }

  msg += '\n{';

  const copy = klona(metadata);
  traverse(copy).forEach(function (value) {
    const parent = this.parent;
    const parentKey = `${parent && parent.notRoot ? parent.key + '.' : ''}`;
    const tabs = `${parent && parent.notRoot ? '\t\t' : '\t'}`;
    if (this.isLeaf && typeof value === 'string') {
      const truncatedText = truncateLongStrings(value);
      msg += `\n${tabs}${parentKey}${this.key}: ${JSON.stringify(truncatedText)},`;
    } else if (this.notLeaf && Array.isArray(value) && value.length > 0) {
      const currentMessage = `\n${tabs}// ${value.length} ${this.key.replace(/s$/, '')}(s)`;
      this.update(currentMessage, true);
      msg += currentMessage;
      const stringifiedArray = value.map(condenseArray);
      msg += `\n${tabs}${parentKey}${this.key}: [${stringifiedArray}],`;
    } else if (this.isLeaf && typeof value === 'function') {
      msg += `\n${tabs}${parentKey}${this.key}: function,`;
    } else if (this.isLeaf) {
      msg += `\n${tabs}${parentKey}${this.key}: ${value},`;
    }
  });

  msg += '\n}';
  return msg;
});

module.exports = {
  redactFormat,
  redactMessage,
  debugTraverse,
};
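To make the redaction concrete, here is how the exported helper behaves when called directly; the inputs are invented, but the functions are exactly the ones defined above:

const { redactMessage } = require('~/config/parsers');

// Capture group 1 ('Bearer ') is kept; the secret after it is dropped.
redactMessage('Request failed: Bearer abc123XYZ rejected');
// -> 'Request failed: Bearer [REDACTED] rejected'

// The sk- pattern is anchored, so it only fires when the string starts with the key.
redactMessage('sk-0123456789abcdef leaked');
// -> 'sk-[REDACTED] leaked'

redactMessage('nothing sensitive here');
// -> returned unchanged; no pattern matched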
api/config/paths.js (new file, 6 lines)

@@ -0,0 +1,6 @@
const path = require('path');

module.exports = {
  publicPath: path.resolve(__dirname, '..', '..', 'client', 'public'),
  imageOutput: path.resolve(__dirname, '..', '..', 'client', 'public', 'images'),
};
api/config/winston.js (new file, 127 lines)

@@ -0,0 +1,127 @@
const path = require('path');
const winston = require('winston');
require('winston-daily-rotate-file');
const { redactFormat, redactMessage, debugTraverse } = require('./parsers');

const logDir = path.join(__dirname, '..', 'logs');

const { NODE_ENV, DEBUG_LOGGING = true, DEBUG_CONSOLE = false } = process.env;

const levels = {
  error: 0,
  warn: 1,
  info: 2,
  http: 3,
  verbose: 4,
  debug: 5,
  activity: 6,
  silly: 7,
};

winston.addColors({
  info: 'green', // fontStyle color
  warn: 'italic yellow',
  error: 'red',
  debug: 'blue',
});

const level = () => {
  const env = NODE_ENV || 'development';
  const isDevelopment = env === 'development';
  return isDevelopment ? 'debug' : 'warn';
};

const fileFormat = winston.format.combine(
  redactFormat(),
  winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }),
  winston.format.errors({ stack: true }),
  winston.format.splat(),
  // redactErrors(),
);

const transports = [
  new winston.transports.DailyRotateFile({
    level: 'error',
    filename: `${logDir}/error-%DATE%.log`,
    datePattern: 'YYYY-MM-DD',
    zippedArchive: true,
    maxSize: '20m',
    maxFiles: '14d',
    format: fileFormat,
  }),
  // new winston.transports.DailyRotateFile({
  //   level: 'info',
  //   filename: `${logDir}/info-%DATE%.log`,
  //   datePattern: 'YYYY-MM-DD',
  //   zippedArchive: true,
  //   maxSize: '20m',
  //   maxFiles: '14d',
  // }),
];

// if (NODE_ENV !== 'production') {
//   transports.push(
//     new winston.transports.Console({
//       format: winston.format.combine(winston.format.colorize(), winston.format.simple()),
//     }),
//   );
// }

if (
  (typeof DEBUG_LOGGING === 'string' && DEBUG_LOGGING?.toLowerCase() === 'true') ||
  DEBUG_LOGGING === true
) {
  transports.push(
    new winston.transports.DailyRotateFile({
      level: 'debug',
      filename: `${logDir}/debug-%DATE%.log`,
      datePattern: 'YYYY-MM-DD',
      zippedArchive: true,
      maxSize: '20m',
      maxFiles: '14d',
      format: winston.format.combine(fileFormat, debugTraverse),
    }),
  );
}

const consoleFormat = winston.format.combine(
  redactFormat(),
  winston.format.colorize({ all: true }),
  winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }),
  // redactErrors(),
  winston.format.printf((info) => {
    const message = `${info.timestamp} ${info.level}: ${info.message}`;
    if (info.level.includes('error')) {
      return redactMessage(message);
    }

    return message;
  }),
);

if (
  (typeof DEBUG_CONSOLE === 'string' && DEBUG_CONSOLE?.toLowerCase() === 'true') ||
  DEBUG_CONSOLE === true
) {
  transports.push(
    new winston.transports.Console({
      level: 'debug',
      format: winston.format.combine(consoleFormat, debugTraverse),
    }),
  );
} else {
  transports.push(
    new winston.transports.Console({
      level: 'info',
      format: consoleFormat,
    }),
  );
}

const logger = winston.createLogger({
  level: level(),
  levels,
  transports,
});

module.exports = logger;
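In practice the two env flags decide where debug output lands. A short usage sketch; the log calls are illustrative, while the routing in the comments follows from the transport setup above:

const { logger } = require('~/config');

// With the defaults (DEBUG_LOGGING=true, DEBUG_CONSOLE=false):
// - errors go to logs/error-%DATE%.log, with redactFormat applied
// - debug entries go to logs/debug-%DATE%.log only, not the console
// - the console prints info and above
logger.info('Server started');
logger.debug('[example] payload received', { userId: 'abc', text: 'a long message...' });
logger.error('Upstream call failed: Bearer some-token rejected'); // token is redacted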
@@ -3,5 +3,14 @@ module.exports = {
   clearMocks: true,
   roots: ['<rootDir>'],
   coverageDirectory: 'coverage',
-  setupFiles: ['./test/jestSetup.js', './test/__mocks__/KeyvMongo.js'],
+  setupFiles: [
+    './test/jestSetup.js',
+    './test/__mocks__/KeyvMongo.js',
+    './test/__mocks__/logger.js',
+    './test/__mocks__/fetchEventSource.js',
+  ],
+  moduleNameMapper: {
+    '~/(.*)': '<rootDir>/$1',
+    '~/data/auth.json': '<rootDir>/__mocks__/auth.mock.json',
+  },
 };
@@ -1,8 +1,10 @@
-const Conversation = require('../../models/schema/convoSchema');
-const Message = require('../../models/schema/messageSchema');
 const { MeiliSearch } = require('meilisearch');
-let currentTimeout = null;
+const Message = require('~/models/schema/messageSchema');
+const Conversation = require('~/models/schema/convoSchema');
+const { logger } = require('~/config');

 const searchEnabled = process.env?.SEARCH?.toLowerCase() === 'true';
+let currentTimeout = null;

 // eslint-disable-next-line no-unused-vars
 async function indexSync(req, res, next) {

@@ -21,7 +23,7 @@ async function indexSync(req, res, next) {
   });

   const { status } = await client.health();
-  // console.log(`Meilisearch: ${status}`);
+  // logger.debug(`[indexSync] Meilisearch: ${status}`);
   const result = status === 'available' && !!process.env.SEARCH;

   if (!result) {

@@ -35,39 +37,45 @@ async function indexSync(req, res, next) {
     const messagesIndexed = messages.numberOfDocuments;
     const convosIndexed = convos.numberOfDocuments;

-    console.log(`There are ${messageCount} messages in the database, ${messagesIndexed} indexed`);
-    console.log(`There are ${convoCount} convos in the database, ${convosIndexed} indexed`);
+    logger.debug(
+      `[indexSync] There are ${messageCount} messages in the database, ${messagesIndexed} indexed`,
+    );
+    logger.debug(
+      `[indexSync] There are ${convoCount} convos in the database, ${convosIndexed} indexed`,
+    );

     if (messageCount !== messagesIndexed) {
-      console.log('Messages out of sync, indexing');
+      logger.debug('[indexSync] Messages out of sync, indexing');
       Message.syncWithMeili();
     }

     if (convoCount !== convosIndexed) {
-      console.log('Convos out of sync, indexing');
+      logger.debug('[indexSync] Convos out of sync, indexing');
       Conversation.syncWithMeili();
     }
   } catch (err) {
-    // console.log('in index sync');
+    // logger.debug('[indexSync] in index sync');
     if (err.message.includes('not found')) {
-      console.log('Creating indices...');
+      logger.debug('[indexSync] Creating indices...');
       currentTimeout = setTimeout(async () => {
         try {
           await Message.syncWithMeili();
           await Conversation.syncWithMeili();
         } catch (err) {
-          console.error('Trouble creating indices, try restarting the server.');
+          logger.error('[indexSync] Trouble creating indices, try restarting the server.', err);
         }
       }, 750);
+    } else if (err.message.includes('Meilisearch not configured')) {
+      logger.info('[indexSync] Meilisearch not configured, search will be disabled.');
     } else {
-      console.error(err);
+      logger.error('[indexSync] error', err);
       // res.status(500).json({ error: 'Server error' });
     }
   }
 }

 process.on('exit', () => {
-  console.log('Clearing sync timeouts before exiting...');
+  logger.debug('[indexSync] Clearing sync timeouts before exiting...');
   clearTimeout(currentTimeout);
 });
@@ -1,24 +1,30 @@
 const mongoose = require('mongoose');
 const balanceSchema = require('./schema/balance');
 const { getMultiplier } = require('./tx');
+const { logger } = require('~/config');

-balanceSchema.statics.check = async function ({ user, model, valueKey, tokenType, amount, debug }) {
-  const multiplier = getMultiplier({ valueKey, tokenType, model });
+balanceSchema.statics.check = async function ({
+  user,
+  model,
+  endpoint,
+  valueKey,
+  tokenType,
+  amount,
+}) {
+  const multiplier = getMultiplier({ valueKey, tokenType, model, endpoint });
   const tokenCost = amount * multiplier;
   const { tokenCredits: balance } = (await this.findOne({ user }, 'tokenCredits').lean()) ?? {};

-  if (debug) {
-    console.log('balance check', {
-      user,
-      model,
-      valueKey,
-      tokenType,
-      amount,
-      debug,
-      balance,
-      multiplier,
-    });
-  }
+  logger.debug('[Balance.check]', {
+    user,
+    model,
+    endpoint,
+    valueKey,
+    tokenType,
+    amount,
+    balance,
+    multiplier,
+  });

   if (!balance) {
     return {

@@ -28,9 +34,7 @@ balanceSchema.statics.check = async function ({ user, model, valueKey, tokenType
     };
   }

-  if (debug) {
-    console.log('balance check', { tokenCost });
-  }
+  logger.debug('[Balance.check]', { tokenCost });

   return { canSpend: balance >= tokenCost, balance, tokenCost };
 };
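The check itself is plain arithmetic: the requested token amount is scaled by the model's rate and compared against the stored credits. A worked example with made-up numbers:

// Suppose getMultiplier({ valueKey, tokenType: 'prompt', model, endpoint }) returns 3
// and the user's stored tokenCredits balance is 10000.
const amount = 2500; // prompt tokens requested
const multiplier = 3; // hypothetical rate for this model/endpoint
const tokenCost = amount * multiplier; // 7500
const balance = 10000;
const canSpend = balance >= tokenCost; // true, with 2500 credits left after spending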
@@ -1,4 +1,6 @@
 const mongoose = require('mongoose');
+const { logger } = require('~/config');

 const major = [0, 0];
 const minor = [0, 0];
 const patch = [0, 5];

@@ -69,7 +71,7 @@ module.exports = {
     try {
       return await Config.find(filter).lean();
     } catch (error) {
-      console.error(error);
+      logger.error('Error getting configs', error);
       return { config: 'Error getting configs' };
     }
   },

@@ -77,7 +79,7 @@ module.exports = {
     try {
       return await Config.deleteMany(filter);
     } catch (error) {
-      console.error(error);
+      logger.error('Error deleting configs', error);
       return { config: 'Error deleting configs' };
     }
   },
@@ -1,12 +1,12 @@
 // const { Conversation } = require('./plugins');
 const Conversation = require('./schema/convoSchema');
 const { getMessages, deleteMessages } = require('./Message');
+const logger = require('~/config/winston');

 const getConvo = async (user, conversationId) => {
   try {
     return await Conversation.findOne({ user, conversationId }).lean();
   } catch (error) {
-    console.log(error);
+    logger.error('[getConvo] Error getting single conversation', error);
     return { message: 'Error getting single conversation' };
   }
 };

@@ -26,7 +26,7 @@ module.exports = {
         upsert: true,
       });
     } catch (error) {
-      console.log(error);
+      logger.error('[saveConvo] Error saving conversation', error);
       return { message: 'Error saving conversation' };
     }
   },

@@ -41,7 +41,7 @@ module.exports = {
         .lean();
       return { conversations: convos, pages: totalPages, pageNumber, pageSize };
     } catch (error) {
-      console.log(error);
+      logger.error('[getConvosByPage] Error getting conversations', error);
       return { message: 'Error getting conversations' };
     }
   },

@@ -87,7 +87,7 @@ module.exports = {
         convoMap,
       };
     } catch (error) {
-      console.log(error);
+      logger.error('[getConvosQueried] Error getting conversations', error);
       return { message: 'Error fetching conversations' };
     }
   },

@@ -104,7 +104,7 @@ module.exports = {
         return convo?.title || 'New Chat';
       }
     } catch (error) {
-      console.log(error);
+      logger.error('[getConvoTitle] Error getting conversation title', error);
       return { message: 'Error getting conversation title' };
     }
   },

@@ -123,7 +123,7 @@ module.exports = {
   * const user = 'someUserId';
   * const filter = { someField: 'someValue' };
   * const result = await deleteConvos(user, filter);
-  * console.log(result); // { n: 5, ok: 1, deletedCount: 5, messages: { n: 10, ok: 1, deletedCount: 10 } }
+  * logger.error(result); // { n: 5, ok: 1, deletedCount: 5, messages: { n: 10, ok: 1, deletedCount: 10 } }
   */
  deleteConvos: async (user, filter) => {
    let toRemove = await Conversation.find({ ...filter, user }).select('conversationId');
api/models/File.js (new file, 96 lines)

@@ -0,0 +1,96 @@
const mongoose = require('mongoose');
const fileSchema = require('./schema/fileSchema');

const File = mongoose.model('File', fileSchema);

/**
 * Finds a file by its file_id with additional query options.
 * @param {string} file_id - The unique identifier of the file.
 * @param {object} options - Query options for filtering, projection, etc.
 * @returns {Promise<MongoFile>} A promise that resolves to the file document or null.
 */
const findFileById = async (file_id, options = {}) => {
  return await File.findOne({ file_id, ...options }).lean();
};

/**
 * Retrieves files matching a given filter.
 * @param {Object} filter - The filter criteria to apply.
 * @returns {Promise<Array<MongoFile>>} A promise that resolves to an array of file documents.
 */
const getFiles = async (filter) => {
  return await File.find(filter).lean();
};

/**
 * Creates a new file with a TTL of 1 hour.
 * @param {MongoFile} data - The file data to be created, must contain file_id.
 * @returns {Promise<MongoFile>} A promise that resolves to the created file document.
 */
const createFile = async (data) => {
  const fileData = {
    ...data,
    expiresAt: new Date(Date.now() + 3600 * 1000),
  };
  return await File.findOneAndUpdate({ file_id: data.file_id }, fileData, {
    new: true,
    upsert: true,
  }).lean();
};

/**
 * Updates a file identified by file_id with new data and removes the TTL.
 * @param {MongoFile} data - The data to update, must contain file_id.
 * @returns {Promise<MongoFile>} A promise that resolves to the updated file document.
 */
const updateFile = async (data) => {
  const { file_id, ...update } = data;
  const updateOperation = {
    $set: update,
    $unset: { expiresAt: '' }, // Remove the expiresAt field to prevent TTL
  };
  return await File.findOneAndUpdate({ file_id }, updateOperation, { new: true }).lean();
};

/**
 * Increments the usage of a file identified by file_id.
 * @param {MongoFile} data - The data to update, must contain file_id and the increment value for usage.
 * @returns {Promise<MongoFile>} A promise that resolves to the updated file document.
 */
const updateFileUsage = async (data) => {
  const { file_id, inc = 1 } = data;
  const updateOperation = {
    $inc: { usage: inc },
    $unset: { expiresAt: '' },
  };
  return await File.findOneAndUpdate({ file_id }, updateOperation, { new: true }).lean();
};

/**
 * Deletes a file identified by file_id.
 * @param {string} file_id - The unique identifier of the file to delete.
 * @returns {Promise<MongoFile>} A promise that resolves to the deleted file document or null.
 */
const deleteFile = async (file_id) => {
  return await File.findOneAndDelete({ file_id }).lean();
};

/**
 * Deletes multiple files identified by an array of file_ids.
 * @param {Array<string>} file_ids - The unique identifiers of the files to delete.
 * @returns {Promise<Object>} A promise that resolves to the result of the deletion operation.
 */
const deleteFiles = async (file_ids) => {
  return await File.deleteMany({ file_id: { $in: file_ids } });
};

module.exports = {
  File,
  findFileById,
  getFiles,
  createFile,
  updateFile,
  updateFileUsage,
  deleteFile,
  deleteFiles,
};
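The TTL handling is the subtle part: createFile stamps expiresAt one hour out (and fileSchema below gives that field an expiring index), while updateFile and updateFileUsage $unset it, so a file that gets used stops being eligible for cleanup. A lifecycle sketch with hypothetical data:

const mongoose = require('mongoose');
const { createFile, updateFileUsage, findFileById } = require('~/models');

async function demo() {
  const user = new mongoose.Types.ObjectId(); // hypothetical user ID

  // Upserted with expiresAt = now + 1h; unused uploads age out on their own.
  await createFile({
    user,
    file_id: 'file-123', // hypothetical identifier
    bytes: 2048,
    filename: 'diagram.png',
    filepath: '/images/diagram.png',
    type: 'image/png',
  });

  // First real use: usage is incremented and expiresAt removed, making the file permanent.
  await updateFileUsage({ file_id: 'file-123' });

  const file = await findFileById('file-123');
  console.log(file.usage); // 1
  console.log(file.expiresAt); // undefined
}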
@@ -1,5 +1,6 @@
 const { z } = require('zod');
 const Message = require('./schema/messageSchema');
+const logger = require('~/config/winston');

 const idSchema = z.string().uuid();

@@ -18,6 +19,7 @@ module.exports = {
     error,
     unfinished,
     cancelled,
+    files,
     isEdited = false,
     finish_reason = null,
     tokenCount = null,

@@ -30,29 +32,31 @@ module.exports = {
       if (!validConvoId.success) {
         return;
       }

+      const update = {
+        user,
+        messageId: newMessageId || messageId,
+        conversationId,
+        parentMessageId,
+        sender,
+        text,
+        isCreatedByUser,
+        isEdited,
+        finish_reason,
+        error,
+        unfinished,
+        cancelled,
+        tokenCount,
+        plugin,
+        plugins,
+        model,
+      };
+
+      if (files) {
+        update.files = files;
+      }
       // may also need to update the conversation here
-      await Message.findOneAndUpdate(
-        { messageId },
-        {
-          user,
-          messageId: newMessageId || messageId,
-          conversationId,
-          parentMessageId,
-          sender,
-          text,
-          isCreatedByUser,
-          isEdited,
-          finish_reason,
-          error,
-          unfinished,
-          cancelled,
-          tokenCount,
-          plugin,
-          plugins,
-          model,
-        },
-        { upsert: true, new: true },
-      );
+      await Message.findOneAndUpdate({ messageId }, update, { upsert: true, new: true });

       return {
         messageId,

@@ -64,7 +68,7 @@ module.exports = {
         tokenCount,
       };
     } catch (err) {
-      console.error(`Error saving message: ${err}`);
+      logger.error('Error saving message:', err);
       throw new Error('Failed to save message.');
     }
   },

@@ -89,7 +93,7 @@ module.exports = {
         isEdited: true,
       };
     } catch (err) {
-      console.error(`Error updating message: ${err}`);
+      logger.error('Error updating message:', err);
       throw new Error('Failed to update message.');
     }
   },

@@ -103,7 +107,7 @@ module.exports = {
         });
       }
     } catch (err) {
-      console.error(`Error deleting messages: ${err}`);
+      logger.error('Error deleting messages:', err);
       throw new Error('Failed to delete messages.');
     }
   },

@@ -112,7 +116,7 @@ module.exports = {
     try {
       return await Message.find(filter).sort({ createdAt: 1 }).lean();
     } catch (err) {
-      console.error(`Error getting messages: ${err}`);
+      logger.error('Error getting messages:', err);
       throw new Error('Failed to get messages.');
     }
   },

@@ -121,7 +125,7 @@ module.exports = {
     try {
       return await Message.deleteMany(filter);
     } catch (err) {
-      console.error(`Error deleting messages: ${err}`);
+      logger.error('Error deleting messages:', err);
       throw new Error('Failed to delete messages.');
     }
   },
@@ -1,10 +1,11 @@
 const Preset = require('./schema/presetSchema');
+const { logger } = require('~/config');

 const getPreset = async (user, presetId) => {
   try {
     return await Preset.findOne({ user, presetId }).lean();
   } catch (error) {
-    console.log(error);
+    logger.error('[getPreset] Error getting single preset', error);
     return { message: 'Error getting single preset' };
   }
 };

@@ -14,26 +15,55 @@ module.exports = {
   getPreset,
   getPresets: async (user, filter) => {
     try {
-      return await Preset.find({ ...filter, user }).lean();
+      const presets = await Preset.find({ ...filter, user }).lean();
+      const defaultValue = 10000;
+
+      presets.sort((a, b) => {
+        let orderA = a.order !== undefined ? a.order : defaultValue;
+        let orderB = b.order !== undefined ? b.order : defaultValue;
+
+        if (orderA !== orderB) {
+          return orderA - orderB;
+        }
+
+        return b.updatedAt - a.updatedAt;
+      });
+
+      return presets;
     } catch (error) {
-      console.log(error);
+      logger.error('[getPresets] Error getting presets', error);
       return { message: 'Error retrieving presets' };
     }
   },
-  savePreset: async (user, { presetId, newPresetId, ...preset }) => {
+  savePreset: async (user, { presetId, newPresetId, defaultPreset, ...preset }) => {
     try {
+      const setter = { $set: {} };
       const update = { presetId, ...preset };
       if (newPresetId) {
         update.presetId = newPresetId;
       }

-      return await Preset.findOneAndUpdate(
-        { presetId, user },
-        { $set: update },
-        { new: true, upsert: true },
-      );
+      if (defaultPreset) {
+        update.defaultPreset = defaultPreset;
+        update.order = 0;
+
+        const currentDefault = await Preset.findOne({ defaultPreset: true, user });
+
+        if (currentDefault && currentDefault.presetId !== presetId) {
+          await Preset.findByIdAndUpdate(currentDefault._id, {
+            $unset: { defaultPreset: '', order: '' },
+          });
+        }
+      } else if (defaultPreset === false) {
+        update.defaultPreset = undefined;
+        update.order = undefined;
+        setter['$unset'] = { defaultPreset: '', order: '' };
+      }
+
+      setter.$set = update;
+      return await Preset.findOneAndUpdate({ presetId, user }, setter, { new: true, upsert: true });
     } catch (error) {
-      console.log(error);
+      logger.error('[savePreset] Error saving preset', error);
       return { message: 'Error saving preset' };
     }
   },
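The net effect of the savePreset changes is an invariant: at most one preset per user carries defaultPreset, and it always sorts first because it gets order 0. A usage sketch; the preset IDs and title are made up:

const { savePreset, getPresets } = require('~/models');

async function makeDefault(userId) {
  // Marking 'preset-b' as default also $unsets defaultPreset/order on
  // whichever preset previously held them, in the same savePreset call.
  await savePreset(userId, { presetId: 'preset-b', defaultPreset: true, title: 'My default' });

  // getPresets sorts by order first (a missing order is treated as 10000),
  // then by most recent update, so the default comes back first.
  return await getPresets(userId, {});
}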
@@ -1,4 +1,5 @@
 const mongoose = require('mongoose');
+const { logger } = require('~/config');

 const promptSchema = mongoose.Schema(
   {

@@ -28,7 +29,7 @@ module.exports = {
       });
       return { title, prompt };
     } catch (error) {
-      console.error(error);
+      logger.error('Error saving prompt', error);
       return { prompt: 'Error saving prompt' };
     }
   },

@@ -36,7 +37,7 @@ module.exports = {
     try {
       return await Prompt.find(filter).lean();
     } catch (error) {
-      console.error(error);
+      logger.error('Error getting prompts', error);
       return { prompt: 'Error getting prompts' };
     }
   },

@@ -44,7 +45,7 @@ module.exports = {
     try {
       return await Prompt.deleteMany(filter);
     } catch (error) {
-      console.error(error);
+      logger.error('Error deleting prompts', error);
       return { prompt: 'Error deleting prompts' };
     }
   },
@@ -1,6 +1,8 @@
-const mongoose = require('mongoose');
 const crypto = require('crypto');
-const signPayload = require('../server/services/signPayload');
+const mongoose = require('mongoose');
+const signPayload = require('~/server/services/signPayload');
+const { logger } = require('~/config');

 const { REFRESH_TOKEN_EXPIRY } = process.env ?? {};
 const expires = eval(REFRESH_TOKEN_EXPIRY) ?? 1000 * 60 * 60 * 24 * 7;

@@ -44,8 +46,8 @@ sessionSchema.methods.generateRefreshToken = async function () {

     return refreshToken;
   } catch (error) {
-    console.error(
-      'Error generating refresh token. Have you set a JWT_REFRESH_SECRET in the .env file?\n\n',
+    logger.error(
+      'Error generating refresh token. Is a `JWT_REFRESH_SECRET` set in the .env file?\n\n',
       error,
     );
     throw error;

@@ -59,10 +61,12 @@ sessionSchema.statics.deleteAllUserSessions = async function (userId) {
   }
   const result = await this.deleteMany({ user: userId });
   if (result && result?.deletedCount > 0) {
-    console.log(`Deleted ${result.deletedCount} sessions for user ${userId}.`);
+    logger.debug(
+      `[deleteAllUserSessions] Deleted ${result.deletedCount} sessions for user ${userId}.`,
+    );
   }
 } catch (error) {
-  console.log('Error in deleting user sessions:', error);
+  logger.error('[deleteAllUserSessions] Error in deleting user sessions:', error);
   throw error;
 }
};
@@ -39,7 +39,7 @@ transactionSchema.statics.create = async function (transactionData) {
     { user: transaction.user },
     { $inc: { tokenCredits: transaction.tokenValue } },
     { upsert: true, new: true },
-  );
+  ).lean();
 };

 module.exports = mongoose.model('Transaction', transactionSchema);
@@ -7,13 +7,12 @@ const { logViolation } = require('../cache');
  * @async
  * @function
  * @param {Object} params - The function parameters.
- * @param {Object} params.req - The Express request object.
- * @param {Object} params.res - The Express response object.
+ * @param {Express.Request} params.req - The Express request object.
+ * @param {Express.Response} params.res - The Express response object.
  * @param {Object} params.txData - The transaction data.
  * @param {string} params.txData.user - The user ID or identifier.
  * @param {('prompt' | 'completion')} params.txData.tokenType - The type of token.
  * @param {number} params.txData.amount - The amount of tokens.
- * @param {boolean} params.txData.debug - Debug flag.
  * @param {string} params.txData.model - The model name or identifier.
  * @returns {Promise<boolean>} Returns true if the user can spend the amount, otherwise denies the request.
  * @throws {Error} Throws an error if there's an issue with the balance check.
@@ -7,6 +7,15 @@ const {
 } = require('./Message');
 const { getConvoTitle, getConvo, saveConvo, deleteConvos } = require('./Conversation');
 const { getPreset, getPresets, savePreset, deletePresets } = require('./Preset');
+const {
+  findFileById,
+  createFile,
+  updateFile,
+  deleteFile,
+  deleteFiles,
+  getFiles,
+  updateFileUsage,
+} = require('./File');
 const Key = require('./Key');
 const User = require('./User');
 const Session = require('./Session');

@@ -35,4 +44,12 @@ module.exports = {
   getPresets,
   savePreset,
   deletePresets,
+
+  findFileById,
+  createFile,
+  updateFile,
+  deleteFile,
+  deleteFiles,
+  getFiles,
+  updateFileUsage,
 };
@@ -1,7 +1,9 @@
-const _ = require('lodash');
 const mongoose = require('mongoose');
 const { MeiliSearch } = require('meilisearch');
-const { cleanUpPrimaryKeyValue } = require('../../lib/utils/misc');
+const _ = require('lodash');
+const { cleanUpPrimaryKeyValue } = require('~/lib/utils/misc');
+const logger = require('~/config/meiliLogger');

 const searchEnabled = process.env.SEARCH && process.env.SEARCH.toLowerCase() === 'true';
 const meiliEnabled = process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY && searchEnabled;

@@ -64,8 +66,7 @@ const createMeiliMongooseModel = function ({ index, attributesToIndex }) {
       offset += batchSize;
     }

-    console.log('indexMap', indexMap.size);
-    console.log('mongoMap', mongoMap.size);
+    logger.debug('[syncWithMeili]', { indexMap: indexMap.size, mongoMap: mongoMap.size });

     const updateOps = [];

@@ -80,7 +81,11 @@ const createMeiliMongooseModel = function ({ index, attributesToIndex }) {
         (doc.text && doc.text !== mongoMap.get(id).text) ||
         (doc.title && doc.title !== mongoMap.get(id).title)
       ) {
-        console.log(`${id} had document discrepancy in ${doc.text ? 'text' : 'title'} field`);
+        logger.debug(
+          `[syncWithMeili] ${id} had document discrepancy in ${
+            doc.text ? 'text' : 'title'
+          } field`,
+        );
         updateOps.push({
           updateOne: { filter: update, update: { $set: { _meiliIndex: true } } },
         });

@@ -116,15 +121,14 @@ const createMeiliMongooseModel = function ({ index, attributesToIndex }) {

     if (updateOps.length > 0) {
       await this.collection.bulkWrite(updateOps);
-      console.log(
-        `[Meilisearch] Finished indexing ${
+      logger.debug(
+        `[syncWithMeili] Finished indexing ${
           primaryKey === 'messageId' ? 'messages' : 'conversations'
         }`,
       );
     }
   } catch (error) {
-    console.log('[Meilisearch] Error adding document to Meili');
-    console.error(error);
+    logger.error('[syncWithMeili] Error adding document to Meili', error);
   }
 }

@@ -143,7 +147,7 @@ const createMeiliMongooseModel = function ({ index, attributesToIndex }) {
     const query = {};
     // query[primaryKey] = { $in: _.map(data.hits, primaryKey) };
     query[primaryKey] = _.map(data.hits, (hit) => cleanUpPrimaryKeyValue(hit[primaryKey]));
-    // console.log('query', query);
+    // logger.debug('query', query);
     const hitsFromMongoose = await this.find(
       query,
       _.reduce(

@@ -186,11 +190,11 @@ const createMeiliMongooseModel = function ({ index, attributesToIndex }) {
   async addObjectToMeili() {
     const object = this.preprocessObjectForIndex();
     try {
-      // console.log('Adding document to Meili', object);
+      // logger.debug('Adding document to Meili', object);
       await index.addDocuments([object]);
     } catch (error) {
-      // console.log('Error adding document to Meili');
-      // console.error(error);
+      // logger.debug('Error adding document to Meili');
+      // logger.error(error);
     }

     await this.collection.updateMany({ _id: this._id }, { $set: { _meiliIndex: true } });

@@ -311,10 +315,10 @@ module.exports = function mongoMeili(schema, options) {
       return next();
     } catch (error) {
       if (meiliEnabled) {
-        console.log(
-          '[Meilisearch] There was an issue deleting conversation indexes upon deletion, next startup may be slow due to syncing',
+        logger.error(
+          '[MeiliMongooseModel.deleteMany] There was an issue deleting conversation indexes upon deletion, next startup may be slow due to syncing',
+          error,
         );
-        console.error(error);
       }
       return next();
     }

@@ -335,7 +339,11 @@ module.exports = function mongoMeili(schema, options) {
     try {
       meiliDoc = await client.index('convos').getDocument(doc.conversationId);
     } catch (error) {
-      console.log('[Meilisearch] Convo not found and will index', doc.conversationId);
+      logger.error(
+        '[MeiliMongooseModel.findOneAndUpdate] Convo not found in MeiliSearch and will index ' +
+          doc.conversationId,
+        error,
+      );
     }
   }
@@ -18,36 +18,36 @@ const convoSchema = mongoose.Schema(
     user: {
       type: String,
       index: true,
-      default: null,
+      // default: null,
     },
     messages: [{ type: mongoose.Schema.Types.ObjectId, ref: 'Message' }],
     // google only
     examples: [{ type: mongoose.Schema.Types.Mixed }],
     agentOptions: {
       type: mongoose.Schema.Types.Mixed,
-      default: null,
+      // default: null,
     },
     ...conversationPreset,
     // for bingAI only
     bingConversationId: {
       type: String,
-      default: null,
+      // default: null,
     },
     jailbreakConversationId: {
       type: String,
-      default: null,
+      // default: null,
     },
     conversationSignature: {
       type: String,
-      default: null,
+      // default: null,
     },
     clientId: {
       type: String,
-      default: null,
+      // default: null,
     },
     invocationId: {
       type: Number,
-      default: 1,
+      // default: 1,
     },
   },
   { timestamps: true },
@@ -8,147 +8,147 @@ const conversationPreset = {
   // for azureOpenAI, openAI, chatGPTBrowser only
   model: {
     type: String,
-    default: null,
+    // default: null,
     required: false,
   },
   // for azureOpenAI, openAI only
   chatGptLabel: {
     type: String,
-    default: null,
+    // default: null,
     required: false,
   },
   // for google only
   modelLabel: {
     type: String,
-    default: null,
+    // default: null,
     required: false,
   },
   promptPrefix: {
     type: String,
-    default: null,
+    // default: null,
     required: false,
   },
   temperature: {
     type: Number,
-    default: 1,
+    // default: 1,
     required: false,
   },
   top_p: {
     type: Number,
-    default: 1,
+    // default: 1,
     required: false,
   },
   // for google only
   topP: {
     type: Number,
-    default: 0.95,
+    // default: 0.95,
     required: false,
   },
   topK: {
     type: Number,
-    default: 40,
+    // default: 40,
     required: false,
   },
   maxOutputTokens: {
     type: Number,
-    default: 1024,
+    // default: 1024,
     required: false,
   },
   presence_penalty: {
     type: Number,
-    default: 0,
+    // default: 0,
     required: false,
   },
   frequency_penalty: {
     type: Number,
-    default: 0,
+    // default: 0,
     required: false,
   },
   // for bingai only
   jailbreak: {
     type: Boolean,
-    default: false,
+    // default: false,
   },
   context: {
     type: String,
-    default: null,
+    // default: null,
   },
   systemMessage: {
     type: String,
-    default: null,
+    // default: null,
   },
   toneStyle: {
     type: String,
-    default: null,
+    // default: null,
   },
 };

 const agentOptions = {
   model: {
     type: String,
-    default: null,
+    // default: null,
     required: false,
   },
   // for azureOpenAI, openAI only
   chatGptLabel: {
     type: String,
-    default: null,
+    // default: null,
     required: false,
   },
   // for google only
   modelLabel: {
     type: String,
-    default: null,
+    // default: null,
     required: false,
   },
   promptPrefix: {
     type: String,
-    default: null,
+    // default: null,
     required: false,
   },
   temperature: {
     type: Number,
-    default: 1,
+    // default: 1,
     required: false,
   },
   top_p: {
     type: Number,
-    default: 1,
+    // default: 1,
     required: false,
   },
   // for google only
   topP: {
     type: Number,
-    default: 0.95,
+    // default: 0.95,
     required: false,
   },
   topK: {
     type: Number,
-    default: 40,
+    // default: 40,
     required: false,
   },
   maxOutputTokens: {
     type: Number,
-    default: 1024,
+    // default: 1024,
     required: false,
   },
   presence_penalty: {
     type: Number,
-    default: 0,
+    // default: 0,
     required: false,
   },
   frequency_penalty: {
     type: Number,
-    default: 0,
+    // default: 0,
     required: false,
   },
   context: {
     type: String,
-    default: null,
+    // default: null,
   },
   systemMessage: {
     type: String,
-    default: null,
+    // default: null,
   },
 };
api/models/schema/fileSchema.js (new file, 79 lines)

@@ -0,0 +1,79 @@
const mongoose = require('mongoose');

/**
 * @typedef {Object} MongoFile
 * @property {mongoose.Schema.Types.ObjectId} user - User ID
 * @property {string} [conversationId] - Optional conversation ID
 * @property {string} file_id - File identifier
 * @property {string} [temp_file_id] - Temporary File identifier
 * @property {number} bytes - Size of the file in bytes
 * @property {string} filename - Name of the file
 * @property {string} filepath - Location of the file
 * @property {'file'} object - Type of object, always 'file'
 * @property {string} type - Type of file
 * @property {number} usage - Number of uses of the file
 * @property {number} [width] - Optional width of the file
 * @property {number} [height] - Optional height of the file
 * @property {Date} [expiresAt] - Optional expiration date of the file
 */
const fileSchema = mongoose.Schema(
  {
    user: {
      type: mongoose.Schema.Types.ObjectId,
      ref: 'User',
      index: true,
      required: true,
    },
    conversationId: {
      type: String,
      ref: 'Conversation',
      index: true,
    },
    file_id: {
      type: String,
      // required: true,
      index: true,
    },
    temp_file_id: {
      type: String,
      // required: true,
    },
    bytes: {
      type: Number,
      required: true,
    },
    usage: {
      type: Number,
      required: true,
      default: 0,
    },
    filename: {
      type: String,
      required: true,
    },
    filepath: {
      type: String,
      required: true,
    },
    object: {
      type: String,
      required: true,
      default: 'file',
    },
    type: {
      type: String,
      required: true,
    },
    width: Number,
    height: Number,
    expiresAt: {
      type: Date,
      expires: 3600,
    },
  },
  {
    timestamps: true,
  },
);

module.exports = fileSchema;
@@ -1,5 +1,5 @@
 const mongoose = require('mongoose');
-const mongoMeili = require('../plugins/mongoMeili');
+const mongoMeili = require('~/models/plugins/mongoMeili');
 const messageSchema = mongoose.Schema(
   {
     messageId: {

@@ -85,6 +85,7 @@ const messageSchema = mongoose.Schema(
       select: false,
       default: false,
     },
+    files: [{ type: mongoose.Schema.Types.Mixed }],
     plugin: {
       latest: {
         type: String,
@@ -17,6 +17,12 @@ const presetSchema = mongoose.Schema(
       type: String,
       default: null,
     },
+    defaultPreset: {
+      type: Boolean,
+    },
+    order: {
+      type: Number,
+    },
     // google only
     examples: [{ type: mongoose.Schema.Types.Mixed }],
     ...conversationPreset,
@@ -1,4 +1,5 @@
 const Transaction = require('./Transaction');
+const { logger } = require('~/config');

 /**
  * Creates up to two transactions to record the spending of tokens.

@@ -30,7 +31,7 @@ const spendTokens = async (txData, tokenUsage) => {
   }

   if (!completionTokens) {
-    this.debug && console.dir({ prompt, completion }, { depth: null });
+    logger.debug('[spendTokens] !completionTokens', { prompt, completion });
     return;
   }

@@ -40,9 +41,9 @@ const spendTokens = async (txData, tokenUsage) => {
     rawAmount: -completionTokens,
   });

-  this.debug && console.dir({ prompt, completion }, { depth: null });
+  logger.debug('[spendTokens] post-transaction', { prompt, completion });
 } catch (err) {
-  console.error(err);
+  logger.error('[spendTokens]', err);
 }
};
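A call sketch for spendTokens; the require path, the field names in txData, and the promptTokens/completionTokens shape are inferred from the guard and JSDoc above rather than shown in full, so treat them as assumptions:

const spendTokens = require('~/models/spendTokens'); // path assumed

async function example(userId) {
  await spendTokens(
    { user: userId, model: 'gpt-3.5-turbo' }, // txData: user and model per the JSDoc
    { promptTokens: 120, completionTokens: 380 }, // tokenUsage (shape assumed)
  );
  // Per the JSDoc, up to two transactions are recorded; the completion one is
  // written with rawAmount: -completionTokens, and if completionTokens is
  // missing, the function logs a debug line and returns early without it.
}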
@@ -18,10 +18,11 @@ const tokenValues = {
  * Retrieves the key associated with a given model name.
  *
  * @param {string} model - The model name to match.
+ * @param {string} endpoint - The endpoint name to match.
  * @returns {string|undefined} The key corresponding to the model name, or undefined if no match is found.
  */
-const getValueKey = (model) => {
-  const modelName = matchModelName(model);
+const getValueKey = (model, endpoint) => {
+  const modelName = matchModelName(model, endpoint);
   if (!modelName) {
     return undefined;
   }

@@ -51,9 +52,10 @@ const getValueKey = (model) => {
  * @param {string} [params.valueKey] - The key corresponding to the model name.
  * @param {string} [params.tokenType] - The type of token (e.g., 'prompt' or 'completion').
  * @param {string} [params.model] - The model name to derive the value key from if not provided.
+ * @param {string} [params.endpoint] - The endpoint name to derive the value key from if not provided.
  * @returns {number} The multiplier for the given parameters, or a default value if not found.
  */
-const getMultiplier = ({ valueKey, tokenType, model }) => {
+const getMultiplier = ({ valueKey, tokenType, model, endpoint }) => {
   if (valueKey && tokenType) {
     return tokenValues[valueKey][tokenType] ?? defaultRate;
   }

@@ -62,7 +64,7 @@ const getMultiplier = ({ valueKey, tokenType, model }) => {
     return 1;
   }

-  valueKey = getValueKey(model);
+  valueKey = getValueKey(model, endpoint);
   if (!valueKey) {
     return defaultRate;
   }
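getMultiplier resolves in a fixed order: an explicit valueKey plus tokenType wins, then a key derived from model and (new in this change) endpoint, then defaultRate. An illustrative call sequence; the key names and the existence of these particular tokenValues entries are placeholders, since the table is only partially visible in this diff:

// 1) Direct hit: both valueKey and tokenType supplied.
getMultiplier({ valueKey: 'gpt-3.5-turbo', tokenType: 'prompt' });
// -> tokenValues['gpt-3.5-turbo'].prompt, or defaultRate if that entry is missing

// 2) Derived: the endpoint now participates in matching the model name.
getMultiplier({ model: 'gpt-4-0613', endpoint: 'azureOpenAI', tokenType: 'completion' });

// 3) Missing tokenType returns 1; an unmatched model falls back to defaultRate
//    (per the branches shown above).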
Some files were not shown because too many files have changed in this diff.