Compare commits: v0.8.1-rc1...refactor/p
41 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 148052c473 |  |
|  | 331014cc98 |  |
|  | 9c0deed34a |  |
|  | 226bd90ede |  |
|  | 4fea3d4274 |  |
|  | ad6716f6ef |  |
|  | b2f7f5c904 |  |
|  | 4ae1d82a75 |  |
|  | 494c6d2596 |  |
|  | 6f4c8ef114 |  |
|  | a4c6553695 |  |
|  | edb977c1bc |  |
|  | 8ec7781672 |  |
|  | 99731e98dd |  |
|  | f57d920bd5 |  |
|  | 3831ad8202 |  |
|  | 6e278f6932 |  |
|  | 90ac2b51cd |  |
|  | 20ad7d52f3 |  |
|  | eb368fcb70 |  |
|  | 7cf3f98475 |  |
|  | ab5450be8b |  |
|  | c682d45fb2 |  |
|  | 5fb6b91e71 |  |
|  | 76e070048c |  |
|  | 728d19e361 |  |
|  | 2d492b932f |  |
|  | c201d54cac |  |
|  | a2a3f5c044 |  |
|  | f9c0e9853f |  |
|  | fa9177180f |  |
|  | f6ca8caf7e |  |
|  | 30b8a1c6c4 |  |
|  | 848cb6f871 |  |
|  | ea459749f9 |  |
|  | 63c56c8dd9 |  |
|  | 7caffda81a |  |
|  | 0cb5ed4063 |  |
|  | 85d0688f38 |  |
|  | 2c14fe1e9a |  |
|  | 4049b5572c |  |
.env.example (149 lines changed)

@@ -15,20 +15,6 @@ HOST=localhost
PORT=3080

MONGO_URI=mongodb://127.0.0.1:27017/LibreChat
#The maximum number of connections in the connection pool. */
MONGO_MAX_POOL_SIZE=
#The minimum number of connections in the connection pool. */
MONGO_MIN_POOL_SIZE=
#The maximum number of connections that may be in the process of being established concurrently by the connection pool. */
MONGO_MAX_CONNECTING=
#The maximum number of milliseconds that a connection can remain idle in the pool before being removed and closed. */
MONGO_MAX_IDLE_TIME_MS=
#The maximum time in milliseconds that a thread can wait for a connection to become available. */
MONGO_WAIT_QUEUE_TIMEOUT_MS=
# Set to false to disable automatic index creation for all models associated with this connection. */
MONGO_AUTO_INDEX=
# Set to `false` to disable Mongoose automatically calling `createCollection()` on every model created on this connection. */
MONGO_AUTO_CREATE=

DOMAIN_CLIENT=http://localhost:3080
DOMAIN_SERVER=http://localhost:3080
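The `MONGO_*` variables in the hunk above correspond to connection-pool options in the MongoDB Node.js driver and Mongoose. As a hedged sketch, assuming they are parsed and forwarded roughly like this (the option names are real Mongoose/driver options, but the wiring below is illustrative, not LibreChat's actual startup code):

```js
// Illustrative only: reading the pool-related variables from the hunk above
// and passing them to mongoose.connect(). Undefined values are simply ignored.
const mongoose = require('mongoose');

const toInt = (value) => (value ? parseInt(value, 10) : undefined);

mongoose.connect(process.env.MONGO_URI, {
  maxPoolSize: toInt(process.env.MONGO_MAX_POOL_SIZE), // max connections in the pool
  minPoolSize: toInt(process.env.MONGO_MIN_POOL_SIZE), // connections kept open when idle
  maxConnecting: toInt(process.env.MONGO_MAX_CONNECTING), // concurrent connection attempts
  maxIdleTimeMS: toInt(process.env.MONGO_MAX_IDLE_TIME_MS), // close connections idle this long
  waitQueueTimeoutMS: toInt(process.env.MONGO_WAIT_QUEUE_TIMEOUT_MS), // max wait for a free connection
  autoIndex: process.env.MONGO_AUTO_INDEX !== 'false', // automatic index creation
  autoCreate: process.env.MONGO_AUTO_CREATE !== 'false', // automatic createCollection()
});
```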
@@ -40,13 +26,6 @@ NO_INDEX=true
# Defaulted to 1.
TRUST_PROXY=1

# Minimum password length for user authentication
# Default: 8
# Note: When using LDAP authentication, you may want to set this to 1
# to bypass local password validation, as LDAP servers handle their own
# password policies.
# MIN_PASSWORD_LENGTH=8

#===============#
# JSON Logging #
#===============#

@@ -79,7 +58,7 @@ DEBUG_CONSOLE=false
# Endpoints #
#===================================================#

# ENDPOINTS=openAI,assistants,azureOpenAI,google,anthropic
# ENDPOINTS=openAI,assistants,azureOpenAI,google,gptPlugins,anthropic

PROXY=

@@ -163,10 +142,10 @@ GOOGLE_KEY=user_provided
# GOOGLE_AUTH_HEADER=true

# Gemini API (AI Studio)
# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite,gemini-2.0-flash,gemini-2.0-flash-lite
# GOOGLE_MODELS=gemini-2.5-pro-preview-05-06,gemini-2.5-flash-preview-04-17,gemini-2.0-flash-001,gemini-2.0-flash-exp,gemini-2.0-flash-lite-001,gemini-1.5-pro-002,gemini-1.5-flash-002

# Vertex AI
# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite,gemini-2.0-flash-001,gemini-2.0-flash-lite-001
# GOOGLE_MODELS=gemini-2.5-pro-preview-05-06,gemini-2.5-flash-preview-04-17,gemini-2.0-flash-001,gemini-2.0-flash-exp,gemini-2.0-flash-lite-001,gemini-1.5-pro-002,gemini-1.5-flash-002

# GOOGLE_TITLE_MODEL=gemini-2.0-flash-lite-001

@@ -196,7 +175,7 @@ GOOGLE_KEY=user_provided
#============#

OPENAI_API_KEY=user_provided
# OPENAI_MODELS=gpt-5,gpt-5-codex,gpt-5-mini,gpt-5-nano,o3-pro,o3,o4-mini,gpt-4.1,gpt-4.1-mini,gpt-4.1-nano,o3-mini,o1-pro,o1,gpt-4o,gpt-4o-mini
# OPENAI_MODELS=o1,o1-mini,o1-preview,gpt-4o,gpt-4.5-preview,chatgpt-4o-latest,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k

DEBUG_OPENAI=false

@@ -370,11 +349,6 @@ REGISTRATION_VIOLATION_SCORE=1
CONCURRENT_VIOLATION_SCORE=1
MESSAGE_VIOLATION_SCORE=1
NON_BROWSER_VIOLATION_SCORE=20
TTS_VIOLATION_SCORE=0
STT_VIOLATION_SCORE=0
FORK_VIOLATION_SCORE=0
IMPORT_VIOLATION_SCORE=0
FILE_UPLOAD_VIOLATION_SCORE=0

LOGIN_MAX=7
LOGIN_WINDOW=5

@@ -459,15 +433,10 @@ OPENID_CALLBACK_URL=/oauth/openid/callback
OPENID_REQUIRED_ROLE=
OPENID_REQUIRED_ROLE_TOKEN_KIND=
OPENID_REQUIRED_ROLE_PARAMETER_PATH=
OPENID_ADMIN_ROLE=
OPENID_ADMIN_ROLE_PARAMETER_PATH=
OPENID_ADMIN_ROLE_TOKEN_KIND=
# Set to determine which user info property returned from OpenID Provider to store as the User's username
OPENID_USERNAME_CLAIM=
# Set to determine which user info property returned from OpenID Provider to store as the User's name
OPENID_NAME_CLAIM=
# Optional audience parameter for OpenID authorization requests
OPENID_AUDIENCE=

OPENID_BUTTON_LABEL=
OPENID_IMAGE_URL=

@@ -484,26 +453,11 @@ OPENID_REUSE_TOKENS=
OPENID_JWKS_URL_CACHE_ENABLED=
OPENID_JWKS_URL_CACHE_TIME= # 600000 ms eq to 10 minutes leave empty to disable caching
#Set to true to trigger token exchange flow to acquire access token for the userinfo endpoint.
OPENID_ON_BEHALF_FLOW_FOR_USERINFO_REQUIRED=
OPENID_ON_BEHALF_FLOW_USERINFO_SCOPE="user.read" # example for Scope Needed for Microsoft Graph API
OPENID_ON_BEHALF_FLOW_FOR_USERINFRO_REQUIRED=
OPENID_ON_BEHALF_FLOW_USERINFRO_SCOPE = "user.read" # example for Scope Needed for Microsoft Graph API
# Set to true to use the OpenID Connect end session endpoint for logout
OPENID_USE_END_SESSION_ENDPOINT=

#========================#
# SharePoint Integration #
#========================#
# Requires Entra ID (OpenID) authentication to be configured

# Enable SharePoint file picker in chat and agent panels
# ENABLE_SHAREPOINT_FILEPICKER=true

# SharePoint tenant base URL (e.g., https://yourtenant.sharepoint.com)
# SHAREPOINT_BASE_URL=https://yourtenant.sharepoint.com

# Microsoft Graph API And SharePoint scopes for file picker
# SHAREPOINT_PICKER_SHAREPOINT_SCOPE==https://yourtenant.sharepoint.com/AllSites.Read
# SHAREPOINT_PICKER_GRAPH_SCOPE=Files.Read.All
#========================#

# SAML
# Note: If OpenID is enabled, SAML authentication will be automatically disabled.

@@ -531,21 +485,6 @@ SAML_IMAGE_URL=
# SAML_USE_AUTHN_RESPONSE_SIGNED=


#===============================================#
# Microsoft Graph API / Entra ID Integration #
#===============================================#

# Enable Entra ID people search integration in permissions/sharing system
# When enabled, the people picker will search both local database and Entra ID
USE_ENTRA_ID_FOR_PEOPLE_SEARCH=false

# When enabled, entra id groups owners will be considered as members of the group
ENTRA_ID_INCLUDE_OWNERS_AS_MEMBERS=false

# Microsoft Graph API scopes needed for people/group search
# Default scopes provide access to user profiles and group memberships
OPENID_GRAPH_SCOPES=User.Read,People.Read,GroupMember.Read.All

# LDAP
LDAP_URL=
LDAP_BIND_DN=

@@ -576,18 +515,6 @@ EMAIL_PASSWORD=
EMAIL_FROM_NAME=
EMAIL_FROM=noreply@librechat.ai

#========================#
# Mailgun API #
#========================#

# MAILGUN_API_KEY=your-mailgun-api-key
# MAILGUN_DOMAIN=mg.yourdomain.com
# EMAIL_FROM=noreply@yourdomain.com
# EMAIL_FROM_NAME="LibreChat"

# # Optional: For EU region
# MAILGUN_HOST=https://api.eu.mailgun.net

#========================#
# Firebase CDN #
#========================#

@@ -636,10 +563,6 @@ ALLOW_SHARED_LINKS_PUBLIC=true
# If you have another service in front of your LibreChat doing compression, disable express based compression here
# DISABLE_COMPRESSION=true

# If you have gzipped version of uploaded image images in the same folder, this will enable gzip scan and serving of these images
# Note: The images folder will be scanned on startup and a ma kept in memory. Be careful for large number of images.
# ENABLE_IMAGE_OUTPUT_GZIP_SCAN=true

#===================================================#
# UI #
#===================================================#

@@ -653,54 +576,15 @@ HELP_AND_FAQ_URL=https://librechat.ai
# Google tag manager id
#ANALYTICS_GTM_ID=user provided google tag manager id

# limit conversation file imports to a certain number of bytes in size to avoid the container
# maxing out memory limitations by unremarking this line and supplying a file size in bytes
# such as the below example of 250 mib
# CONVERSATION_IMPORT_MAX_FILE_SIZE_BYTES=262144000


#===============#
# REDIS Options #
#===============#

# Enable Redis for caching and session storage
# REDIS_URI=10.10.10.10:6379
# USE_REDIS=true

# Single Redis instance
# REDIS_URI=redis://127.0.0.1:6379

# Redis cluster (multiple nodes)
# REDIS_URI=redis://127.0.0.1:7001,redis://127.0.0.1:7002,redis://127.0.0.1:7003

# Redis with TLS/SSL encryption and CA certificate
# REDIS_URI=rediss://127.0.0.1:6380
# REDIS_CA=/path/to/ca-cert.pem

# Elasticache may need to use an alternate dnsLookup for TLS connections. see "Special Note: Aws Elasticache Clusters with TLS" on this webpage: https://www.npmjs.com/package/ioredis
# Enable alternative dnsLookup for redis
# REDIS_USE_ALTERNATIVE_DNS_LOOKUP=true

# Redis authentication (if required)
# REDIS_USERNAME=your_redis_username
# REDIS_PASSWORD=your_redis_password

# Redis key prefix configuration
# Use environment variable name for dynamic prefix (recommended for cloud deployments)
# REDIS_KEY_PREFIX_VAR=K_REVISION
# Or use static prefix directly
# REDIS_KEY_PREFIX=librechat

# Redis connection limits
# REDIS_MAX_LISTENERS=40

# Redis ping interval in seconds (0 = disabled, >0 = enabled)
# When set to a positive integer, Redis clients will ping the server at this interval to keep connections alive
# When unset or 0, no pinging is performed (recommended for most use cases)
# REDIS_PING_INTERVAL=300

# Force specific cache namespaces to use in-memory storage even when Redis is enabled
# Comma-separated list of CacheKeys (e.g., ROLES,MESSAGES)
# FORCED_IN_MEMORY_CACHE_NAMESPACES=ROLES,MESSAGES
# USE_REDIS_CLUSTER=true
# REDIS_CA=/path/to/ca.crt

#==================================================#
# Others #
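For context on the Redis block above: a single `REDIS_URI` value can name one node or a comma-separated list of cluster nodes. A minimal sketch with ioredis (the library the Elasticache note references); the selection logic below is illustrative and not LibreChat's actual cache code:

```js
// Sketch: choose a standalone vs. cluster ioredis client based on REDIS_URI.
const Redis = require('ioredis');

function createRedisClient(uri = process.env.REDIS_URI) {
  const nodes = uri.split(',');
  if (nodes.length > 1) {
    // e.g. redis://127.0.0.1:7001,redis://127.0.0.1:7002,redis://127.0.0.1:7003
    return new Redis.Cluster(
      nodes.map((node) => {
        const { hostname, port } = new URL(node);
        return { host: hostname, port: Number(port) };
      }),
    );
  }
  // e.g. redis://127.0.0.1:6379, or rediss://127.0.0.1:6380 for TLS
  return new Redis(uri, {
    username: process.env.REDIS_USERNAME || undefined,
    password: process.env.REDIS_PASSWORD || undefined,
    keyPrefix: process.env.REDIS_KEY_PREFIX || undefined,
  });
}
```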
@@ -761,17 +645,4 @@ OPENWEATHER_API_KEY=
# Reranker (Required)
# JINA_API_KEY=your_jina_api_key
# or
# COHERE_API_KEY=your_cohere_api_key

#======================#
# MCP Configuration #
#======================#

# Treat 401/403 responses as OAuth requirement when no oauth metadata found
# MCP_OAUTH_ON_AUTH_ERROR=true

# Timeout for OAuth detection requests in milliseconds
# MCP_OAUTH_DETECTION_TIMEOUT=5000

# Cache connection status checks for this many milliseconds to avoid expensive verification
# MCP_CONNECTION_CHECK_TTL=60000
# COHERE_API_KEY=your_cohere_api_key
.github/CONTRIBUTING.md (8 lines changed, vendored)

@@ -30,8 +30,8 @@ Project maintainers have the right and responsibility to remove, edit, or reject
2. Install typescript globally: `npm i -g typescript`.
3. Run `npm ci` to install dependencies.
4. Build the data provider: `npm run build:data-provider`.
5. Build data schemas: `npm run build:data-schemas`.
6. Build API methods: `npm run build:api`.
5. Build MCP: `npm run build:mcp`.
6. Build data schemas: `npm run build:data-schemas`.
7. Setup and run unit tests:
- Copy `.env.test`: `cp api/test/.env.test.example api/test/.env.test`.
- Run backend unit tests: `npm run test:api`.

@@ -147,7 +147,7 @@ Apply the following naming conventions to branches, labels, and other Git-relate
## 8. Module Import Conventions

- `npm` packages first,
- from longest line (top) to shortest (bottom)
- from shortest line (top) to longest (bottom)

- Followed by typescript types (pertains to data-provider and client workspaces)
- longest line (top) to shortest (bottom)

@@ -157,8 +157,6 @@ Apply the following naming conventions to branches, labels, and other Git-relate
- longest line (top) to shortest (bottom)
- imports with alias `~` treated the same as relative import with respect to line length

**Note:** ESLint will automatically enforce these import conventions when you run `npm run lint --fix` or through pre-commit hooks.

---

Please ensure that you adapt this summary to fit the specific context and nuances of your project.
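To make the import-ordering rules above concrete, a small illustrative example (module names are placeholders; within each group, apply whichever length ordering the current guideline specifies):

```js
// npm packages first, ordered by line length within the group:
import { useCallback, useEffect } from 'react';
import { useRecoilValue } from 'recoil';
import axios from 'axios';
// ...then relative/`~` alias imports, longest line to shortest:
import { buildMessagePayload } from '~/utils/messages';
import { useChatContext } from '~/Providers';
import store from '~/store';
```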
.github/workflows/backend-review.yml (14 lines changed, vendored)

@@ -7,7 +7,6 @@ on:
- release/*
paths:
- 'api/**'
- 'packages/**'
jobs:
tests_Backend:
name: Run Backend unit tests

@@ -37,12 +36,12 @@ jobs:
- name: Install Data Provider Package
run: npm run build:data-provider

- name: Install MCP Package
run: npm run build:mcp

- name: Install Data Schemas Package
run: npm run build:data-schemas

- name: Install API Package
run: npm run build:api

- name: Create empty auth.json file
run: |
mkdir -p api/data

@@ -67,8 +66,5 @@ jobs:
- name: Run librechat-data-provider unit tests
run: cd packages/data-provider && npm run test:ci

- name: Run @librechat/data-schemas unit tests
run: cd packages/data-schemas && npm run test:ci

- name: Run @librechat/api unit tests
run: cd packages/api && npm run test:ci
- name: Run librechat-mcp unit tests
run: cd packages/mcp && npm run test:ci
.github/workflows/cache-integration-tests.yml (78 lines changed, vendored)

@@ -1,78 +0,0 @@
name: Cache Integration Tests

on:
pull_request:
branches:
- main
- dev
- release/*
paths:
- 'packages/api/src/cache/**'
- 'redis-config/**'
- '.github/workflows/cache-integration-tests.yml'

jobs:
cache_integration_tests:
name: Run Cache Integration Tests
timeout-minutes: 30
runs-on: ubuntu-latest

steps:
- name: Checkout repository
uses: actions/checkout@v4

- name: Use Node.js 20.x
uses: actions/setup-node@v4
with:
node-version: 20
cache: 'npm'

- name: Install Redis tools
run: |
sudo apt-get update
sudo apt-get install -y redis-server redis-tools

- name: Start Single Redis Instance
run: |
redis-server --daemonize yes --port 6379
sleep 2
# Verify single Redis is running
redis-cli -p 6379 ping || exit 1

- name: Start Redis Cluster
working-directory: redis-config
run: |
chmod +x start-cluster.sh stop-cluster.sh
./start-cluster.sh
sleep 10
# Verify cluster is running
redis-cli -p 7001 cluster info || exit 1
redis-cli -p 7002 cluster info || exit 1
redis-cli -p 7003 cluster info || exit 1

- name: Install dependencies
run: npm ci

- name: Build packages
run: |
npm run build:data-provider
npm run build:data-schemas
npm run build:api

- name: Run cache integration tests
working-directory: packages/api
env:
NODE_ENV: test
USE_REDIS: true
REDIS_URI: redis://127.0.0.1:6379
REDIS_CLUSTER_URI: redis://127.0.0.1:7001,redis://127.0.0.1:7002,redis://127.0.0.1:7003
run: npm run test:cache:integration

- name: Stop Redis Cluster
if: always()
working-directory: redis-config
run: ./stop-cluster.sh || true

- name: Stop Single Redis Instance
if: always()
run: redis-cli -p 6379 shutdown || true
.github/workflows/client.yml (58 lines changed, vendored)

@@ -1,58 +0,0 @@
name: Publish `@librechat/client` to NPM

on:
push:
branches:
- main
paths:
- 'packages/client/package.json'
workflow_dispatch:
inputs:
reason:
description: 'Reason for manual trigger'
required: false
default: 'Manual publish requested'

jobs:
build-and-publish:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4

- name: Use Node.js
uses: actions/setup-node@v4
with:
node-version: '20.x'

- name: Install client dependencies
run: cd packages/client && npm ci

- name: Build client
run: cd packages/client && npm run build

- name: Set up npm authentication
run: echo "//registry.npmjs.org/:_authToken=${{ secrets.PUBLISH_NPM_TOKEN }}" > ~/.npmrc

- name: Check version change
id: check
working-directory: packages/client
run: |
PACKAGE_VERSION=$(node -p "require('./package.json').version")
PUBLISHED_VERSION=$(npm view @librechat/client version 2>/dev/null || echo "0.0.0")
if [ "$PACKAGE_VERSION" = "$PUBLISHED_VERSION" ]; then
echo "No version change, skipping publish"
echo "skip=true" >> $GITHUB_OUTPUT
else
echo "Version changed, proceeding with publish"
echo "skip=false" >> $GITHUB_OUTPUT
fi

- name: Pack package
if: steps.check.outputs.skip != 'true'
working-directory: packages/client
run: npm pack

- name: Publish
if: steps.check.outputs.skip != 'true'
working-directory: packages/client
run: npm publish *.tgz --access public
.github/workflows/data-provider.yml (12 lines changed, vendored)

@@ -1,4 +1,4 @@
name: Publish `librechat-data-provider` to NPM
name: Node.js Package

on:
push:

@@ -6,12 +6,6 @@ on:
- main
paths:
- 'packages/data-provider/package.json'
workflow_dispatch:
inputs:
reason:
description: 'Reason for manual trigger'
required: false
default: 'Manual publish requested'

jobs:
build:

@@ -20,7 +14,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 20
node-version: 16
- run: cd packages/data-provider && npm ci
- run: cd packages/data-provider && npm run build

@@ -31,7 +25,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 20
node-version: 16
registry-url: 'https://registry.npmjs.org'
- run: cd packages/data-provider && npm ci
- run: cd packages/data-provider && npm run build
.github/workflows/data-schemas.yml (2 lines changed, vendored)

@@ -22,7 +22,7 @@ jobs:
- name: Use Node.js
uses: actions/setup-node@v4
with:
node-version: '20.x'
node-version: '18.x'

- name: Install dependencies
run: cd packages/data-schemas && npm ci
.github/workflows/deploy-dev.yml (17 lines changed, vendored)

@@ -2,7 +2,7 @@ name: Update Test Server

on:
workflow_run:
workflows: ["Docker Dev Branch Images Build"]
workflows: ["Docker Dev Images Build"]
types:
- completed
workflow_dispatch:

@@ -12,8 +12,7 @@ jobs:
runs-on: ubuntu-latest
if: |
github.repository == 'danny-avila/LibreChat' &&
(github.event_name == 'workflow_dispatch' ||
(github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.head_branch == 'dev'))
(github.event_name == 'workflow_dispatch' || github.event.workflow_run.conclusion == 'success')
steps:
- name: Checkout repository
uses: actions/checkout@v4

@@ -30,17 +29,13 @@ jobs:
DO_USER: ${{ secrets.DO_USER }}
run: |
ssh -o StrictHostKeyChecking=no ${DO_USER}@${DO_HOST} << EOF
sudo -i -u danny bash << 'EEOF'
sudo -i -u danny bash << EEOF
cd ~/LibreChat && \
git fetch origin main && \
sudo npm run stop:deployed && \
sudo docker images --format "{{.Repository}}:{{.ID}}" | grep -E "lc-dev|librechat" | cut -d: -f2 | xargs -r sudo docker rmi -f || true && \
sudo npm run update:deployed && \
git checkout dev && \
git pull origin dev && \
npm run update:deployed && \
git checkout do-deploy && \
git rebase dev && \
sudo npm run start:deployed && \
git rebase main && \
npm run start:deployed && \
echo "Update completed. Application should be running now."
EEOF
EOF
.github/workflows/dev-branch-images.yml (72 lines changed, vendored)

@@ -1,72 +0,0 @@
name: Docker Dev Branch Images Build

on:
workflow_dispatch:
push:
branches:
- dev
paths:
- 'api/**'
- 'client/**'
- 'packages/**'

jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
include:
- target: api-build
file: Dockerfile.multi
image_name: lc-dev-api
- target: node
file: Dockerfile
image_name: lc-dev

steps:
# Check out the repository
- name: Checkout
uses: actions/checkout@v4

# Set up QEMU
- name: Set up QEMU
uses: docker/setup-qemu-action@v3

# Set up Docker Buildx
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3

# Log in to GitHub Container Registry
- name: Log in to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

# Login to Docker Hub
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

# Prepare the environment
- name: Prepare environment
run: |
cp .env.example .env

# Build and push Docker images for each target
- name: Build and push Docker images
uses: docker/build-push-action@v5
with:
context: .
file: ${{ matrix.file }}
push: true
tags: |
ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ github.sha }}
ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ github.sha }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest
platforms: linux/amd64,linux/arm64
target: ${{ matrix.target }}
.github/workflows/frontend-review.yml (2 lines changed, vendored)

@@ -8,7 +8,7 @@ on:
- release/*
paths:
- 'client/**'
- 'packages/data-provider/**'
- 'packages/**'

jobs:
tests_frontend_ubuntu:
.github/workflows/generate-release-changelog-pr.yml (95 lines changed, new file, vendored)

@@ -0,0 +1,95 @@
name: Generate Release Changelog PR

on:
push:
tags:
- 'v*.*.*'
workflow_dispatch:

jobs:
generate-release-changelog-pr:
permissions:
contents: write # Needed for pushing commits and creating branches.
pull-requests: write
runs-on: ubuntu-latest
steps:
# 1. Checkout the repository (with full history).
- name: Checkout Repository
uses: actions/checkout@v4
with:
fetch-depth: 0

# 2. Generate the release changelog using our custom configuration.
- name: Generate Release Changelog
id: generate_release
uses: mikepenz/release-changelog-builder-action@v5.1.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
configuration: ".github/configuration-release.json"
owner: ${{ github.repository_owner }}
repo: ${{ github.event.repository.name }}
outputFile: CHANGELOG-release.md

# 3. Update the main CHANGELOG.md:
# - If it doesn't exist, create it with a basic header.
# - Remove the "Unreleased" section (if present).
# - Prepend the new release changelog above previous releases.
# - Remove all temporary files before committing.
- name: Update CHANGELOG.md
run: |
# Determine the release tag, e.g. "v1.2.3"
TAG=${GITHUB_REF##*/}
echo "Using release tag: $TAG"

# Ensure CHANGELOG.md exists; if not, create a basic header.
if [ ! -f CHANGELOG.md ]; then
echo "# Changelog" > CHANGELOG.md
echo "" >> CHANGELOG.md
echo "All notable changes to this project will be documented in this file." >> CHANGELOG.md
echo "" >> CHANGELOG.md
fi

echo "Updating CHANGELOG.md…"

# Remove the "Unreleased" section (from "## [Unreleased]" until the first occurrence of '---') if it exists.
if grep -q "^## \[Unreleased\]" CHANGELOG.md; then
awk '/^## \[Unreleased\]/{flag=1} flag && /^---/{flag=0; next} !flag' CHANGELOG.md > CHANGELOG.cleaned
else
cp CHANGELOG.md CHANGELOG.cleaned
fi

# Split the cleaned file into:
# - header.md: content before the first release header ("## [v...").
# - tail.md: content from the first release header onward.
awk '/^## \[v/{exit} {print}' CHANGELOG.cleaned > header.md
awk 'f{print} /^## \[v/{f=1; print}' CHANGELOG.cleaned > tail.md

# Combine header, the new release changelog, and the tail.
echo "Combining updated changelog parts..."
cat header.md CHANGELOG-release.md > CHANGELOG.md.new
echo "" >> CHANGELOG.md.new
cat tail.md >> CHANGELOG.md.new

mv CHANGELOG.md.new CHANGELOG.md

# Remove temporary files.
rm -f CHANGELOG.cleaned header.md tail.md CHANGELOG-release.md

echo "Final CHANGELOG.md content:"
cat CHANGELOG.md

# 4. Create (or update) the Pull Request with the updated CHANGELOG.md.
- name: Create Pull Request
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.GITHUB_TOKEN }}
sign-commits: true
commit-message: "chore: update CHANGELOG for release ${{ github.ref_name }}"
base: main
branch: "changelog/${{ github.ref_name }}"
reviewers: danny-avila
title: "📜 docs: Changelog for release ${{ github.ref_name }}"
body: |
**Description**:
- This PR updates the CHANGELOG.md by removing the "Unreleased" section and adding new release notes for release ${{ github.ref_name }} above previous releases.
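The awk pipeline in the "Update CHANGELOG.md" step splits the changelog into a header, the new release notes, and the tail of previous releases. A hedged JavaScript equivalent of that splicing, for readers less fluent in awk (file names are the workflow's; the regexes approximate, not reproduce, the awk logic):

```js
// Sketch of the changelog splice: drop the Unreleased block, keep the header,
// and prepend the freshly generated release notes above previous releases.
const fs = require('fs');

const changelog = fs.readFileSync('CHANGELOG.md', 'utf8');
const release = fs.readFileSync('CHANGELOG-release.md', 'utf8');

// Remove "## [Unreleased]" through the first '---' separator line, if present.
const cleaned = changelog.replace(/^## \[Unreleased\][\s\S]*?^---.*\n?/m, '');

// Split at the first release header ("## [v...") and splice the notes between.
const idx = cleaned.search(/^## \[v/m);
const header = idx === -1 ? cleaned : cleaned.slice(0, idx);
const tail = idx === -1 ? '' : cleaned.slice(idx);
fs.writeFileSync('CHANGELOG.md', header + release + '\n' + tail);
```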
.github/workflows/generate-unreleased-changelog-pr.yml (107 lines changed, new file, vendored)

@@ -0,0 +1,107 @@
name: Generate Unreleased Changelog PR

on:
schedule:
- cron: "0 0 * * 1" # Runs every Monday at 00:00 UTC
workflow_dispatch:

jobs:
generate-unreleased-changelog-pr:
permissions:
contents: write # Needed for pushing commits and creating branches.
pull-requests: write
runs-on: ubuntu-latest
steps:
# 1. Checkout the repository on main.
- name: Checkout Repository on Main
uses: actions/checkout@v4
with:
ref: main
fetch-depth: 0

# 4. Get the latest version tag.
- name: Get Latest Tag
id: get_latest_tag
run: |
LATEST_TAG=$(git describe --tags $(git rev-list --tags --max-count=1) || echo "none")
echo "Latest tag: $LATEST_TAG"
echo "tag=$LATEST_TAG" >> $GITHUB_OUTPUT

# 5. Generate the Unreleased changelog.
- name: Generate Unreleased Changelog
id: generate_unreleased
uses: mikepenz/release-changelog-builder-action@v5.1.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
configuration: ".github/configuration-unreleased.json"
owner: ${{ github.repository_owner }}
repo: ${{ github.event.repository.name }}
outputFile: CHANGELOG-unreleased.md
fromTag: ${{ steps.get_latest_tag.outputs.tag }}
toTag: main

# 7. Update CHANGELOG.md with the new Unreleased section.
- name: Update CHANGELOG.md
id: update_changelog
run: |
# Create CHANGELOG.md if it doesn't exist.
if [ ! -f CHANGELOG.md ]; then
echo "# Changelog" > CHANGELOG.md
echo "" >> CHANGELOG.md
echo "All notable changes to this project will be documented in this file." >> CHANGELOG.md
echo "" >> CHANGELOG.md
fi

echo "Updating CHANGELOG.md…"

# Extract content before the "## [Unreleased]" (or first version header if missing).
if grep -q "^## \[Unreleased\]" CHANGELOG.md; then
awk '/^## \[Unreleased\]/{exit} {print}' CHANGELOG.md > CHANGELOG_TMP.md
else
awk '/^## \[v/{exit} {print}' CHANGELOG.md > CHANGELOG_TMP.md
fi

# Append the generated Unreleased changelog.
echo "" >> CHANGELOG_TMP.md
cat CHANGELOG-unreleased.md >> CHANGELOG_TMP.md
echo "" >> CHANGELOG_TMP.md

# Append the remainder of the original changelog (starting from the first version header).
awk 'f{print} /^## \[v/{f=1; print}' CHANGELOG.md >> CHANGELOG_TMP.md

# Replace the old file with the updated file.
mv CHANGELOG_TMP.md CHANGELOG.md

# Remove the temporary generated file.
rm -f CHANGELOG-unreleased.md

echo "Final CHANGELOG.md:"
cat CHANGELOG.md

# 8. Check if CHANGELOG.md has any updates.
- name: Check for CHANGELOG.md changes
id: changelog_changes
run: |
if git diff --quiet CHANGELOG.md; then
echo "has_changes=false" >> $GITHUB_OUTPUT
else
echo "has_changes=true" >> $GITHUB_OUTPUT
fi

# 9. Create (or update) the Pull Request only if there are changes.
- name: Create Pull Request
if: steps.changelog_changes.outputs.has_changes == 'true'
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.GITHUB_TOKEN }}
base: main
branch: "changelog/unreleased-update"
sign-commits: true
commit-message: "action: update Unreleased changelog"
title: "📜 docs: Unreleased Changelog"
body: |
**Description**:
- This PR updates the Unreleased section in CHANGELOG.md.
- It compares the current main branch with the latest version tag (determined as ${{ steps.get_latest_tag.outputs.tag }}),
regenerates the Unreleased changelog, removes any old Unreleased block, and inserts the new content.
.github/workflows/helmcharts.yml (53 lines changed, vendored)

@@ -4,13 +4,12 @@ name: Build Helm Charts on Tag
on:
push:
tags:
- "chart-*"
- "*"

jobs:
release:
permissions:
contents: write
packages: write
runs-on: ubuntu-latest
steps:
- name: Checkout

@@ -27,49 +26,15 @@ jobs:
uses: azure/setup-helm@v4
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"

- name: Build Subchart Deps
run: |
cd helm/librechat
helm dependency build
cd ../librechat-rag-api
helm dependency build
cd helm/librechat-rag-api
helm dependency build

- name: Get Chart Version
id: chart-version
run: |
CHART_VERSION=$(echo "${{ github.ref_name }}" | cut -d'-' -f2)
echo "CHART_VERSION=${CHART_VERSION}" >> "$GITHUB_OUTPUT"

# Log in to GitHub Container Registry
- name: Log in to GitHub Container Registry
uses: docker/login-action@v3
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.6.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

# Run Helm OCI Charts Releaser
# This is for the librechat chart
- name: Release Helm OCI Charts for librechat
uses: appany/helm-oci-chart-releaser@v0.4.2
with:
name: librechat
repository: ${{ github.actor }}/librechat-chart
tag: ${{ steps.chart-version.outputs.CHART_VERSION }}
path: helm/librechat
registry: ghcr.io
registry_username: ${{ github.actor }}
registry_password: ${{ secrets.GITHUB_TOKEN }}

# this is for the librechat-rag-api chart
- name: Release Helm OCI Charts for librechat-rag-api
uses: appany/helm-oci-chart-releaser@v0.4.2
with:
name: librechat-rag-api
repository: ${{ github.actor }}/librechat-chart
tag: ${{ steps.chart-version.outputs.CHART_VERSION }}
path: helm/librechat-rag-api
registry: ghcr.io
registry_username: ${{ github.actor }}
registry_password: ${{ secrets.GITHUB_TOKEN }}
charts_dir: helm
skip_existing: true
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
.github/workflows/i18n-unused-keys.yml (37 lines changed, vendored)

@@ -1,24 +1,16 @@
name: Detect Unused i18next Strings

# This workflow checks for unused i18n keys in translation files.
# It has special handling for:
# - com_ui_special_var_* keys that are dynamically constructed
# - com_agents_category_* keys that are stored in the database and used dynamically

on:
pull_request:
paths:
- "client/src/**"
- "api/**"
- "packages/data-provider/src/**"
- "packages/client/**"
- "packages/data-schemas/src/**"

jobs:
detect-unused-i18n-keys:
runs-on: ubuntu-latest
permissions:
pull-requests: write
pull-requests: write # Required for posting PR comments
steps:
- name: Checkout repository
uses: actions/checkout@v3

@@ -30,7 +22,7 @@ jobs:

# Define paths
I18N_FILE="client/src/locales/en/translation.json"
SOURCE_DIRS=("client/src" "api" "packages/data-provider/src" "packages/client" "packages/data-schemas/src")
SOURCE_DIRS=("client/src" "api" "packages/data-provider/src")

# Check if translation file exists
if [[ ! -f "$I18N_FILE" ]]; then

@@ -58,31 +50,6 @@ jobs:
fi
done

# Also check if the key is directly used somewhere
if [[ "$FOUND" == false ]]; then
for DIR in "${SOURCE_DIRS[@]}"; do
if grep -r --include=\*.{js,jsx,ts,tsx} -q "$KEY" "$DIR"; then
FOUND=true
break
fi
done
fi
# Special case for agent category keys that are dynamically used from database
elif [[ "$KEY" == com_agents_category_* ]]; then
# Check if agent category localization is being used
for DIR in "${SOURCE_DIRS[@]}"; do
# Check for dynamic category label/description usage
if grep -r --include=\*.{js,jsx,ts,tsx} -E "category\.(label|description).*startsWith.*['\"]com_" "$DIR" > /dev/null 2>&1 || \
# Check for the method that defines these keys
grep -r --include=\*.{js,jsx,ts,tsx} "ensureDefaultCategories" "$DIR" > /dev/null 2>&1 || \
# Check for direct usage in agentCategory.ts
grep -r --include=\*.ts -E "label:.*['\"]$KEY['\"]" "$DIR" > /dev/null 2>&1 || \
grep -r --include=\*.ts -E "description:.*['\"]$KEY['\"]" "$DIR" > /dev/null 2>&1; then
FOUND=true
break
fi
done

# Also check if the key is directly used somewhere
if [[ "$FOUND" == false ]]; then
for DIR in "${SOURCE_DIRS[@]}"; do
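The special-case handling above exists because some keys never appear verbatim in the source, so a plain grep for the key finds nothing. A sketch of the dynamic construction the workflow guards against (names are illustrative, not taken from the codebase):

```js
// A literal search for 'com_ui_special_var_user' fails, because the key
// is assembled at runtime from a prefix plus a variable name:
const key = `com_ui_special_var_${variableName}`; // e.g. 'com_ui_special_var_user'
const label = localize(key);
```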
.github/workflows/locize-i18n-sync.yml (2 lines changed, vendored)

@@ -48,7 +48,7 @@ jobs:

# 2. Download translation files from locize.
- name: Download Translations from locize
uses: locize/download@v2
uses: locize/download@v1
with:
project-id: ${{ secrets.LOCIZE_PROJECT_ID }}
path: "client/src/locales"
.github/workflows/unused-packages.yml (103 lines changed, vendored)

@@ -7,7 +7,6 @@ on:
- 'package-lock.json'
- 'client/**'
- 'api/**'
- 'packages/client/**'

jobs:
detect-unused-packages:

@@ -29,7 +28,7 @@ jobs:

- name: Validate JSON files
run: |
for FILE in package.json client/package.json api/package.json packages/client/package.json; do
for FILE in package.json client/package.json api/package.json; do
if [[ -f "$FILE" ]]; then
jq empty "$FILE" || (echo "::error title=Invalid JSON::$FILE is invalid" && exit 1)
fi

@@ -64,31 +63,12 @@ jobs:
local folder=$1
local output_file=$2
if [[ -d "$folder" ]]; then
# Extract require() statements
grep -rEho "require\(['\"]([a-zA-Z0-9@/._-]+)['\"]\)" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
grep -rEho "require\(['\"]([a-zA-Z0-9@/._-]+)['\"]\)" "$folder" --include=\*.{js,ts,mjs,cjs} | \
sed -E "s/require\(['\"]([a-zA-Z0-9@/._-]+)['\"]\)/\1/" > "$output_file"

# Extract ES6 imports - various patterns
# import x from 'module'
grep -rEho "import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
grep -rEho "import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,mjs,cjs} | \
sed -E "s/import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"

# import 'module' (side-effect imports)
grep -rEho "import ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
sed -E "s/import ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"

# export { x } from 'module' or export * from 'module'
grep -rEho "export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
sed -E "s/export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"

# import type { x } from 'module' (TypeScript)
grep -rEho "import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{ts,tsx} | \
sed -E "s/import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"

# Remove subpath imports but keep the base package
# e.g., '@tanstack/react-query/devtools' becomes '@tanstack/react-query'
sed -i -E 's|^(@?[a-zA-Z0-9-]+(/[a-zA-Z0-9-]+)?)/.*|\1|' "$output_file"

sort -u "$output_file" -o "$output_file"
else
touch "$output_file"

@@ -98,80 +78,13 @@ jobs:
extract_deps_from_code "." root_used_code.txt
extract_deps_from_code "client" client_used_code.txt
extract_deps_from_code "api" api_used_code.txt

# Extract dependencies used by @librechat/client package
extract_deps_from_code "packages/client" packages_client_used_code.txt

- name: Get @librechat/client dependencies
id: get-librechat-client-deps
run: |
if [[ -f "packages/client/package.json" ]]; then
# Get all dependencies from @librechat/client (dependencies, devDependencies, and peerDependencies)
DEPS=$(jq -r '.dependencies // {} | keys[]' packages/client/package.json 2>/dev/null || echo "")
DEV_DEPS=$(jq -r '.devDependencies // {} | keys[]' packages/client/package.json 2>/dev/null || echo "")
PEER_DEPS=$(jq -r '.peerDependencies // {} | keys[]' packages/client/package.json 2>/dev/null || echo "")

# Combine all dependencies
echo "$DEPS" > librechat_client_deps.txt
echo "$DEV_DEPS" >> librechat_client_deps.txt
echo "$PEER_DEPS" >> librechat_client_deps.txt

# Also include dependencies that are imported in packages/client
cat packages_client_used_code.txt >> librechat_client_deps.txt

# Remove empty lines and sort
grep -v '^$' librechat_client_deps.txt | sort -u > temp_deps.txt
mv temp_deps.txt librechat_client_deps.txt
else
touch librechat_client_deps.txt
fi

- name: Extract Workspace Dependencies
id: extract-workspace-deps
run: |
# Function to get dependencies from a workspace package that are used by another package
get_workspace_package_deps() {
local package_json=$1
local output_file=$2

# Get all workspace dependencies (starting with @librechat/)
if [[ -f "$package_json" ]]; then
local workspace_deps=$(jq -r '.dependencies // {} | to_entries[] | select(.key | startswith("@librechat/")) | .key' "$package_json" 2>/dev/null || echo "")

# For each workspace dependency, get its dependencies
for dep in $workspace_deps; do
# Convert @librechat/api to packages/api
local workspace_path=$(echo "$dep" | sed 's/@librechat\//packages\//')
local workspace_package_json="${workspace_path}/package.json"

if [[ -f "$workspace_package_json" ]]; then
# Extract all dependencies from the workspace package
jq -r '.dependencies // {} | keys[]' "$workspace_package_json" 2>/dev/null >> "$output_file"
# Also extract peerDependencies
jq -r '.peerDependencies // {} | keys[]' "$workspace_package_json" 2>/dev/null >> "$output_file"
fi
done
fi

if [[ -f "$output_file" ]]; then
sort -u "$output_file" -o "$output_file"
else
touch "$output_file"
fi
}

# Get workspace dependencies for each package
get_workspace_package_deps "package.json" root_workspace_deps.txt
get_workspace_package_deps "client/package.json" client_workspace_deps.txt
get_workspace_package_deps "api/package.json" api_workspace_deps.txt

- name: Run depcheck for root package.json
id: check-root
run: |
if [[ -f "package.json" ]]; then
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
# Exclude dependencies used in scripts, code, and workspace packages
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat root_used_deps.txt root_used_code.txt root_workspace_deps.txt | sort) || echo "")
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat root_used_deps.txt root_used_code.txt | sort) || echo "")
echo "ROOT_UNUSED<<EOF" >> $GITHUB_ENV
echo "$UNUSED" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV

@@ -184,10 +97,7 @@ jobs:
chmod -R 755 client
cd client
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
# Exclude dependencies used in scripts, code, and workspace packages
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../client_used_deps.txt ../client_used_code.txt ../client_workspace_deps.txt | sort) || echo "")
# Filter out false positives
UNUSED=$(echo "$UNUSED" | grep -v "^micromark-extension-llm-math$" || echo "")
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../client_used_deps.txt ../client_used_code.txt | sort) || echo "")
echo "CLIENT_UNUSED<<EOF" >> $GITHUB_ENV
echo "$UNUSED" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV

@@ -201,8 +111,7 @@ jobs:
chmod -R 755 api
cd api
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
# Exclude dependencies used in scripts, code, and workspace packages
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../api_used_deps.txt ../api_used_code.txt ../api_workspace_deps.txt | sort) || echo "")
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../api_used_deps.txt ../api_used_code.txt | sort) || echo "")
echo "API_UNUSED<<EOF" >> $GITHUB_ENV
echo "$UNUSED" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV
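A hedged JavaScript rendering of the same extraction, useful for seeing what the grep/sed pipeline above actually collects (the regexes approximate, not reproduce, the shell patterns):

```js
// Sketch: pull package names out of require()/import/export statements and
// collapse subpath imports to the base package, as the workflow does.
const fs = require('fs');

const PATTERNS = [
  /require\(['"]([a-zA-Z0-9@/._-]+)['"]\)/g, // require('pkg')
  /import .* from ['"]([a-zA-Z0-9@/._-]+)['"]/g, // import x from 'pkg'
  /import ['"]([a-zA-Z0-9@/._-]+)['"]/g, // import 'pkg' (side effect)
  /export .* from ['"]([a-zA-Z0-9@/._-]+)['"]/g, // export { x } from 'pkg'
];

function extractDeps(source) {
  const deps = new Set();
  for (const re of PATTERNS) {
    for (const match of source.matchAll(re)) {
      const name = match[1];
      // '@tanstack/react-query/devtools' -> '@tanstack/react-query'
      const parts = name.split('/');
      deps.add(name.startsWith('@') ? parts.slice(0, 2).join('/') : parts[0]);
    }
  }
  return [...deps].sort();
}

// Usage: node extract-deps.js path/to/file.js
console.log(extractDeps(fs.readFileSync(process.argv[2], 'utf8')));
```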
.gitignore (14 lines changed, vendored)

@@ -13,9 +13,6 @@ pids
*.seed
.git

# CI/CD data
test-image*

# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

@@ -58,7 +55,6 @@ bower_components/
# AI
.clineignore
.cursor
.aider*

# Floobits
.floo

@@ -128,13 +124,3 @@ helm/**/.values.yaml

# SAML Idp cert
*.cert

# AI Assistants
/.claude/
/.cursor/
/.copilot/
/.aider/
/.openai/
/.tabnine/
/.codeium
*.local.md
@@ -1,2 +1,5 @@
#!/usr/bin/env sh
set -e
. "$(dirname -- "$0")/_/husky.sh"
[ -n "$CI" ] && exit 0
npx lint-staged --config ./.husky/lint-staged.config.js
.vscode/launch.json (3 lines changed, vendored)

@@ -8,8 +8,7 @@
"skipFiles": ["<node_internals>/**"],
"program": "${workspaceFolder}/api/server/index.js",
"env": {
"NODE_ENV": "production",
"NODE_TLS_REJECT_UNAUTHORIZED": "0"
"NODE_ENV": "production"
},
"console": "integratedTerminal",
"envFile": "${workspaceFolder}/.env"
Dockerfile (21 lines changed)

@@ -1,4 +1,4 @@
# v0.8.1-rc1
# v0.7.8

# Base node image
FROM node:20-alpine AS node

@@ -19,31 +19,24 @@ WORKDIR /app

USER node

COPY --chown=node:node package.json package-lock.json ./
COPY --chown=node:node api/package.json ./api/package.json
COPY --chown=node:node client/package.json ./client/package.json
COPY --chown=node:node packages/data-provider/package.json ./packages/data-provider/package.json
COPY --chown=node:node packages/data-schemas/package.json ./packages/data-schemas/package.json
COPY --chown=node:node packages/api/package.json ./packages/api/package.json
COPY --chown=node:node . .

RUN \
# Allow mounting of these files, which have no default
touch .env ; \
# Create directories for the volumes to inherit the correct permissions
mkdir -p /app/client/public/images /app/api/logs /app/uploads ; \
mkdir -p /app/client/public/images /app/api/logs ; \
npm config set fetch-retry-maxtimeout 600000 ; \
npm config set fetch-retries 5 ; \
npm config set fetch-retry-mintimeout 15000 ; \
npm ci --no-audit

COPY --chown=node:node . .

RUN \
npm install --no-audit; \
# React client build
NODE_OPTIONS="--max-old-space-size=2048" npm run frontend; \
npm prune --production; \
npm cache clean --force

RUN mkdir -p /app/client/public/images /app/api/logs

# Node API setup
EXPOSE 3080
ENV HOST=0.0.0.0

@@ -54,4 +47,4 @@ CMD ["npm", "run", "backend"]
# WORKDIR /usr/share/nginx/html
# COPY --from=node /app/client/dist /usr/share/nginx/html
# COPY client/nginx.conf /etc/nginx/conf.d/default.conf
# ENTRYPOINT ["nginx", "-g", "daemon off;"]
# ENTRYPOINT ["nginx", "-g", "daemon off;"]
Dockerfile.multi

@@ -1,5 +1,5 @@
# Dockerfile.multi
# v0.8.1-rc1
# v0.7.8

# Base for all builds
FROM node:20-alpine AS base-min

@@ -14,9 +14,8 @@ RUN npm config set fetch-retry-maxtimeout 600000 && \
npm config set fetch-retry-mintimeout 15000
COPY package*.json ./
COPY packages/data-provider/package*.json ./packages/data-provider/
COPY packages/api/package*.json ./packages/api/
COPY packages/mcp/package*.json ./packages/mcp/
COPY packages/data-schemas/package*.json ./packages/data-schemas/
COPY packages/client/package*.json ./packages/client/
COPY client/package*.json ./client/
COPY api/package*.json ./api/

@@ -25,40 +24,31 @@ FROM base-min AS base
WORKDIR /app
RUN npm ci

# Build `data-provider` package
# Build data-provider
FROM base AS data-provider-build
WORKDIR /app/packages/data-provider
COPY packages/data-provider ./
RUN npm run build

# Build `data-schemas` package
# Build mcp package
FROM base AS mcp-build
WORKDIR /app/packages/mcp
COPY packages/mcp ./
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
RUN npm run build

# Build data-schemas
FROM base AS data-schemas-build
WORKDIR /app/packages/data-schemas
COPY packages/data-schemas ./
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
RUN npm run build

# Build `api` package
FROM base AS api-package-build
WORKDIR /app/packages/api
COPY packages/api ./
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
COPY --from=data-schemas-build /app/packages/data-schemas/dist /app/packages/data-schemas/dist
RUN npm run build

# Build `client` package
FROM base AS client-package-build
WORKDIR /app/packages/client
COPY packages/client ./
RUN npm run build

# Client build
FROM base AS client-build
WORKDIR /app/client
COPY client ./
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
COPY --from=client-package-build /app/packages/client/dist /app/packages/client/dist
COPY --from=client-package-build /app/packages/client/src /app/packages/client/src
ENV NODE_OPTIONS="--max-old-space-size=2048"
RUN npm run build

@@ -73,8 +63,8 @@ RUN npm ci --omit=dev
COPY api ./api
COPY config ./config
COPY --from=data-provider-build /app/packages/data-provider/dist ./packages/data-provider/dist
COPY --from=mcp-build /app/packages/mcp/dist ./packages/mcp/dist
COPY --from=data-schemas-build /app/packages/data-schemas/dist ./packages/data-schemas/dist
COPY --from=api-package-build /app/packages/api/dist ./packages/api/dist
COPY --from=client-build /app/client/dist ./client/dist
WORKDIR /app/api
EXPOSE 3080
README.md (25 lines changed)

@@ -52,7 +52,7 @@
- 🖥️ **UI & Experience** inspired by ChatGPT with enhanced design and features

- 🤖 **AI Model Selection**:
- Anthropic (Claude), AWS Bedrock, OpenAI, Azure OpenAI, Google, Vertex AI, OpenAI Responses API (incl. Azure)
- Anthropic (Claude), AWS Bedrock, OpenAI, Azure OpenAI, Google, Vertex AI, OpenAI Assistants API (incl. Azure)
- [Custom Endpoints](https://www.librechat.ai/docs/quick_start/custom_endpoints): Use any OpenAI-compatible API with LibreChat, no proxy required
- Compatible with [Local & Remote AI Providers](https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints):
- Ollama, groq, Cohere, Mistral AI, Apple MLX, koboldcpp, together.ai,

@@ -65,17 +65,15 @@

- 🔦 **Agents & Tools Integration**:
- **[LibreChat Agents](https://www.librechat.ai/docs/features/agents)**:
- No-Code Custom Assistants: Build specialized, AI-driven helpers
- Agent Marketplace: Discover and deploy community-built agents
- Collaborative Sharing: Share agents with specific users and groups
- Flexible & Extensible: Use MCP Servers, tools, file search, code execution, and more
- Compatible with Custom Endpoints, OpenAI, Azure, Anthropic, AWS Bedrock, Google, Vertex AI, Responses API, and more
- No-Code Custom Assistants: Build specialized, AI-driven helpers without coding
- Flexible & Extensible: Attach tools like DALL-E-3, file search, code execution, and more
- Compatible with Custom Endpoints, OpenAI, Azure, Anthropic, AWS Bedrock, and more
- [Model Context Protocol (MCP) Support](https://modelcontextprotocol.io/clients#librechat) for Tools
- Use LibreChat Agents and OpenAI Assistants with Files, Code Interpreter, Tools, and API Actions

- 🔍 **Web Search**:
- Search the internet and retrieve relevant information to enhance your AI context
- Combines search providers, content scrapers, and result rerankers for optimal results
- **Customizable Jina Reranking**: Configure custom Jina API URLs for reranking services
- **[Learn More →](https://www.librechat.ai/docs/features/web_search)**

- 🪄 **Generative UI with Code Artifacts**:

@@ -90,18 +88,15 @@
- Create, Save, & Share Custom Presets
- Switch between AI Endpoints and Presets mid-chat
- Edit, Resubmit, and Continue Messages with Conversation branching
- Create and share prompts with specific users and groups
- [Fork Messages & Conversations](https://www.librechat.ai/docs/features/fork) for Advanced Context control

- 💬 **Multimodal & File Interactions**:
- Upload and analyze images with Claude 3, GPT-4.5, GPT-4o, o1, Llama-Vision, and Gemini 📸
- Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, AWS Bedrock, & Google 🗃️

- 🌎 **Multilingual UI**:
- English, 中文 (简体), 中文 (繁體), العربية, Deutsch, Español, Français, Italiano
- Polski, Português (PT), Português (BR), Русский, 日本語, Svenska, 한국어, Tiếng Việt
- Türkçe, Nederlands, עברית, Català, Čeština, Dansk, Eesti, فارسی
- Suomi, Magyar, Հայերեն, Bahasa Indonesia, ქართული, Latviešu, ไทย, ئۇيغۇرچە
- 🌎 **Multilingual UI**:
- English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro
- Русский, 日本語, Svenska, 한국어, Tiếng Việt, 繁體中文, العربية, Türkçe, Nederlands, עברית

- 🧠 **Reasoning UI**:
- Dynamic Reasoning UI for Chain-of-Thought/Reasoning AI models like DeepSeek-R1

@@ -155,8 +150,8 @@ Click on the thumbnail to open the video☝️

**Other:**
- **Website:** [librechat.ai](https://librechat.ai)
- **Documentation:** [librechat.ai/docs](https://librechat.ai/docs)
- **Blog:** [librechat.ai/blog](https://librechat.ai/blog)
- **Documentation:** [docs.librechat.ai](https://docs.librechat.ai)
- **Blog:** [blog.librechat.ai](https://blog.librechat.ai)

---
@@ -1,5 +1,4 @@
const Anthropic = require('@anthropic-ai/sdk');
const { logger } = require('@librechat/data-schemas');
const { HttpsProxyAgent } = require('https-proxy-agent');
const {
  Constants,
@@ -10,18 +9,7 @@ const {
  getResponseSender,
  validateVisionModel,
} = require('librechat-data-provider');
const { sleep, SplitStreamHandler: _Handler } = require('@librechat/agents');
const {
  Tokenizer,
  createFetch,
  matchModelName,
  getClaudeHeaders,
  getModelMaxTokens,
  configureReasoning,
  checkPromptCacheSupport,
  getModelMaxOutputTokens,
  createStreamEventHandlers,
} = require('@librechat/api');
const { SplitStreamHandler: _Handler } = require('@librechat/agents');
const {
  truncateText,
  formatMessage,
@@ -30,9 +18,19 @@ const {
  parseParamFromPrompt,
  createContextHandlers,
} = require('./prompts');
const {
  getClaudeHeaders,
  configureReasoning,
  checkPromptCacheSupport,
} = require('~/server/services/Endpoints/anthropic/helpers');
const { getModelMaxTokens, getModelMaxOutputTokens, matchModelName } = require('~/utils');
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { createFetch, createStreamEventHandlers } = require('./generators');
const Tokenizer = require('~/server/services/Tokenizer');
const { sleep } = require('~/server/utils');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');

const HUMAN_PROMPT = '\n\nHuman:';
const AI_PROMPT = '\n\nAssistant:';
@@ -193,11 +191,10 @@ class AnthropicClient extends BaseClient {
        reverseProxyUrl: this.options.reverseProxyUrl,
      }),
      apiKey: this.apiKey,
      fetchOptions: {},
    };

    if (this.options.proxy) {
      options.fetchOptions.agent = new HttpsProxyAgent(this.options.proxy);
      options.httpAgent = new HttpsProxyAgent(this.options.proxy);
    }

    if (this.options.reverseProxyUrl) {
@@ -1,31 +1,22 @@
const crypto = require('crypto');
const fetch = require('node-fetch');
const { logger } = require('@librechat/data-schemas');
const {
  getBalanceConfig,
  extractFileContext,
  encodeAndFormatAudios,
  encodeAndFormatVideos,
  encodeAndFormatDocuments,
} = require('@librechat/api');
const {
  Constants,
  ErrorTypes,
  FileSources,
  supportsBalanceCheck,
  isAgentsEndpoint,
  isParamEndpoint,
  EModelEndpoint,
  ContentTypes,
  excludedKeys,
  EModelEndpoint,
  isParamEndpoint,
  isAgentsEndpoint,
  supportsBalanceCheck,
  ErrorTypes,
  Constants,
} = require('librechat-data-provider');
const { getMessages, saveMessage, updateMessage, saveConvo, getConvo } = require('~/models');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { checkBalance } = require('~/models/balanceMethods');
const { truncateToolCallOutputs } = require('./prompts');
const countTokens = require('~/server/utils/countTokens');
const { addSpaceIfNeeded } = require('~/server/utils');
const { getFiles } = require('~/models/File');
const TextStream = require('./TextStream');
const { logger } = require('~/config');

class BaseClient {
  constructor(apiKey, options = {}) {
@@ -47,8 +38,6 @@ class BaseClient {
    this.conversationId;
    /** @type {string} */
    this.responseMessageId;
    /** @type {string} */
    this.parentMessageId;
    /** @type {TAttachment[]} */
    this.attachments;
    /** The key for the usage object's input tokens
@@ -120,17 +109,12 @@ class BaseClient {
  /**
   * Abstract method to record token usage. Subclasses must implement this method.
   * If a correction to the token usage is needed, the method should return an object with the corrected token counts.
   * Should only be used if `recordCollectedUsage` was not used instead.
   * @param {string} [model]
   * @param {AppConfig['balance']} [balance]
   * @param {number} promptTokens
   * @param {number} completionTokens
   * @returns {Promise<void>}
   */
  async recordTokenUsage({ model, balance, promptTokens, completionTokens }) {
  async recordTokenUsage({ promptTokens, completionTokens }) {
    logger.debug('[BaseClient] `recordTokenUsage` not implemented.', {
      model,
      balance,
      promptTokens,
      completionTokens,
    });
@@ -199,8 +183,7 @@ class BaseClient {
    this.user = user;
    const saveOptions = this.getSaveOptions();
    this.abortController = opts.abortController ?? new AbortController();
    const requestConvoId = overrideConvoId ?? opts.conversationId;
    const conversationId = requestConvoId ?? crypto.randomUUID();
    const conversationId = overrideConvoId ?? opts.conversationId ?? crypto.randomUUID();
    const parentMessageId = opts.parentMessageId ?? Constants.NO_PARENT;
    const userMessageId =
      overrideUserMessageId ?? opts.overrideParentMessageId ?? crypto.randomUUID();
@@ -215,22 +198,17 @@
      this.currentMessages[this.currentMessages.length - 1].messageId = head;
    }

    if (opts.isRegenerate && responseMessageId.endsWith('_')) {
      responseMessageId = crypto.randomUUID();
    }

    this.responseMessageId = responseMessageId;

    return {
      ...opts,
      user,
      head,
      saveOptions,
      userMessageId,
      requestConvoId,
      conversationId,
      parentMessageId,
      userMessageId,
      responseMessageId,
      saveOptions,
    };
  }

@@ -249,12 +227,11 @@ class BaseClient {
    const {
      user,
      head,
      saveOptions,
      userMessageId,
      requestConvoId,
      conversationId,
      parentMessageId,
      userMessageId,
      responseMessageId,
      saveOptions,
    } = await this.setMessageOptions(opts);

    const userMessage = opts.isEdited
@@ -276,8 +253,7 @@
    }

    if (typeof opts?.onStart === 'function') {
      const isNewConvo = !requestConvoId && parentMessageId === Constants.NO_PARENT;
      opts.onStart(userMessage, responseMessageId, isNewConvo);
      opts.onStart(userMessage, responseMessageId);
    }

    return {
@@ -583,7 +559,6 @@ class BaseClient {
  }

  async sendMessage(message, opts = {}) {
    const appConfig = this.options.req?.config;
    /** @type {Promise<TMessage>} */
    let userMessagePromise;
    const { user, head, isEdited, conversationId, responseMessageId, saveOptions, userMessage } =
@@ -597,7 +572,7 @@ class BaseClient {
      });
    }

    const { editedContent } = opts;
    const { generation = '' } = opts;

    // It's not necessary to push to currentMessages
    // depending on subclass implementation of handling messages
@@ -612,40 +587,26 @@ class BaseClient {
        isCreatedByUser: false,
        model: this.modelOptions?.model ?? this.model,
        sender: this.sender,
        text: generation,
      };
      this.currentMessages.push(userMessage, latestMessage);
    } else if (editedContent != null) {
      // Handle editedContent for content parts
      if (editedContent && latestMessage.content && Array.isArray(latestMessage.content)) {
        const { index, text, type } = editedContent;
        if (index >= 0 && index < latestMessage.content.length) {
          const contentPart = latestMessage.content[index];
          if (type === ContentTypes.THINK && contentPart.type === ContentTypes.THINK) {
            contentPart[ContentTypes.THINK] = text;
          } else if (type === ContentTypes.TEXT && contentPart.type === ContentTypes.TEXT) {
            contentPart[ContentTypes.TEXT] = text;
          }
        }
      }
    } else {
      latestMessage.text = generation;
    }
      this.continued = true;
    } else {
      this.currentMessages.push(userMessage);
    }

    /**
     * When the userMessage is pushed to currentMessages, the parentMessage is the userMessageId.
     * this only matters when buildMessages is utilizing the parentMessageId, and may vary on implementation
     */
    const parentMessageId = isEdited ? head : userMessage.messageId;
    this.parentMessageId = parentMessageId;
    let {
      prompt: payload,
      tokenCountMap,
      promptTokens,
    } = await this.buildMessages(
      this.currentMessages,
      parentMessageId,
      // When the userMessage is pushed to currentMessages, the parentMessage is the userMessageId.
      // this only matters when buildMessages is utilizing the parentMessageId, and may vary on implementation
      isEdited ? head : userMessage.messageId,
      this.getBuildMessagesOptions(opts),
      opts,
    );
@@ -670,9 +631,9 @@
      }
    }

    const balanceConfig = getBalanceConfig(appConfig);
    const balance = this.options.req?.app?.locals?.balance;
    if (
      balanceConfig?.enabled &&
      balance?.enabled &&
      supportsBalanceCheck[this.options.endpointType ?? this.options.endpoint]
    ) {
      await checkBalance({
@@ -711,32 +672,16 @@
    };

    if (typeof completion === 'string') {
      responseMessage.text = completion;
      responseMessage.text = addSpaceIfNeeded(generation) + completion;
    } else if (
      Array.isArray(completion) &&
      (this.clientName === EModelEndpoint.agents ||
        isParamEndpoint(this.options.endpoint, this.options.endpointType))
    ) {
      responseMessage.text = '';

      if (!opts.editedContent || this.currentMessages.length === 0) {
        responseMessage.content = completion;
      } else {
        const latestMessage = this.currentMessages[this.currentMessages.length - 1];
        if (!latestMessage?.content) {
          responseMessage.content = completion;
        } else {
          const existingContent = [...latestMessage.content];
          const { type: editedType } = opts.editedContent;
          responseMessage.content = this.mergeEditedContent(
            existingContent,
            completion,
            editedType,
          );
        }
      }
      responseMessage.content = completion;
    } else if (Array.isArray(completion)) {
      responseMessage.text = completion.join('');
      responseMessage.text = addSpaceIfNeeded(generation) + completion.join('');
    }

    if (
@@ -767,14 +712,9 @@
    } else {
      responseMessage.tokenCount = this.getTokenCountForResponse(responseMessage);
      completionTokens = responseMessage.tokenCount;
      await this.recordTokenUsage({
        usage,
        promptTokens,
        completionTokens,
        balance: balanceConfig,
        model: responseMessage.model,
      });
    }

    await this.recordTokenUsage({ promptTokens, completionTokens, usage });
    }

    if (userMessagePromise) {
@@ -852,8 +792,7 @@ class BaseClient {

    userMessage.tokenCount = userMessageTokenCount;
    /*
      Note: `AgentController` saves the user message if not saved here
      (noted by `savedMessageIds`), so we update the count of its `userMessage` reference
      Note: `AskController` saves the user message, so we update the count of its `userMessage` reference
    */
    if (typeof opts?.getReqData === 'function') {
      opts.getReqData({
@@ -862,8 +801,7 @@ class BaseClient {
    }
    /*
      Note: we update the user message to be sure it gets the calculated token count;
      though `AgentController` saves the user message if not saved here
      (noted by `savedMessageIds`), EditController does not
      though `AskController` saves the user message, EditController does not
    */
    await userMessagePromise;
    await this.updateMessageInDatabase({
@@ -1155,50 +1093,6 @@ class BaseClient {
    return numTokens;
  }

  /**
   * Merges completion content with existing content when editing TEXT or THINK types
   * @param {Array} existingContent - The existing content array
   * @param {Array} newCompletion - The new completion content
   * @param {string} editedType - The type of content being edited
   * @returns {Array} The merged content array
   */
  mergeEditedContent(existingContent, newCompletion, editedType) {
    if (!newCompletion.length) {
      return existingContent.concat(newCompletion);
    }

    if (editedType !== ContentTypes.TEXT && editedType !== ContentTypes.THINK) {
      return existingContent.concat(newCompletion);
    }

    const lastIndex = existingContent.length - 1;
    const lastExisting = existingContent[lastIndex];
    const firstNew = newCompletion[0];

    if (lastExisting?.type !== firstNew?.type || firstNew?.type !== editedType) {
      return existingContent.concat(newCompletion);
    }

    const mergedContent = [...existingContent];
    if (editedType === ContentTypes.TEXT) {
      mergedContent[lastIndex] = {
        ...mergedContent[lastIndex],
        [ContentTypes.TEXT]:
          (mergedContent[lastIndex][ContentTypes.TEXT] || '') + (firstNew[ContentTypes.TEXT] || ''),
      };
    } else {
      mergedContent[lastIndex] = {
        ...mergedContent[lastIndex],
        [ContentTypes.THINK]:
          (mergedContent[lastIndex][ContentTypes.THINK] || '') +
          (firstNew[ContentTypes.THINK] || ''),
      };
    }

    // Add remaining completion items
    return mergedContent.concat(newCompletion.slice(1));
  }
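
  // Illustrative sketch of the merge above (hypothetical values; assumes
  // ContentTypes.TEXT is the string 'text'): editing the trailing TEXT part
  // appends the regenerated text to it rather than duplicating the part.
  //   existing:   [{ type: 'text', text: 'Hello' }]
  //   completion: [{ type: 'text', text: ' world' }, { type: 'text', text: '!' }]
  //   merged:     [{ type: 'text', text: 'Hello world' }, { type: 'text', text: '!' }]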

  async sendPayload(payload, opts = {}) {
    if (opts && typeof opts === 'object') {
      this.setOptions(opts);
@@ -1207,135 +1101,8 @@ class BaseClient {
    return await this.sendCompletion(payload, opts);
  }

  async addDocuments(message, attachments) {
    const documentResult = await encodeAndFormatDocuments(
      this.options.req,
      attachments,
      {
        provider: this.options.agent?.provider,
        useResponsesApi: this.options.agent?.model_parameters?.useResponsesApi,
      },
      getStrategyFunctions,
    );
    message.documents =
      documentResult.documents && documentResult.documents.length
        ? documentResult.documents
        : undefined;
    return documentResult.files;
  }

  async addVideos(message, attachments) {
    const videoResult = await encodeAndFormatVideos(
      this.options.req,
      attachments,
      this.options.agent.provider,
      getStrategyFunctions,
    );
    message.videos =
      videoResult.videos && videoResult.videos.length ? videoResult.videos : undefined;
    return videoResult.files;
  }

  async addAudios(message, attachments) {
    const audioResult = await encodeAndFormatAudios(
      this.options.req,
      attachments,
      this.options.agent.provider,
      getStrategyFunctions,
    );
    message.audios =
      audioResult.audios && audioResult.audios.length ? audioResult.audios : undefined;
    return audioResult.files;
  }

  /**
   * Extracts text context from attachments and sets it on the message.
   * This handles text that was already extracted from files (OCR, transcriptions, document text, etc.)
   * @param {TMessage} message - The message to add context to
   * @param {MongoFile[]} attachments - Array of file attachments
   * @returns {Promise<void>}
   */
  async addFileContextToMessage(message, attachments) {
    const fileContext = await extractFileContext({
      attachments,
      req: this.options?.req,
      tokenCountFn: (text) => countTokens(text),
    });

    if (fileContext) {
      message.fileContext = fileContext;
    }
  }

  async processAttachments(message, attachments) {
    const categorizedAttachments = {
      images: [],
      videos: [],
      audios: [],
      documents: [],
    };

    const allFiles = [];

    for (const file of attachments) {
      /** @type {FileSources} */
      const source = file.source ?? FileSources.local;
      if (source === FileSources.text) {
        allFiles.push(file);
        continue;
      }
      if (file.embedded === true || file.metadata?.fileIdentifier != null) {
        allFiles.push(file);
        continue;
      }

      if (file.type.startsWith('image/')) {
        categorizedAttachments.images.push(file);
      } else if (file.type === 'application/pdf') {
        categorizedAttachments.documents.push(file);
        allFiles.push(file);
      } else if (file.type.startsWith('video/')) {
        categorizedAttachments.videos.push(file);
        allFiles.push(file);
      } else if (file.type.startsWith('audio/')) {
        categorizedAttachments.audios.push(file);
        allFiles.push(file);
      }
    }

    const [imageFiles] = await Promise.all([
      categorizedAttachments.images.length > 0
        ? this.addImageURLs(message, categorizedAttachments.images)
        : Promise.resolve([]),
      categorizedAttachments.documents.length > 0
        ? this.addDocuments(message, categorizedAttachments.documents)
        : Promise.resolve([]),
      categorizedAttachments.videos.length > 0
        ? this.addVideos(message, categorizedAttachments.videos)
        : Promise.resolve([]),
      categorizedAttachments.audios.length > 0
        ? this.addAudios(message, categorizedAttachments.audios)
        : Promise.resolve([]),
    ]);

    allFiles.push(...imageFiles);

    const seenFileIds = new Set();
    const uniqueFiles = [];

    for (const file of allFiles) {
      if (file.file_id && !seenFileIds.has(file.file_id)) {
        seenFileIds.add(file.file_id);
        uniqueFiles.push(file);
      } else if (!file.file_id) {
        uniqueFiles.push(file);
      }
    }

    return uniqueFiles;
  }
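
  // Sketch of the flow above (hypothetical input): given an image, a PDF, and
  // a text-source file, the image is encoded via addImageURLs, the PDF via
  // addDocuments, the text file passes through untouched, and the combined
  // result is de-duplicated by file_id before being returned.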

  /**
   *
   * @param {TMessage[]} _messages
   * @returns {Promise<TMessage[]>}
   */
@@ -1384,8 +1151,7 @@ class BaseClient {
      {},
    );

    await this.addFileContextToMessage(message, files);
    await this.processAttachments(message, files);
    await this.addImageURLs(message, files, this.visionMode);

    this.message_file_map[message.messageId] = files;
    return message;
804 api/app/clients/ChatGPTClient.js Normal file
@@ -0,0 +1,804 @@
const { Keyv } = require('keyv');
const crypto = require('crypto');
const { CohereClient } = require('cohere-ai');
const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
  ImageDetail,
  EModelEndpoint,
  resolveHeaders,
  CohereConstants,
  mapModelToAzureConfig,
} = require('librechat-data-provider');
const { extractBaseURL, constructAzureURL, genAzureChatCompletion } = require('~/utils');
const { createContextHandlers } = require('./prompts');
const { createCoherePayload } = require('./llm');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');

const CHATGPT_MODEL = 'gpt-3.5-turbo';
const tokenizersCache = {};

class ChatGPTClient extends BaseClient {
  constructor(apiKey, options = {}, cacheOptions = {}) {
    super(apiKey, options, cacheOptions);

    cacheOptions.namespace = cacheOptions.namespace || 'chatgpt';
    this.conversationsCache = new Keyv(cacheOptions);
    this.setOptions(options);
  }

  setOptions(options) {
    if (this.options && !this.options.replaceOptions) {
      // nested options aren't spread properly, so we need to do this manually
      this.options.modelOptions = {
        ...this.options.modelOptions,
        ...options.modelOptions,
      };
      delete options.modelOptions;
      // now we can merge options
      this.options = {
        ...this.options,
        ...options,
      };
    } else {
      this.options = options;
    }

    if (this.options.openaiApiKey) {
      this.apiKey = this.options.openaiApiKey;
    }

    const modelOptions = this.options.modelOptions || {};
    this.modelOptions = {
      ...modelOptions,
      // set some good defaults (check for undefined in some cases because they may be 0)
      model: modelOptions.model || CHATGPT_MODEL,
      temperature: typeof modelOptions.temperature === 'undefined' ? 0.8 : modelOptions.temperature,
      top_p: typeof modelOptions.top_p === 'undefined' ? 1 : modelOptions.top_p,
      presence_penalty:
        typeof modelOptions.presence_penalty === 'undefined' ? 1 : modelOptions.presence_penalty,
      stop: modelOptions.stop,
    };

    this.isChatGptModel = this.modelOptions.model.includes('gpt-');
    const { isChatGptModel } = this;
    this.isUnofficialChatGptModel =
      this.modelOptions.model.startsWith('text-chat') ||
      this.modelOptions.model.startsWith('text-davinci-002-render');
    const { isUnofficialChatGptModel } = this;

    // Davinci models have a max context length of 4097 tokens.
    this.maxContextTokens = this.options.maxContextTokens || (isChatGptModel ? 4095 : 4097);
    // I decided to reserve 1024 tokens for the response.
    // The max prompt tokens is determined by the max context tokens minus the max response tokens.
    // Earlier messages will be dropped until the prompt is within the limit.
    this.maxResponseTokens = this.modelOptions.max_tokens || 1024;
    this.maxPromptTokens =
      this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;

    if (this.maxPromptTokens + this.maxResponseTokens > this.maxContextTokens) {
      throw new Error(
        `maxPromptTokens + max_tokens (${this.maxPromptTokens} + ${this.maxResponseTokens} = ${
          this.maxPromptTokens + this.maxResponseTokens
        }) must be less than or equal to maxContextTokens (${this.maxContextTokens})`,
      );
    }

    this.userLabel = this.options.userLabel || 'User';
    this.chatGptLabel = this.options.chatGptLabel || 'ChatGPT';

    if (isChatGptModel) {
      // Use these faux tokens to help the AI understand the context since we are building the chat log ourselves.
      // Trying to use "<|im_start|>" causes the AI to still generate "<" or "<|" at the end sometimes for some reason,
      // without tripping the stop sequences, so I'm using "||>" instead.
      this.startToken = '||>';
      this.endToken = '';
      this.gptEncoder = this.constructor.getTokenizer('cl100k_base');
    } else if (isUnofficialChatGptModel) {
      this.startToken = '<|im_start|>';
      this.endToken = '<|im_end|>';
      this.gptEncoder = this.constructor.getTokenizer('text-davinci-003', true, {
        '<|im_start|>': 100264,
        '<|im_end|>': 100265,
      });
    } else {
      // Previously I was trying to use "<|endoftext|>" but there seems to be some bug with OpenAI's token counting
      // system that causes only the first "<|endoftext|>" to be counted as 1 token, and the rest are not treated
      // as a single token. So we're using this instead.
      this.startToken = '||>';
      this.endToken = '';
      try {
        this.gptEncoder = this.constructor.getTokenizer(this.modelOptions.model, true);
      } catch {
        this.gptEncoder = this.constructor.getTokenizer('text-davinci-003', true);
      }
    }

    if (!this.modelOptions.stop) {
      const stopTokens = [this.startToken];
      if (this.endToken && this.endToken !== this.startToken) {
        stopTokens.push(this.endToken);
      }
      stopTokens.push(`\n${this.userLabel}:`);
      stopTokens.push('<|diff_marker|>');
      // I chose not to do one for `chatGptLabel` because I've never seen it happen
      this.modelOptions.stop = stopTokens;
    }

    if (this.options.reverseProxyUrl) {
      this.completionsUrl = this.options.reverseProxyUrl;
    } else if (isChatGptModel) {
      this.completionsUrl = 'https://api.openai.com/v1/chat/completions';
    } else {
      this.completionsUrl = 'https://api.openai.com/v1/completions';
    }

    return this;
  }

  static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {
    if (tokenizersCache[encoding]) {
      return tokenizersCache[encoding];
    }
    let tokenizer;
    if (isModelName) {
      tokenizer = encodingForModel(encoding, extendSpecialTokens);
    } else {
      tokenizer = getEncoding(encoding, extendSpecialTokens);
    }
    tokenizersCache[encoding] = tokenizer;
    return tokenizer;
  }
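
  // Usage sketch (illustrative): tokenizers are created once, then served from
  // the module-level cache, so repeated lookups are cheap:
  //   const enc = ChatGPTClient.getTokenizer('cl100k_base');
  //   const tokens = enc.encode('hello world', 'all').length;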

  /** @type {getCompletion} */
  async getCompletion(input, onProgress, onTokenProgress, abortController = null) {
    if (!abortController) {
      abortController = new AbortController();
    }

    let modelOptions = { ...this.modelOptions };
    if (typeof onProgress === 'function') {
      modelOptions.stream = true;
    }
    if (this.isChatGptModel) {
      modelOptions.messages = input;
    } else {
      modelOptions.prompt = input;
    }

    if (this.useOpenRouter && modelOptions.prompt) {
      delete modelOptions.stop;
    }

    const { debug } = this.options;
    let baseURL = this.completionsUrl;
    if (debug) {
      console.debug();
      console.debug(baseURL);
      console.debug(modelOptions);
      console.debug();
    }

    const opts = {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      },
    };

    if (this.isVisionModel) {
      modelOptions.max_tokens = 4000;
    }

    /** @type {TAzureConfig | undefined} */
    const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];

    const isAzure = this.azure || this.options.azure;
    if (
      (isAzure && this.isVisionModel && azureConfig) ||
      (azureConfig && this.isVisionModel && this.options.endpoint === EModelEndpoint.azureOpenAI)
    ) {
      const { modelGroupMap, groupMap } = azureConfig;
      const {
        azureOptions,
        baseURL,
        headers = {},
        serverless,
      } = mapModelToAzureConfig({
        modelName: modelOptions.model,
        modelGroupMap,
        groupMap,
      });
      opts.headers = resolveHeaders(headers);
      this.langchainProxy = extractBaseURL(baseURL);
      this.apiKey = azureOptions.azureOpenAIApiKey;

      const groupName = modelGroupMap[modelOptions.model].group;
      this.options.addParams = azureConfig.groupMap[groupName].addParams;
      this.options.dropParams = azureConfig.groupMap[groupName].dropParams;
      // Note: `forcePrompt` not re-assigned as only chat models are vision models

      this.azure = !serverless && azureOptions;
      this.azureEndpoint =
        !serverless && genAzureChatCompletion(this.azure, modelOptions.model, this);
      if (serverless === true) {
        this.options.defaultQuery = azureOptions.azureOpenAIApiVersion
          ? { 'api-version': azureOptions.azureOpenAIApiVersion }
          : undefined;
        this.options.headers['api-key'] = this.apiKey;
      }
    }

    if (this.options.defaultQuery) {
      opts.defaultQuery = this.options.defaultQuery;
    }

    if (this.options.headers) {
      opts.headers = { ...opts.headers, ...this.options.headers };
    }

    if (isAzure) {
      // Azure does not accept `model` in the body, so we need to remove it.
      delete modelOptions.model;

      baseURL = this.langchainProxy
        ? constructAzureURL({
            baseURL: this.langchainProxy,
            azureOptions: this.azure,
          })
        : this.azureEndpoint.split(/(?<!\/)\/(chat|completion)\//)[0];

      if (this.options.forcePrompt) {
        baseURL += '/completions';
      } else {
        baseURL += '/chat/completions';
      }

      opts.defaultQuery = { 'api-version': this.azure.azureOpenAIApiVersion };
      opts.headers = { ...opts.headers, 'api-key': this.apiKey };
    } else if (this.apiKey) {
      opts.headers.Authorization = `Bearer ${this.apiKey}`;
    }

    if (process.env.OPENAI_ORGANIZATION) {
      opts.headers['OpenAI-Organization'] = process.env.OPENAI_ORGANIZATION;
    }

    if (this.useOpenRouter) {
      opts.headers['HTTP-Referer'] = 'https://librechat.ai';
      opts.headers['X-Title'] = 'LibreChat';
    }

    /* hacky fixes for Mistral AI API:
      - Re-orders system message to the top of the messages payload, as not allowed anywhere else
      - If there is only one message and it's a system message, change the role to user
    */
    if (baseURL.includes('https://api.mistral.ai/v1') && modelOptions.messages) {
      const { messages } = modelOptions;

      const systemMessageIndex = messages.findIndex((msg) => msg.role === 'system');

      if (systemMessageIndex > 0) {
        const [systemMessage] = messages.splice(systemMessageIndex, 1);
        messages.unshift(systemMessage);
      }

      modelOptions.messages = messages;

      if (messages.length === 1 && messages[0].role === 'system') {
        modelOptions.messages[0].role = 'user';
      }
    }
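
    // Illustrative example (hypothetical payload) of the reordering above:
    //   before: [{ role: 'user', ... }, { role: 'system', ... }]
    //   after:  [{ role: 'system', ... }, { role: 'user', ... }]
    // and a payload containing only a system message is re-labeled as 'user'.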

    if (this.options.addParams && typeof this.options.addParams === 'object') {
      modelOptions = {
        ...modelOptions,
        ...this.options.addParams,
      };
      logger.debug('[ChatGPTClient] chatCompletion: added params', {
        addParams: this.options.addParams,
        modelOptions,
      });
    }

    if (this.options.dropParams && Array.isArray(this.options.dropParams)) {
      this.options.dropParams.forEach((param) => {
        delete modelOptions[param];
      });
      logger.debug('[ChatGPTClient] chatCompletion: dropped params', {
        dropParams: this.options.dropParams,
        modelOptions,
      });
    }

    if (baseURL.startsWith(CohereConstants.API_URL)) {
      const payload = createCoherePayload({ modelOptions });
      return await this.cohereChatCompletion({ payload, onTokenProgress });
    }

    if (baseURL.includes('v1') && !baseURL.includes('/completions') && !this.isChatCompletion) {
      baseURL = baseURL.split('v1')[0] + 'v1/completions';
    } else if (
      baseURL.includes('v1') &&
      !baseURL.includes('/chat/completions') &&
      this.isChatCompletion
    ) {
      baseURL = baseURL.split('v1')[0] + 'v1/chat/completions';
    }

    const BASE_URL = new URL(baseURL);
    if (opts.defaultQuery) {
      Object.entries(opts.defaultQuery).forEach(([key, value]) => {
        BASE_URL.searchParams.append(key, value);
      });
      delete opts.defaultQuery;
    }

    const completionsURL = BASE_URL.toString();
    opts.body = JSON.stringify(modelOptions);

    if (modelOptions.stream) {

      return new Promise(async (resolve, reject) => {
        try {
          let done = false;
          await fetchEventSource(completionsURL, {
            ...opts,
            signal: abortController.signal,
            async onopen(response) {
              if (response.status === 200) {
                return;
              }
              if (debug) {
                console.debug(response);
              }
              let error;
              try {
                const body = await response.text();
                error = new Error(`Failed to send message. HTTP ${response.status} - ${body}`);
                error.status = response.status;
                error.json = JSON.parse(body);
              } catch {
                error = error || new Error(`Failed to send message. HTTP ${response.status}`);
              }
              throw error;
            },
            onclose() {
              if (debug) {
                console.debug('Server closed the connection unexpectedly, returning...');
              }
              // workaround for private API not sending [DONE] event
              if (!done) {
                onProgress('[DONE]');
                resolve();
              }
            },
            onerror(err) {
              if (debug) {
                console.debug(err);
              }
              // rethrow to stop the operation
              throw err;
            },
            onmessage(message) {
              if (debug) {
                console.debug(message);
              }
              if (!message.data || message.event === 'ping') {
                return;
              }
              if (message.data === '[DONE]') {
                onProgress('[DONE]');
                resolve();
                done = true;
                return;
              }
              onProgress(JSON.parse(message.data));
            },
          });
        } catch (err) {
          reject(err);
        }
      });
    }
    const response = await fetch(completionsURL, {
      ...opts,
      signal: abortController.signal,
    });
    if (response.status !== 200) {
      const body = await response.text();
      const error = new Error(`Failed to send message. HTTP ${response.status} - ${body}`);
      error.status = response.status;
      try {
        error.json = JSON.parse(body);
      } catch {
        error.body = body;
      }
      throw error;
    }
    return response.json();
  }

  /** @type {cohereChatCompletion} */
  async cohereChatCompletion({ payload, onTokenProgress }) {
    const cohere = new CohereClient({
      token: this.apiKey,
      environment: this.completionsUrl,
    });

    if (!payload.stream) {
      const chatResponse = await cohere.chat(payload);
      return chatResponse.text;
    }

    const chatStream = await cohere.chatStream(payload);
    let reply = '';
    for await (const message of chatStream) {
      if (!message) {
        continue;
      }

      if (message.eventType === 'text-generation' && message.text) {
        onTokenProgress(message.text);
        reply += message.text;
      }
      /*
      Cohere API Chinese Unicode character replacement hotfix.
      Should be un-commented when the following issue is resolved:
      https://github.com/cohere-ai/cohere-typescript/issues/151

      else if (message.eventType === 'stream-end' && message.response) {
        reply = message.response.text;
      }
      */
    }

    return reply;
  }

  async generateTitle(userMessage, botMessage) {
    const instructionsPayload = {
      role: 'system',
      content: `Write an extremely concise subtitle for this conversation with no more than a few words. All words should be capitalized. Exclude punctuation.

||>Message:
${userMessage.message}
||>Response:
${botMessage.message}

||>Title:`,
    };

    const titleGenClientOptions = JSON.parse(JSON.stringify(this.options));
    titleGenClientOptions.modelOptions = {
      model: 'gpt-3.5-turbo',
      temperature: 0,
      presence_penalty: 0,
      frequency_penalty: 0,
    };
    const titleGenClient = new ChatGPTClient(this.apiKey, titleGenClientOptions);
    const result = await titleGenClient.getCompletion([instructionsPayload], null);
    // remove any non-alphanumeric characters, replace multiple spaces with 1, and then trim
    return result.choices[0].message.content
      .replace(/[^a-zA-Z0-9' ]/g, '')
      .replace(/\s+/g, ' ')
      .trim();
  }
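
  // Illustrative result (hypothetical exchange): a user message of "How do I
  // bake bread?" might yield a title like "Baking Bread Basics"; the regex
  // pass above strips punctuation and collapses whitespace first.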

  async sendMessage(message, opts = {}) {
    if (opts.clientOptions && typeof opts.clientOptions === 'object') {
      this.setOptions(opts.clientOptions);
    }

    const conversationId = opts.conversationId || crypto.randomUUID();
    const parentMessageId = opts.parentMessageId || crypto.randomUUID();

    let conversation =
      typeof opts.conversation === 'object'
        ? opts.conversation
        : await this.conversationsCache.get(conversationId);

    let isNewConversation = false;
    if (!conversation) {
      conversation = {
        messages: [],
        createdAt: Date.now(),
      };
      isNewConversation = true;
    }

    const shouldGenerateTitle = opts.shouldGenerateTitle && isNewConversation;

    const userMessage = {
      id: crypto.randomUUID(),
      parentMessageId,
      role: 'User',
      message,
    };
    conversation.messages.push(userMessage);

    // Doing it this way instead of having each message be a separate element in the array seems to be more reliable,
    // especially when it comes to keeping the AI in character. It also seems to improve coherency and context retention.
    const { prompt: payload, context } = await this.buildPrompt(
      conversation.messages,
      userMessage.id,
      {
        isChatGptModel: this.isChatGptModel,
        promptPrefix: opts.promptPrefix,
      },
    );

    if (this.options.keepNecessaryMessagesOnly) {
      conversation.messages = context;
    }

    let reply = '';
    let result = null;
    if (typeof opts.onProgress === 'function') {
      await this.getCompletion(
        payload,
        (progressMessage) => {
          if (progressMessage === '[DONE]') {
            return;
          }
          const token = this.isChatGptModel
            ? progressMessage.choices[0].delta.content
            : progressMessage.choices[0].text;
          // first event's delta content is always undefined
          if (!token) {
            return;
          }
          if (this.options.debug) {
            console.debug(token);
          }
          if (token === this.endToken) {
            return;
          }
          opts.onProgress(token);
          reply += token;
        },
        opts.abortController || new AbortController(),
      );
    } else {
      result = await this.getCompletion(
        payload,
        null,
        opts.abortController || new AbortController(),
      );
      if (this.options.debug) {
        console.debug(JSON.stringify(result));
      }
      if (this.isChatGptModel) {
        reply = result.choices[0].message.content;
      } else {
        reply = result.choices[0].text.replace(this.endToken, '');
      }
    }

    // avoids some rendering issues when using the CLI app
    if (this.options.debug) {
      console.debug();
    }

    reply = reply.trim();

    const replyMessage = {
      id: crypto.randomUUID(),
      parentMessageId: userMessage.id,
      role: 'ChatGPT',
      message: reply,
    };
    conversation.messages.push(replyMessage);

    const returnData = {
      response: replyMessage.message,
      conversationId,
      parentMessageId: replyMessage.parentMessageId,
      messageId: replyMessage.id,
      details: result || {},
    };

    if (shouldGenerateTitle) {
      conversation.title = await this.generateTitle(userMessage, replyMessage);
      returnData.title = conversation.title;
    }

    await this.conversationsCache.set(conversationId, conversation);

    if (this.options.returnConversation) {
      returnData.conversation = conversation;
    }

    return returnData;
  }

  async buildPrompt(messages, { isChatGptModel = false, promptPrefix = null }) {
    promptPrefix = (promptPrefix || this.options.promptPrefix || '').trim();

    // Handle attachments and create augmentedPrompt
    if (this.options.attachments) {
      const attachments = await this.options.attachments;
      const lastMessage = messages[messages.length - 1];

      if (this.message_file_map) {
        this.message_file_map[lastMessage.messageId] = attachments;
      } else {
        this.message_file_map = {
          [lastMessage.messageId]: attachments,
        };
      }

      const files = await this.addImageURLs(lastMessage, attachments);
      this.options.attachments = files;

      this.contextHandlers = createContextHandlers(this.options.req, lastMessage.text);
    }

    if (this.message_file_map) {
      this.contextHandlers = createContextHandlers(
        this.options.req,
        messages[messages.length - 1].text,
      );
    }

    // Calculate image token cost and process embedded files
    messages.forEach((message, i) => {
      if (this.message_file_map && this.message_file_map[message.messageId]) {
        const attachments = this.message_file_map[message.messageId];
        for (const file of attachments) {
          if (file.embedded) {
            this.contextHandlers?.processFile(file);
            continue;
          }

          messages[i].tokenCount =
            (messages[i].tokenCount || 0) +
            this.calculateImageTokenCost({
              width: file.width,
              height: file.height,
              detail: this.options.imageDetail ?? ImageDetail.auto,
            });
        }
      }
    });

    if (this.contextHandlers) {
      this.augmentedPrompt = await this.contextHandlers.createContext();
      promptPrefix = this.augmentedPrompt + promptPrefix;
    }

    if (promptPrefix) {
      // If the prompt prefix doesn't end with the end token, add it.
      if (!promptPrefix.endsWith(`${this.endToken}`)) {
        promptPrefix = `${promptPrefix.trim()}${this.endToken}\n\n`;
      }
      promptPrefix = `${this.startToken}Instructions:\n${promptPrefix}`;
    }
    const promptSuffix = `${this.startToken}${this.chatGptLabel}:\n`; // Prompt ChatGPT to respond.

    const instructionsPayload = {
      role: 'system',
      content: promptPrefix,
    };

    const messagePayload = {
      role: 'system',
      content: promptSuffix,
    };

    let currentTokenCount;
    if (isChatGptModel) {
      currentTokenCount =
        this.getTokenCountForMessage(instructionsPayload) +
        this.getTokenCountForMessage(messagePayload);
    } else {
      currentTokenCount = this.getTokenCount(`${promptPrefix}${promptSuffix}`);
    }
    let promptBody = '';
    const maxTokenCount = this.maxPromptTokens;

    const context = [];

    // Iterate backwards through the messages, adding them to the prompt until we reach the max token count.
    // Do this within a recursive async function so that it doesn't block the event loop for too long.
    const buildPromptBody = async () => {
      if (currentTokenCount < maxTokenCount && messages.length > 0) {
        const message = messages.pop();
        const roleLabel =
          message?.isCreatedByUser || message?.role?.toLowerCase() === 'user'
            ? this.userLabel
            : this.chatGptLabel;
        const messageString = `${this.startToken}${roleLabel}:\n${
          message?.text ?? message?.message
        }${this.endToken}\n`;
        let newPromptBody;
        if (promptBody || isChatGptModel) {
          newPromptBody = `${messageString}${promptBody}`;
        } else {
          // Always insert prompt prefix before the last user message, if not gpt-3.5-turbo.
          // This makes the AI obey the prompt instructions better, which is important for custom instructions.
          // After a bunch of testing, it doesn't seem to cause the AI any confusion, even if you ask it things
          // like "what's the last thing I wrote?".
          newPromptBody = `${promptPrefix}${messageString}${promptBody}`;
        }

        context.unshift(message);

        const tokenCountForMessage = this.getTokenCount(messageString);
        const newTokenCount = currentTokenCount + tokenCountForMessage;
        if (newTokenCount > maxTokenCount) {
          if (promptBody) {
            // This message would put us over the token limit, so don't add it.
            return false;
          }
          // This is the first message, so we can't add it. Just throw an error.
          throw new Error(
            `Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
          );
        }
        promptBody = newPromptBody;
        currentTokenCount = newTokenCount;
        // wait for next tick to avoid blocking the event loop
        await new Promise((resolve) => setImmediate(resolve));
        return buildPromptBody();
      }
      return true;
    };

    await buildPromptBody();

    const prompt = `${promptBody}${promptSuffix}`;
    if (isChatGptModel) {
      messagePayload.content = prompt;
      // Add 3 tokens for Assistant Label priming after all messages have been counted.
      currentTokenCount += 3;
    }

    // Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
    this.modelOptions.max_tokens = Math.min(
      this.maxContextTokens - currentTokenCount,
      this.maxResponseTokens,
    );

    if (isChatGptModel) {
      return { prompt: [instructionsPayload, messagePayload], context };
    }
    return { prompt, context, promptTokens: currentTokenCount };
  }

  getTokenCount(text) {
    return this.gptEncoder.encode(text, 'all').length;
  }

  /**
   * Algorithm adapted from "6. Counting tokens for chat API calls" of
   * https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
   *
   * An additional 3 tokens need to be added for assistant label priming after all messages have been counted.
   *
   * @param {Object} message
   */
  getTokenCountForMessage(message) {
    // Note: gpt-3.5-turbo and gpt-4 may update over time. Use default for these as well as for unknown models
    let tokensPerMessage = 3;
    let tokensPerName = 1;

    if (this.modelOptions.model === 'gpt-3.5-turbo-0301') {
      tokensPerMessage = 4;
      tokensPerName = -1;
    }

    let numTokens = tokensPerMessage;
    for (let [key, value] of Object.entries(message)) {
      numTokens += this.getTokenCount(value);
      if (key === 'name') {
        numTokens += tokensPerName;
      }
    }

    return numTokens;
  }
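
  // Worked example (approximate; exact counts depend on the tokenizer): for
  // { role: 'user', content: 'Hello world' } with the defaults above, the
  // count is tokensPerMessage (3) + tokens('user') + tokens('Hello world').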

}

module.exports = ChatGPTClient;
@@ -1,10 +1,6 @@
const { google } = require('googleapis');
const { sleep } = require('@librechat/agents');
const { logger } = require('@librechat/data-schemas');
const { getModelMaxTokens } = require('@librechat/api');
const { concat } = require('@langchain/core/utils/stream');
const { ChatVertexAI } = require('@langchain/google-vertexai');
const { Tokenizer, getSafetySettings } = require('@librechat/api');
const { ChatGoogleGenerativeAI } = require('@langchain/google-genai');
const { GoogleGenerativeAI: GenAI } = require('@google/generative-ai');
const { HumanMessage, SystemMessage } = require('@langchain/core/messages');
@@ -15,15 +11,19 @@ const {
  endpointSettings,
  parseTextParts,
  EModelEndpoint,
  googleSettings,
  ContentTypes,
  VisionModes,
  ErrorTypes,
  Constants,
  AuthKeys,
} = require('librechat-data-provider');
const { getSafetySettings } = require('~/server/services/Endpoints/google/llm');
const { encodeAndFormat } = require('~/server/services/Files/images');
const Tokenizer = require('~/server/services/Tokenizer');
const { spendTokens } = require('~/models/spendTokens');
const { getModelMaxTokens } = require('~/utils');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');
const {
  formatMessage,
  createContextHandlers,
@@ -34,8 +34,7 @@ const BaseClient = require('./BaseClient');

const loc = process.env.GOOGLE_LOC || 'us-central1';
const publisher = 'google';
const endpointPrefix =
  loc === 'global' ? 'aiplatform.googleapis.com' : `${loc}-aiplatform.googleapis.com`;
const endpointPrefix = `${loc}-aiplatform.googleapis.com`;

const settings = endpointSettings[EModelEndpoint.google];
const EXCLUDED_GENAI_MODELS = /gemini-(?:1\.0|1-0|pro)/;
@@ -166,16 +165,6 @@ class GoogleClient extends BaseClient {
      );
    }

    // Add thinking configuration
    this.modelOptions.thinkingConfig = {
      thinkingBudget:
        (this.modelOptions.thinking ?? googleSettings.thinking.default)
          ? this.modelOptions.thinkingBudget
          : 0,
    };
    delete this.modelOptions.thinking;
    delete this.modelOptions.thinkingBudget;

    this.sender =
      this.options.sender ??
      getResponseSender({
@@ -247,11 +236,11 @@ class GoogleClient extends BaseClient {
        msg.content = (
          !Array.isArray(msg.content)
            ? [
                {
                  type: ContentTypes.TEXT,
                  [ContentTypes.TEXT]: msg.content,
                },
              ]
              {
                type: ContentTypes.TEXT,
                [ContentTypes.TEXT]: msg.content,
              },
            ]
            : msg.content
        ).concat(message.image_urls);
@@ -1,11 +1,10 @@
const { z } = require('zod');
const axios = require('axios');
const { Ollama } = require('ollama');
const { sleep } = require('@librechat/agents');
const { resolveHeaders } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { Constants } = require('librechat-data-provider');
const { deriveBaseURL } = require('~/utils');
const { deriveBaseURL, logAxiosError } = require('~/utils');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');

const ollamaPayloadSchema = z.object({
  mirostat: z.number().optional(),
@@ -44,7 +43,6 @@ class OllamaClient {
  constructor(options = {}) {
    const host = deriveBaseURL(options.baseURL ?? 'http://localhost:11434');
    this.streamRate = options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
    this.headers = options.headers ?? {};
    /** @type {Ollama} */
    this.client = new Ollama({ host });
  }
@@ -52,32 +50,27 @@ class OllamaClient {
  /**
   * Fetches Ollama models from the specified base API path.
   * @param {string} baseURL
   * @param {Object} [options] - Optional configuration
   * @param {Partial<IUser>} [options.user] - User object for header resolution
   * @param {Record<string, string>} [options.headers] - Headers to include in the request
   * @returns {Promise<string[]>} The Ollama models.
   * @throws {Error} Throws if the Ollama API request fails
   */
  static async fetchModels(baseURL, options = {}) {
  static async fetchModels(baseURL) {
    let models = [];
    if (!baseURL) {
      return models;
    }
    try {
      const ollamaEndpoint = deriveBaseURL(baseURL);
      /** @type {Promise<AxiosResponse<OllamaListResponse>>} */
      const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
        timeout: 5000,
      });
      models = response.data.models.map((tag) => tag.name);
      return models;
    } catch (error) {
      const logMessage =
        'Failed to fetch models from Ollama API. If you are not using Ollama directly, and instead, through some aggregator or reverse proxy that handles fetching via OpenAI spec, ensure the name of the endpoint doesn\'t start with `ollama` (case-insensitive).';
      logAxiosError({ message: logMessage, error });
      return [];
    }

    const ollamaEndpoint = deriveBaseURL(baseURL);

    const resolvedHeaders = resolveHeaders({
      headers: options.headers,
      user: options.user,
    });

    /** @type {Promise<AxiosResponse<OllamaListResponse>>} */
    const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
      headers: resolvedHeaders,
      timeout: 5000,
    });

    const models = response.data.models.map((tag) => tag.name);
    return models;
  }
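
  // Usage sketch (assumed values): headers are resolved per-user before the
  // GET to `/api/tags`, e.g.
  //   const models = await OllamaClient.fetchModels('http://localhost:11434', {
  //     user,
  //     headers: { Authorization: 'Bearer my-token' },
  //   });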

  /**
@@ -1,23 +1,13 @@
|
||||
const { logger } = require('@librechat/data-schemas');
|
||||
const { OllamaClient } = require('./OllamaClient');
|
||||
const { HttpsProxyAgent } = require('https-proxy-agent');
|
||||
const { sleep, SplitStreamHandler, CustomOpenAIClient: OpenAI } = require('@librechat/agents');
|
||||
const {
|
||||
isEnabled,
|
||||
Tokenizer,
|
||||
createFetch,
|
||||
resolveHeaders,
|
||||
constructAzureURL,
|
||||
getModelMaxTokens,
|
||||
genAzureChatCompletion,
|
||||
getModelMaxOutputTokens,
|
||||
createStreamEventHandlers,
|
||||
} = require('@librechat/api');
|
||||
const { SplitStreamHandler, CustomOpenAIClient: OpenAI } = require('@librechat/agents');
|
||||
const {
|
||||
Constants,
|
||||
ImageDetail,
|
||||
ContentTypes,
|
||||
parseTextParts,
|
||||
EModelEndpoint,
|
||||
resolveHeaders,
|
||||
KnownEndpoints,
|
||||
openAISettings,
|
||||
ImageDetailCost,
|
||||
@@ -26,6 +16,13 @@ const {
|
||||
validateVisionModel,
|
||||
mapModelToAzureConfig,
|
||||
} = require('librechat-data-provider');
|
||||
const {
|
||||
extractBaseURL,
|
||||
constructAzureURL,
|
||||
getModelMaxTokens,
|
||||
genAzureChatCompletion,
|
||||
getModelMaxOutputTokens,
|
||||
} = require('~/utils');
|
||||
const {
|
||||
truncateText,
|
||||
formatMessage,
|
||||
@@ -34,19 +31,28 @@ const {
|
||||
createContextHandlers,
|
||||
} = require('./prompts');
|
||||
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
|
||||
const { createFetch, createStreamEventHandlers } = require('./generators');
|
||||
const { addSpaceIfNeeded, isEnabled, sleep } = require('~/server/utils');
|
||||
const Tokenizer = require('~/server/services/Tokenizer');
|
||||
const { spendTokens } = require('~/models/spendTokens');
|
||||
const { addSpaceIfNeeded } = require('~/server/utils');
|
||||
const { handleOpenAIErrors } = require('./tools/util');
|
||||
const { OllamaClient } = require('./OllamaClient');
|
||||
const { createLLM, RunManager } = require('./llm');
|
||||
const ChatGPTClient = require('./ChatGPTClient');
|
||||
const { summaryBuffer } = require('./memory');
|
||||
const { runTitleChain } = require('./chains');
|
||||
const { extractBaseURL } = require('~/utils');
|
||||
const { tokenSplit } = require('./document');
|
||||
const BaseClient = require('./BaseClient');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
class OpenAIClient extends BaseClient {
  constructor(apiKey, options = {}) {
    super(apiKey, options);
    this.ChatGPTClient = new ChatGPTClient();
    this.buildPrompt = this.ChatGPTClient.buildPrompt.bind(this);
    /** @type {getCompletion} */
    this.getCompletion = this.ChatGPTClient.getCompletion.bind(this);
    /** @type {cohereChatCompletion} */
    this.cohereChatCompletion = this.ChatGPTClient.cohereChatCompletion.bind(this);
    this.contextStrategy = options.contextStrategy
      ? options.contextStrategy.toLowerCase()
      : 'discard';
@@ -373,12 +379,23 @@ class OpenAIClient extends BaseClient {
    return files;
  }

  async buildMessages(messages, parentMessageId, { promptPrefix = null }, opts) {
  async buildMessages(
    messages,
    parentMessageId,
    { isChatCompletion = false, promptPrefix = null },
    opts,
  ) {
    let orderedMessages = this.constructor.getMessagesForConversation({
      messages,
      parentMessageId,
      summary: this.shouldSummarize,
    });
    if (!isChatCompletion) {
      return await this.buildPrompt(orderedMessages, {
        isChatGptModel: isChatCompletion,
        promptPrefix,
      });
    }

    let payload;
    let instructions;
@@ -613,8 +630,77 @@ class OpenAIClient extends BaseClient {
    return (reply ?? '').trim();
  }

  initializeLLM() {
    throw new Error('Deprecated');
  initializeLLM({
    model = openAISettings.model.default,
    modelName,
    temperature = 0.2,
    max_tokens,
    streaming,
    context,
    tokenBuffer,
    initialMessageCount,
    conversationId,
  }) {
    const modelOptions = {
      modelName: modelName ?? model,
      temperature,
      user: this.user,
    };

    if (max_tokens) {
      modelOptions.max_tokens = max_tokens;
    }

    const configOptions = {};

    if (this.langchainProxy) {
      configOptions.basePath = this.langchainProxy;
    }

    if (this.useOpenRouter) {
      configOptions.basePath = 'https://openrouter.ai/api/v1';
      configOptions.baseOptions = {
        headers: {
          'HTTP-Referer': 'https://librechat.ai',
          'X-Title': 'LibreChat',
        },
      };
    }

    const { headers } = this.options;
    if (headers && typeof headers === 'object' && !Array.isArray(headers)) {
      configOptions.baseOptions = {
        headers: resolveHeaders({
          ...headers,
          ...configOptions?.baseOptions?.headers,
        }),
      };
    }

    if (this.options.proxy) {
      configOptions.httpAgent = new HttpsProxyAgent(this.options.proxy);
      configOptions.httpsAgent = new HttpsProxyAgent(this.options.proxy);
    }

    const { req, res, debug } = this.options;
    const runManager = new RunManager({ req, res, debug, abortController: this.abortController });
    this.runManager = runManager;

    const llm = createLLM({
      modelOptions,
      configOptions,
      openAIApiKey: this.apiKey,
      azure: this.azure,
      streaming,
      callbacks: runManager.createCallbacks({
        context,
        tokenBuffer,
        conversationId: this.conversationId ?? conversationId,
        initialMessageCount,
      }),
    });

    return llm;
  }

  /**
@@ -632,7 +718,6 @@ class OpenAIClient extends BaseClient {
   * In case of failure, it will return the default title, "New Chat".
   */
  async titleConvo({ text, conversationId, responseText = '' }) {
    const appConfig = this.options.req?.config;
    this.conversationId = conversationId;

    if (this.options.attachments) {
@@ -661,7 +746,8 @@ class OpenAIClient extends BaseClient {
      max_tokens: 16,
    };

    const azureConfig = appConfig?.endpoints?.[EModelEndpoint.azureOpenAI];
    /** @type {TAzureConfig | undefined} */
    const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];

    const resetTitleOptions = !!(
      (this.azure && azureConfig) ||
@@ -681,7 +767,7 @@ class OpenAIClient extends BaseClient {
        groupMap,
      });

      this.options.headers = resolveHeaders({ headers });
      this.options.headers = resolveHeaders(headers);
      this.options.reverseProxyUrl = baseURL ?? null;
      this.langchainProxy = extractBaseURL(this.options.reverseProxyUrl);
      this.apiKey = azureOptions.azureOpenAIApiKey;
@@ -1050,7 +1136,6 @@ ${convo}
  }

  async chatCompletion({ payload, onProgress, abortController = null }) {
    const appConfig = this.options.req?.config;
    let error = null;
    let intermediateReply = [];
    const errorCallback = (err) => (error = err);
@@ -1074,7 +1159,6 @@ ${convo}
      logger.debug('[OpenAIClient] chatCompletion', { baseURL, modelOptions });
      const opts = {
        baseURL,
        fetchOptions: {},
      };

      if (this.useOpenRouter) {
@@ -1093,10 +1177,11 @@ ${convo}
      }

      if (this.options.proxy) {
        opts.fetchOptions.agent = new HttpsProxyAgent(this.options.proxy);
        opts.httpAgent = new HttpsProxyAgent(this.options.proxy);
      }

      const azureConfig = appConfig?.endpoints?.[EModelEndpoint.azureOpenAI];
      /** @type {TAzureConfig | undefined} */
      const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];

      if (
        (this.azure && this.isVisionModel && azureConfig) ||
@@ -1113,7 +1198,7 @@ ${convo}
          modelGroupMap,
          groupMap,
        });
        opts.defaultHeaders = resolveHeaders({ headers });
        opts.defaultHeaders = resolveHeaders(headers);
        this.langchainProxy = extractBaseURL(baseURL);
        this.apiKey = azureOptions.azureOpenAIApiKey;

@@ -1154,9 +1239,7 @@ ${convo}
      }

      if (this.isOmni === true && modelOptions.max_tokens != null) {
        const paramName =
          modelOptions.useResponsesApi === true ? 'max_output_tokens' : 'max_completion_tokens';
        modelOptions[paramName] = modelOptions.max_tokens;
        modelOptions.max_completion_tokens = modelOptions.max_tokens;
        delete modelOptions.max_tokens;
      }
      if (this.isOmni === true && modelOptions.temperature != null) {
@@ -1312,7 +1395,7 @@ ${convo}
        ...modelOptions,
        stream: true,
      };
      const stream = await openai.chat.completions
      const stream = await openai.beta.chat.completions
        .stream(params)
        .on('abort', () => {
          /* Do nothing here */

api/app/clients/PluginsClient.js (Normal file, 542 lines)
@@ -0,0 +1,542 @@
const OpenAIClient = require('./OpenAIClient');
const { CallbackManager } = require('@langchain/core/callbacks/manager');
const { BufferMemory, ChatMessageHistory } = require('langchain/memory');
const { addImages, buildErrorInput, buildPromptPrefix } = require('./output_parsers');
const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents');
const { processFileURL } = require('~/server/services/Files/process');
const { EModelEndpoint } = require('librechat-data-provider');
const { checkBalance } = require('~/models/balanceMethods');
const { formatLangChainMessages } = require('./prompts');
const { extractBaseURL } = require('~/utils');
const { loadTools } = require('./tools/util');
const { logger } = require('~/config');

class PluginsClient extends OpenAIClient {
  constructor(apiKey, options = {}) {
    super(apiKey, options);
    this.sender = options.sender ?? 'Assistant';
    this.tools = [];
    this.actions = [];
    this.setOptions(options);
    this.openAIApiKey = this.apiKey;
    this.executor = null;
  }

  setOptions(options) {
    this.agentOptions = { ...options.agentOptions };
    this.functionsAgent = this.agentOptions?.agent === 'functions';
    this.agentIsGpt3 = this.agentOptions?.model?.includes('gpt-3');

    super.setOptions(options);

    this.isGpt3 = this.modelOptions?.model?.includes('gpt-3');

    if (this.options.reverseProxyUrl) {
      this.langchainProxy = extractBaseURL(this.options.reverseProxyUrl);
    }
  }

  getSaveOptions() {
    return {
      artifacts: this.options.artifacts,
      chatGptLabel: this.options.chatGptLabel,
      modelLabel: this.options.modelLabel,
      promptPrefix: this.options.promptPrefix,
      tools: this.options.tools,
      ...this.modelOptions,
      agentOptions: this.agentOptions,
      iconURL: this.options.iconURL,
      greeting: this.options.greeting,
      spec: this.options.spec,
    };
  }

  saveLatestAction(action) {
    this.actions.push(action);
  }

  getFunctionModelName(input) {
    // Keep exact dated model versions (a "-NNNN" suffix, except the retired "-0314");
    // otherwise fall back to the base model family.
    if (/-(?!0314)\d{4}/.test(input)) {
      return input;
    } else if (input.includes('gpt-3.5-turbo')) {
      return 'gpt-3.5-turbo';
    } else if (input.includes('gpt-4')) {
      return 'gpt-4';
    } else {
      return 'gpt-3.5-turbo';
    }
  }

  getBuildMessagesOptions(opts) {
    return {
      isChatCompletion: true,
      promptPrefix: opts.promptPrefix,
      abortController: opts.abortController,
    };
  }

  async initialize({ user, message, onAgentAction, onChainEnd, signal }) {
    const modelOptions = {
      modelName: this.agentOptions.model,
      temperature: this.agentOptions.temperature,
    };

    const model = this.initializeLLM({
      ...modelOptions,
      context: 'plugins',
      initialMessageCount: this.currentMessages.length + 1,
    });

    logger.debug(
      `[PluginsClient] Agent Model: ${model.modelName} | Temp: ${model.temperature} | Functions: ${this.functionsAgent}`,
    );

    // Map Messages to Langchain format
    const pastMessages = formatLangChainMessages(this.currentMessages.slice(0, -1), {
      userName: this.options?.name,
    });
    logger.debug('[PluginsClient] pastMessages: ' + pastMessages.length);

    // TODO: use readOnly memory, TokenBufferMemory? (both unavailable in LangChainJS)
    const memory = new BufferMemory({
      llm: model,
      chatHistory: new ChatMessageHistory(pastMessages),
    });

    const { loadedTools } = await loadTools({
      user,
      model,
      tools: this.options.tools,
      functions: this.functionsAgent,
      options: {
        memory,
        signal: this.abortController.signal,
        openAIApiKey: this.openAIApiKey,
        conversationId: this.conversationId,
        fileStrategy: this.options.req.app.locals.fileStrategy,
        processFileURL,
        message,
      },
      useSpecs: true,
    });

    if (loadedTools.length === 0) {
      return;
    }

    this.tools = loadedTools;

    logger.debug('[PluginsClient] Requested Tools', this.options.tools);
    logger.debug(
      '[PluginsClient] Loaded Tools',
      this.tools.map((tool) => tool.name),
    );

    const handleAction = (action, runId, callback = null) => {
      this.saveLatestAction(action);

      logger.debug('[PluginsClient] Latest Agent Action ', this.actions[this.actions.length - 1]);

      if (typeof callback === 'function') {
        callback(action, runId);
      }
    };

    // initialize agent
    const initializer = this.functionsAgent ? initializeFunctionsAgent : initializeCustomAgent;

    let customInstructions = (this.options.promptPrefix ?? '').trim();
    if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
      customInstructions = `${customInstructions ?? ''}\n${this.options.artifactsPrompt}`.trim();
    }

    this.executor = await initializer({
      model,
      signal,
      pastMessages,
      tools: this.tools,
      customInstructions,
      verbose: this.options.debug,
      returnIntermediateSteps: true,
      customName: this.options.chatGptLabel,
      currentDateString: this.currentDateString,
      callbackManager: CallbackManager.fromHandlers({
        async handleAgentAction(action, runId) {
          handleAction(action, runId, onAgentAction);
        },
        async handleChainEnd(action) {
          if (typeof onChainEnd === 'function') {
            onChainEnd(action);
          }
        },
      }),
    });

    logger.debug('[PluginsClient] Loaded agent.');
  }

  async executorCall(message, { signal, stream, onToolStart, onToolEnd }) {
    let errorMessage = '';
    const maxAttempts = 1;

    for (let attempts = 1; attempts <= maxAttempts; attempts++) {
      const errorInput = buildErrorInput({
        message,
        errorMessage,
        actions: this.actions,
        functionsAgent: this.functionsAgent,
      });
      const input = attempts > 1 ? errorInput : message;

      logger.debug(`[PluginsClient] Attempt ${attempts} of ${maxAttempts}`);

      if (errorMessage.length > 0) {
        logger.debug('[PluginsClient] Caught error, input: ' + JSON.stringify(input));
      }

      try {
        this.result = await this.executor.call({ input, signal }, [
          {
            async handleToolStart(...args) {
              await onToolStart(...args);
            },
            async handleToolEnd(...args) {
              await onToolEnd(...args);
            },
            async handleLLMEnd(output) {
              const { generations } = output;
              const { text } = generations[0][0];
              if (text && typeof stream === 'function') {
                await stream(text);
              }
            },
          },
        ]);
        break; // Exit the loop if the function call is successful
      } catch (err) {
        logger.error('[PluginsClient] executorCall error:', err);
        if (attempts === maxAttempts) {
          const { run } = this.runManager.getRunByConversationId(this.conversationId);
          const defaultOutput = `Encountered an error while attempting to respond: ${err.message}`;
          this.result.output = run && run.error ? run.error : defaultOutput;
          this.result.errorMessage = run && run.error ? run.error : err.message;
          this.result.intermediateSteps = this.actions;
          break;
        }
      }
    }
  }

  /**
   *
   * @param {TMessage} responseMessage
   * @param {Partial<TMessage>} saveOptions
   * @param {string} user
   * @returns
   */
  async handleResponseMessage(responseMessage, saveOptions, user) {
    const { output, errorMessage, ...result } = this.result;
    logger.debug('[PluginsClient][handleResponseMessage] Output:', {
      output,
      errorMessage,
      ...result,
    });
    const { error } = responseMessage;
    if (!error) {
      responseMessage.tokenCount = this.getTokenCountForResponse(responseMessage);
      responseMessage.completionTokens = this.getTokenCount(responseMessage.text);
    }

    // Only record usage when the completion phase runs; if it was skipped, usage was already recorded in the agent phase.
    if (!this.agentOptions.skipCompletion && !error) {
      await this.recordTokenUsage(responseMessage);
    }

    const databasePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user);
    delete responseMessage.tokenCount;
    return { ...responseMessage, ...result, databasePromise };
  }

  async sendMessage(message, opts = {}) {
    /** @type {Promise<TMessage>} */
    let userMessagePromise;
    /** @type {{ filteredTools: string[], includedTools: string[] }} */
    const { filteredTools = [], includedTools = [] } = this.options.req.app.locals;

    if (includedTools.length > 0) {
      const tools = this.options.tools.filter((plugin) => includedTools.includes(plugin));
      this.options.tools = tools;
    } else {
      const tools = this.options.tools.filter((plugin) => !filteredTools.includes(plugin));
      this.options.tools = tools;
    }

    // If a message is edited, no tools can be used.
    const completionMode = this.options.tools.length === 0 || opts.isEdited;
    if (completionMode) {
      this.setOptions(opts);
      return super.sendMessage(message, opts);
    }

    logger.debug('[PluginsClient] sendMessage', { userMessageText: message, opts });
    const {
      user,
      conversationId,
      responseMessageId,
      saveOptions,
      userMessage,
      onAgentAction,
      onChainEnd,
      onToolStart,
      onToolEnd,
    } = await this.handleStartMethods(message, opts);

    if (opts.progressCallback) {
      opts.onProgress = opts.progressCallback.call(null, {
        ...(opts.progressOptions ?? {}),
        parentMessageId: userMessage.messageId,
        messageId: responseMessageId,
      });
    }

    this.currentMessages.push(userMessage);

    let {
      prompt: payload,
      tokenCountMap,
      promptTokens,
    } = await this.buildMessages(
      this.currentMessages,
      userMessage.messageId,
      this.getBuildMessagesOptions({
        promptPrefix: null,
        abortController: this.abortController,
      }),
    );

    if (tokenCountMap) {
      logger.debug('[PluginsClient] tokenCountMap', { tokenCountMap });
      if (tokenCountMap[userMessage.messageId]) {
        userMessage.tokenCount = tokenCountMap[userMessage.messageId];
        logger.debug('[PluginsClient] userMessage.tokenCount', userMessage.tokenCount);
      }
      this.handleTokenCountMap(tokenCountMap);
    }

    this.result = {};
    if (payload) {
      this.currentMessages = payload;
    }

    if (!this.skipSaveUserMessage) {
      userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user);
      if (typeof opts?.getReqData === 'function') {
        opts.getReqData({
          userMessagePromise,
        });
      }
    }

    const balance = this.options.req?.app?.locals?.balance;
    if (balance?.enabled) {
      await checkBalance({
        req: this.options.req,
        res: this.options.res,
        txData: {
          user: this.user,
          tokenType: 'prompt',
          amount: promptTokens,
          debug: this.options.debug,
          model: this.modelOptions.model,
          endpoint: EModelEndpoint.openAI,
        },
      });
    }

    const responseMessage = {
      endpoint: EModelEndpoint.gptPlugins,
      iconURL: this.options.iconURL,
      messageId: responseMessageId,
      conversationId,
      parentMessageId: userMessage.messageId,
      isCreatedByUser: false,
      model: this.modelOptions.model,
      sender: this.sender,
      promptTokens,
    };

    await this.initialize({
      user,
      message,
      onAgentAction,
      onChainEnd,
      signal: this.abortController.signal,
      onProgress: opts.onProgress,
    });

    // const stream = async (text) => {
    //   await this.generateTextStream.call(this, text, opts.onProgress, { delay: 1 });
    // };
    await this.executorCall(message, {
      signal: this.abortController.signal,
      // stream,
      onToolStart,
      onToolEnd,
    });

    // If message was aborted mid-generation
    if (this.result?.errorMessage?.length > 0 && this.result?.errorMessage?.includes('cancel')) {
      responseMessage.text = 'Cancelled.';
      return await this.handleResponseMessage(responseMessage, saveOptions, user);
    }

    // If error occurred during generation (likely token_balance)
    if (this.result?.errorMessage?.length > 0) {
      responseMessage.error = true;
      responseMessage.text = this.result.output;
      return await this.handleResponseMessage(responseMessage, saveOptions, user);
    }

    if (this.agentOptions.skipCompletion && this.result.output && this.functionsAgent) {
      const partialText = opts.getPartialText();
      const trimmedPartial = opts.getPartialText().replaceAll(':::plugin:::\n', '');
      responseMessage.text =
        trimmedPartial.length === 0 ? `${partialText}${this.result.output}` : partialText;
      addImages(this.result.intermediateSteps, responseMessage);
      await this.generateTextStream(this.result.output, opts.onProgress, { delay: 5 });
      return await this.handleResponseMessage(responseMessage, saveOptions, user);
    }

    if (this.agentOptions.skipCompletion && this.result.output) {
      responseMessage.text = this.result.output;
      addImages(this.result.intermediateSteps, responseMessage);
      await this.generateTextStream(this.result.output, opts.onProgress, { delay: 5 });
      return await this.handleResponseMessage(responseMessage, saveOptions, user);
    }

    logger.debug('[PluginsClient] Completion phase: this.result', this.result);

    const promptPrefix = buildPromptPrefix({
      result: this.result,
      message,
      functionsAgent: this.functionsAgent,
    });

    logger.debug('[PluginsClient]', { promptPrefix });

    payload = await this.buildCompletionPrompt({
      messages: this.currentMessages,
      promptPrefix,
    });

    logger.debug('[PluginsClient] buildCompletionPrompt Payload', payload);
    responseMessage.text = await this.sendCompletion(payload, opts);
    return await this.handleResponseMessage(responseMessage, saveOptions, user);
  }

  async buildCompletionPrompt({ messages, promptPrefix: _promptPrefix }) {
    logger.debug('[PluginsClient] buildCompletionPrompt messages', messages);

    const orderedMessages = messages;
    let promptPrefix = _promptPrefix.trim();
    // If the prompt prefix doesn't end with the end token, add it.
    if (!promptPrefix.endsWith(`${this.endToken}`)) {
      promptPrefix = `${promptPrefix.trim()}${this.endToken}\n\n`;
    }
    promptPrefix = `${this.startToken}Instructions:\n${promptPrefix}`;
    const promptSuffix = `${this.startToken}${this.chatGptLabel ?? 'Assistant'}:\n`;

    const instructionsPayload = {
      role: 'system',
      content: promptPrefix,
    };

    const messagePayload = {
      role: 'system',
      content: promptSuffix,
    };

    if (this.isGpt3) {
      instructionsPayload.role = 'user';
      messagePayload.role = 'user';
      instructionsPayload.content += `\n${promptSuffix}`;
    }

    // testing if this works with browser endpoint
    if (!this.isGpt3 && this.options.reverseProxyUrl) {
      instructionsPayload.role = 'user';
    }

    let currentTokenCount =
      this.getTokenCountForMessage(instructionsPayload) +
      this.getTokenCountForMessage(messagePayload);

    let promptBody = '';
    const maxTokenCount = this.maxPromptTokens;
    // Iterate backwards through the messages, adding them to the prompt until we reach the max token count.
    // Do this within a recursive async function so that it doesn't block the event loop for too long.
    const buildPromptBody = async () => {
      if (currentTokenCount < maxTokenCount && orderedMessages.length > 0) {
        const message = orderedMessages.pop();
        const isCreatedByUser = message.isCreatedByUser || message.role?.toLowerCase() === 'user';
        const roleLabel = isCreatedByUser ? this.userLabel : this.chatGptLabel;
        let messageString = `${this.startToken}${roleLabel}:\n${
          message.text ?? message.content ?? ''
        }${this.endToken}\n`;
        let newPromptBody = `${messageString}${promptBody}`;

        const tokenCountForMessage = this.getTokenCount(messageString);
        const newTokenCount = currentTokenCount + tokenCountForMessage;
        if (newTokenCount > maxTokenCount) {
          if (promptBody) {
            // This message would put us over the token limit, so don't add it.
            return false;
          }
          // This is the first message, so we can't add it. Just throw an error.
          throw new Error(
            `Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
          );
        }
        promptBody = newPromptBody;
        currentTokenCount = newTokenCount;
        // wait for next tick to avoid blocking the event loop
        await new Promise((resolve) => setTimeout(resolve, 0));
        return buildPromptBody();
      }
      return true;
    };

    await buildPromptBody();
    const prompt = promptBody;
    messagePayload.content = prompt;
    // Add 2 tokens for metadata after all messages have been counted.
    currentTokenCount += 2;

    if (this.isGpt3 && messagePayload.content.length > 0) {
      const context = 'Chat History:\n';
      messagePayload.content = `${context}${prompt}`;
      currentTokenCount += this.getTokenCount(context);
    }

    // Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxResponseTokens` tokens for the response.
    this.modelOptions.max_tokens = Math.min(
      this.maxContextTokens - currentTokenCount,
      this.maxResponseTokens,
    );

    if (this.isGpt3) {
      messagePayload.content += promptSuffix;
      return [instructionsPayload, messagePayload];
    }

    const result = [messagePayload, instructionsPayload];

    if (this.functionsAgent && !this.isGpt3) {
      result[1].content = `${result[1].content}\n${this.startToken}${this.chatGptLabel}:\nSure thing! Here is the output you requested:\n`;
    }

    return result.filter((message) => message.content.length > 0);
  }
}

module.exports = PluginsClient;
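
The response-token budget at the end of buildCompletionPrompt is easy to misread, so here is a minimal standalone sketch of the same arithmetic; the sample numbers are hypothetical, not values from the diff:

// Minimal sketch of the max_tokens computation in buildCompletionPrompt above.
const maxContextTokens = 4095; // total window (prompt + response)
const maxResponseTokens = 1024; // preferred room for the reply
const currentTokenCount = 3500; // instructions + chat history + 2 metadata tokens

// Whatever is left after the prompt, capped at the preferred response size:
const max_tokens = Math.min(maxContextTokens - currentTokenCount, maxResponseTokens);
console.log(max_tokens); // 595: a long prompt crowds out part of the response budget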

@@ -1,5 +1,5 @@
const { Readable } = require('stream');
const { logger } = require('@librechat/data-schemas');
const { logger } = require('~/config');

class TextStream extends Readable {
  constructor(text, options = {}) {

@@ -1,5 +1,5 @@
const { logger } = require('@librechat/data-schemas');
const { ZeroShotAgentOutputParser } = require('langchain/agents');
const { logger } = require('~/config');

class CustomOutputParser extends ZeroShotAgentOutputParser {
  constructor(fields) {

api/app/clients/callbacks/createStartHandler.js (Normal file, 95 lines)
@@ -0,0 +1,95 @@
const { promptTokensEstimate } = require('openai-chat-tokens');
const { EModelEndpoint, supportsBalanceCheck } = require('librechat-data-provider');
const { formatFromLangChain } = require('~/app/clients/prompts');
const { getBalanceConfig } = require('~/server/services/Config');
const { checkBalance } = require('~/models/balanceMethods');
const { logger } = require('~/config');

const createStartHandler = ({
  context,
  conversationId,
  tokenBuffer = 0,
  initialMessageCount,
  manager,
}) => {
  return async (_llm, _messages, runId, parentRunId, extraParams) => {
    const { invocation_params } = extraParams;
    const { model, functions, function_call } = invocation_params;
    const messages = _messages[0].map(formatFromLangChain);

    logger.debug(`[createStartHandler] handleChatModelStart: ${context}`, {
      model,
      function_call,
    });

    if (context !== 'title') {
      logger.debug(`[createStartHandler] handleChatModelStart: ${context}`, {
        functions,
      });
    }

    const payload = { messages };
    let prelimPromptTokens = 1;

    if (functions) {
      payload.functions = functions;
      prelimPromptTokens += 2;
    }

    if (function_call) {
      payload.function_call = function_call;
      prelimPromptTokens -= 5;
    }

    prelimPromptTokens += promptTokensEstimate(payload);
    logger.debug('[createStartHandler]', {
      prelimPromptTokens,
      tokenBuffer,
    });
    prelimPromptTokens += tokenBuffer;

    try {
      const balance = await getBalanceConfig();
      if (balance?.enabled && supportsBalanceCheck[EModelEndpoint.openAI]) {
        const generations =
          initialMessageCount && messages.length > initialMessageCount
            ? messages.slice(initialMessageCount)
            : null;
        await checkBalance({
          req: manager.req,
          res: manager.res,
          txData: {
            user: manager.user,
            tokenType: 'prompt',
            amount: prelimPromptTokens,
            debug: manager.debug,
            generations,
            model,
            endpoint: EModelEndpoint.openAI,
          },
        });
      }
    } catch (err) {
      logger.error(`[createStartHandler][${context}] checkBalance error`, err);
      manager.abortController.abort();
      if (context === 'summary' || context === 'plugins') {
        manager.addRun(runId, { conversationId, error: err.message });
        throw new Error(err);
      }
      return;
    }

    manager.addRun(runId, {
      model,
      messages,
      functions,
      function_call,
      runId,
      parentRunId,
      conversationId,
      prelimPromptTokens,
    });
  };
};

module.exports = createStartHandler;
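
A hedged usage sketch, not from the diff, showing where this handler plugs in; `req`, `res`, and `abortController` are assumed request-scoped values, and RunManager (added later in this changeset) supplies the `manager` interface:

const { createStartHandler } = require('~/app/clients/callbacks');
const RunManager = require('~/app/clients/llm/RunManager');

// `req`, `res`, and `abortController` are assumed to come from the request scope.
const manager = new RunManager({ req, res, debug: false, abortController });
const handleChatModelStart = createStartHandler({
  context: 'plugins', // 'title' and 'summary' are the other contexts used in this changeset
  conversationId: 'example-conversation-id', // hypothetical id
  tokenBuffer: 150, // headroom added on top of the token estimate
  initialMessageCount: 2,
  manager,
});
// LangChain invokes this as handleChatModelStart(llm, messages, runId, parentRunId, extraParams):
// it estimates prompt tokens, optionally checks the user's balance, and registers the run.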

api/app/clients/callbacks/index.js (Normal file, 5 lines)
@@ -0,0 +1,5 @@
const createStartHandler = require('./createStartHandler');

module.exports = {
  createStartHandler,
};

@@ -1,7 +1,7 @@
const { z } = require('zod');
const { logger } = require('@librechat/data-schemas');
const { langPrompt, createTitlePrompt, escapeBraces, getSnippet } = require('../prompts');
const { createStructuredOutputChainFromZod } = require('langchain/chains/openai_functions');
const { logger } = require('~/config');

const langSchema = z.object({
  language: z.string().describe('The language of the input text (full noun, no abbreviations).'),

api/app/clients/generators.js (Normal file, 71 lines)
@@ -0,0 +1,71 @@
const fetch = require('node-fetch');
const { GraphEvents } = require('@librechat/agents');
const { logger, sendEvent } = require('~/config');
const { sleep } = require('~/server/utils');

/**
 * Creates a fetch function that makes HTTP requests and logs the process.
 * @param {Object} params
 * @param {boolean} [params.directEndpoint] - Whether to use a direct endpoint.
 * @param {string} [params.reverseProxyUrl] - The reverse proxy URL to use for the request.
 * @returns {(url: RequestInfo, init?: RequestInit) => Promise<Response>} - A fetch function that resolves to the response of the request.
 */
function createFetch({ directEndpoint = false, reverseProxyUrl = '' }) {
  /**
   * Makes an HTTP request and logs the process.
   * @param {RequestInfo} url - The URL to make the request to. Can be a string or a Request object.
   * @param {RequestInit} [init] - Optional init options for the request.
   * @returns {Promise<Response>} - A promise that resolves to the response of the fetch request.
   */
  return async (_url, init) => {
    let url = _url;
    if (directEndpoint) {
      url = reverseProxyUrl;
    }
    logger.debug(`Making request to ${url}`);
    // Both branches currently delegate to node-fetch; the Bun check is kept as an explicit no-op.
    if (typeof Bun !== 'undefined') {
      return await fetch(url, init);
    }
    return await fetch(url, init);
  };
}

/**
 * Creates event handlers for stream events that don't capture client references
 * @param {Object} res - The response object to send events to
 * @returns {Object} Object containing handler functions
 */
function createStreamEventHandlers(res) {
  return {
    [GraphEvents.ON_RUN_STEP]: (event) => {
      if (res) {
        sendEvent(res, event);
      }
    },
    [GraphEvents.ON_MESSAGE_DELTA]: (event) => {
      if (res) {
        sendEvent(res, event);
      }
    },
    [GraphEvents.ON_REASONING_DELTA]: (event) => {
      if (res) {
        sendEvent(res, event);
      }
    },
  };
}

function createHandleLLMNewToken(streamRate) {
  return async () => {
    if (streamRate) {
      await sleep(streamRate);
    }
  };
}

module.exports = {
  createFetch,
  createHandleLLMNewToken,
  createStreamEventHandlers,
};
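
A minimal usage sketch of two of the generators above (not part of the diff); the proxy URL and request body are placeholders:

const { createFetch, createHandleLLMNewToken } = require('./generators');

(async () => {
  const fetchFn = createFetch({
    directEndpoint: true, // every request below is rewritten to the reverse proxy URL
    reverseProxyUrl: 'https://example-proxy.internal/v1/chat/completions', // hypothetical
  });

  // The original URL is ignored here because directEndpoint is true.
  const response = await fetchFn('https://api.openai.com/v1/chat/completions', {
    method: 'POST',
    body: JSON.stringify({ model: 'gpt-4o-mini', messages: [] }),
  });
  console.log(response.status);

  // Throttles token streaming by sleeping ~25 ms per emitted token.
  const onToken = createHandleLLMNewToken(25);
  await onToken();
})();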

@@ -1,11 +1,15 @@
const ChatGPTClient = require('./ChatGPTClient');
const OpenAIClient = require('./OpenAIClient');
const PluginsClient = require('./PluginsClient');
const GoogleClient = require('./GoogleClient');
const TextStream = require('./TextStream');
const AnthropicClient = require('./AnthropicClient');
const toolUtils = require('./tools/util');

module.exports = {
  ChatGPTClient,
  OpenAIClient,
  PluginsClient,
  GoogleClient,
  TextStream,
  AnthropicClient,

api/app/clients/llm/RunManager.js (Normal file, 105 lines)
@@ -0,0 +1,105 @@
const { createStartHandler } = require('~/app/clients/callbacks');
const { spendTokens } = require('~/models/spendTokens');
const { logger } = require('~/config');

class RunManager {
  constructor(fields) {
    const { req, res, abortController, debug } = fields;
    this.abortController = abortController;
    this.user = req.user.id;
    this.req = req;
    this.res = res;
    this.debug = debug;
    this.runs = new Map();
    this.convos = new Map();
  }

  addRun(runId, runData) {
    if (!this.runs.has(runId)) {
      this.runs.set(runId, runData);
      if (runData.conversationId) {
        this.convos.set(runData.conversationId, runId);
      }
      return runData;
    } else {
      const existingData = this.runs.get(runId);
      const update = { ...existingData, ...runData };
      this.runs.set(runId, update);
      if (update.conversationId) {
        this.convos.set(update.conversationId, runId);
      }
      return update;
    }
  }

  removeRun(runId) {
    if (this.runs.has(runId)) {
      this.runs.delete(runId);
    } else {
      logger.error(`[api/app/clients/llm/RunManager] Run with ID ${runId} does not exist.`);
    }
  }

  getAllRuns() {
    return Array.from(this.runs.values());
  }

  getRunById(runId) {
    return this.runs.get(runId);
  }

  getRunByConversationId(conversationId) {
    const runId = this.convos.get(conversationId);
    return { run: this.runs.get(runId), runId };
  }

  createCallbacks(metadata) {
    return [
      {
        handleChatModelStart: createStartHandler({ ...metadata, manager: this }),
        handleLLMEnd: async (output, runId, _parentRunId) => {
          const { llmOutput, ..._output } = output;
          logger.debug(`[RunManager] handleLLMEnd: ${JSON.stringify(metadata)}`, {
            runId,
            _parentRunId,
            llmOutput,
          });

          if (metadata.context !== 'title') {
            logger.debug('[RunManager] handleLLMEnd:', {
              output: _output,
            });
          }

          const { tokenUsage } = output.llmOutput;
          const run = this.getRunById(runId);
          this.removeRun(runId);

          const txData = {
            user: this.user,
            model: run?.model ?? 'gpt-3.5-turbo',
            ...metadata,
          };

          await spendTokens(txData, tokenUsage);
        },
        handleLLMError: async (err) => {
          logger.error(`[RunManager] handleLLMError: ${JSON.stringify(metadata)}`, err);
          if (metadata.context === 'title') {
            return;
          } else if (metadata.context === 'plugins') {
            throw new Error(err);
          }
          const { conversationId } = metadata;
          const { run } = this.getRunByConversationId(conversationId);
          if (run && run.error) {
            const { error } = run;
            throw new Error(error);
          }
        },
      },
    ];
  }
}

module.exports = RunManager;
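
A minimal usage sketch (not part of the diff) that mirrors how OpenAIClient.initializeLLM wires RunManager into createLLM earlier in this changeset; `req`, `res`, and `abortController` are assumed request-scoped values:

const RunManager = require('~/app/clients/llm/RunManager');

const runManager = new RunManager({ req, res, debug: false, abortController });
const callbacks = runManager.createCallbacks({
  context: 'plugins',
  tokenBuffer: 100,
  conversationId: 'example-conversation-id', // hypothetical id
  initialMessageCount: 1,
});
// `callbacks` can then be passed to createLLM({ ..., callbacks }) so token usage
// is recorded on handleLLMEnd and balance errors surface through handleLLMError.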

api/app/clients/llm/createLLM.js (Normal file, 82 lines)
@@ -0,0 +1,82 @@
const { ChatOpenAI } = require('@langchain/openai');
const { sanitizeModelName, constructAzureURL } = require('~/utils');
const { isEnabled } = require('~/server/utils');

/**
 * Creates a new instance of a language model (LLM) for chat interactions.
 *
 * @param {Object} options - The options for creating the LLM.
 * @param {ModelOptions} options.modelOptions - The options specific to the model, including modelName, temperature, presence_penalty, frequency_penalty, and other model-related settings.
 * @param {ConfigOptions} options.configOptions - Configuration options for the API requests, including proxy settings and custom headers.
 * @param {Callbacks} [options.callbacks] - Callback functions for managing the lifecycle of the LLM, including token buffers, context, and initial message count.
 * @param {boolean} [options.streaming=false] - Determines if the LLM should operate in streaming mode.
 * @param {string} options.openAIApiKey - The API key for OpenAI, used for authentication.
 * @param {AzureOptions} [options.azure={}] - Optional Azure-specific configurations. If provided, Azure configurations take precedence over OpenAI configurations.
 *
 * @returns {ChatOpenAI} An instance of the ChatOpenAI class, configured with the provided options.
 *
 * @example
 * const llm = createLLM({
 *   modelOptions: { modelName: 'gpt-4o-mini', temperature: 0.2 },
 *   configOptions: { basePath: 'https://example.api/path' },
 *   callbacks: { onMessage: handleMessage },
 *   openAIApiKey: 'your-api-key'
 * });
 */
function createLLM({
  modelOptions,
  configOptions,
  callbacks,
  streaming = false,
  openAIApiKey,
  azure = {},
}) {
  let credentials = { openAIApiKey };
  let configuration = {
    apiKey: openAIApiKey,
    ...(configOptions.basePath && { baseURL: configOptions.basePath }),
  };

  /** @type {AzureOptions} */
  let azureOptions = {};
  if (azure) {
    const useModelName = isEnabled(process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME);

    credentials = {};
    configuration = {};
    azureOptions = azure;

    azureOptions.azureOpenAIApiDeploymentName = useModelName
      ? sanitizeModelName(modelOptions.modelName)
      : azureOptions.azureOpenAIApiDeploymentName;
  }

  if (azure && process.env.AZURE_OPENAI_DEFAULT_MODEL) {
    modelOptions.modelName = process.env.AZURE_OPENAI_DEFAULT_MODEL;
  }

  if (azure && configOptions.basePath) {
    const azureURL = constructAzureURL({
      baseURL: configOptions.basePath,
      azureOptions,
    });
    azureOptions.azureOpenAIBasePath = azureURL.split(
      `/${azureOptions.azureOpenAIApiDeploymentName}`,
    )[0];
  }

  return new ChatOpenAI(
    {
      streaming,
      credentials,
      configuration,
      ...azureOptions,
      ...modelOptions,
      ...credentials,
      callbacks,
    },
    configOptions,
  );
}

module.exports = createLLM;
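
The JSDoc example above covers the OpenAI path; for the Azure branch specifically, a hedged sketch follows. The instance, deployment, key, and API version are placeholders, not real configuration:

const createLLM = require('~/app/clients/llm/createLLM');

const llm = createLLM({
  modelOptions: { modelName: 'gpt-4o-mini', temperature: 0.2 },
  configOptions: {}, // no basePath, so no azureOpenAIBasePath is derived
  openAIApiKey: 'unused-when-azure-is-set',
  azure: {
    azureOpenAIApiKey: 'your-azure-key', // placeholder
    azureOpenAIApiInstanceName: 'your-instance-name', // placeholder
    azureOpenAIApiDeploymentName: 'your-deployment-name', // placeholder
    azureOpenAIApiVersion: '2024-02-01', // placeholder version
  },
});
// With AZURE_USE_MODEL_AS_DEPLOYMENT_NAME enabled, the deployment name is
// replaced by the sanitized model name instead.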

@@ -1,5 +1,9 @@
const createLLM = require('./createLLM');
const RunManager = require('./RunManager');
const createCoherePayload = require('./createCoherePayload');

module.exports = {
  createLLM,
  RunManager,
  createCoherePayload,
};

api/app/clients/memory/summaryBuffer.demo.js (Normal file, 31 lines)
@@ -0,0 +1,31 @@
require('dotenv').config();
const { ChatOpenAI } = require('@langchain/openai');
const { getBufferString, ConversationSummaryBufferMemory } = require('langchain/memory');

const chatPromptMemory = new ConversationSummaryBufferMemory({
  llm: new ChatOpenAI({ modelName: 'gpt-4o-mini', temperature: 0 }),
  maxTokenLimit: 10,
  returnMessages: true,
});

(async () => {
  await chatPromptMemory.saveContext({ input: 'hi my name\'s Danny' }, { output: 'whats up' });
  await chatPromptMemory.saveContext({ input: 'not much you' }, { output: 'not much' });
  await chatPromptMemory.saveContext(
    { input: 'are you excited for the olympics?' },
    { output: 'not really' },
  );

  // We can also utilize the predict_new_summary method directly.
  const messages = await chatPromptMemory.chatHistory.getMessages();
  console.log('MESSAGES\n\n');
  console.log(JSON.stringify(messages));
  const previous_summary = '';
  const predictSummary = await chatPromptMemory.predictNewSummary(messages, previous_summary);
  console.log('SUMMARY\n\n');
  console.log(JSON.stringify(getBufferString([{ role: 'system', content: predictSummary }])));

  // const { history } = await chatPromptMemory.loadMemoryVariables({});
  // console.log('HISTORY\n\n');
  // console.log(JSON.stringify(history));
})();

@@ -1,7 +1,7 @@
const { logger } = require('@librechat/data-schemas');
const { ConversationSummaryBufferMemory, ChatMessageHistory } = require('langchain/memory');
const { formatLangChainMessages, SUMMARY_PROMPT } = require('../prompts');
const { predictNewSummary } = require('../chains');
const { logger } = require('~/config');

const createSummaryBufferMemory = ({ llm, prompt, messages, ...rest }) => {
  const chatHistory = new ChatMessageHistory(messages);

@@ -1,4 +1,4 @@
const { logger } = require('@librechat/data-schemas');
const { logger } = require('~/config');

/**
 * The `addImages` function corrects any erroneous image URLs in the `responseMessage.text`

@@ -3,7 +3,6 @@ const { EModelEndpoint, ArtifactModes } = require('librechat-data-provider');
const { generateShadcnPrompt } = require('~/app/clients/prompts/shadcn-docs/generate');
const { components } = require('~/app/clients/prompts/shadcn-docs/components');

/** @deprecated */
// eslint-disable-next-line no-unused-vars
const artifactsPromptV1 = dedent`The assistant can create and reference artifacts during conversations.

@@ -116,7 +115,6 @@ Here are some examples of correct usage of artifacts:
</assistant_response>
</example>
</examples>`;

const artifactsPrompt = dedent`The assistant can create and reference artifacts during conversations.

Artifacts are for substantial, self-contained content that users might modify or reuse, displayed in a separate UI window for clarity.
@@ -167,10 +165,6 @@ Artifacts are for substantial, self-contained content that users might modify or
  - SVG: "image/svg+xml"
    - The user interface will render the Scalable Vector Graphics (SVG) image within the artifact tags.
    - The assistant should specify the viewbox of the SVG rather than defining a width/height
  - Markdown: "text/markdown" or "text/md"
    - The user interface will render Markdown content placed within the artifact tags.
    - Supports standard Markdown syntax including headers, lists, links, images, code blocks, tables, and more.
    - Both "text/markdown" and "text/md" are accepted as valid MIME types for Markdown content.
  - Mermaid Diagrams: "application/vnd.mermaid"
    - The user interface will render Mermaid diagrams placed within the artifact tags.
  - React Components: "application/vnd.react"
@@ -372,10 +366,6 @@ Artifacts are for substantial, self-contained content that users might modify or
  - SVG: "image/svg+xml"
    - The user interface will render the Scalable Vector Graphics (SVG) image within the artifact tags.
    - The assistant should specify the viewbox of the SVG rather than defining a width/height
  - Markdown: "text/markdown" or "text/md"
    - The user interface will render Markdown content placed within the artifact tags.
    - Supports standard Markdown syntax including headers, lists, links, images, code blocks, tables, and more.
    - Both "text/markdown" and "text/md" are accepted as valid MIME types for Markdown content.
  - Mermaid Diagrams: "application/vnd.mermaid"
    - The user interface will render Mermaid diagrams placed within the artifact tags.
  - React Components: "application/vnd.react"

@@ -1,6 +1,6 @@
const axios = require('axios');
const { logger } = require('@librechat/data-schemas');
const { isEnabled, generateShortLivedToken } = require('@librechat/api');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');

const footer = `Use the context as your learned knowledge to better answer the user.

@@ -18,7 +18,7 @@ function createContextHandlers(req, userMessageContent) {
  const queryPromises = [];
  const processedFiles = [];
  const processedIds = new Set();
  const jwtToken = generateShortLivedToken(req.user.id);
  const jwtToken = req.headers.authorization.split(' ')[1];
  const useFullContext = isEnabled(process.env.RAG_USE_FULL_CONTEXT);

  const query = async (file) => {
@@ -96,35 +96,35 @@ function createContextHandlers(req, userMessageContent) {
      resolvedQueries.length === 0
        ? '\n\tThe semantic search did not return any results.'
        : resolvedQueries
            .map((queryResult, index) => {
              const file = processedFiles[index];
              let contextItems = queryResult.data;

              const generateContext = (currentContext) =>
                `
        <file>
          <filename>${file.filename}</filename>
          <context>${currentContext}
          </context>
        </file>`;

              if (useFullContext) {
                return generateContext(`\n${contextItems}`);
              }

              contextItems = queryResult.data
                .map((item) => {
                  const pageContent = item[0].page_content;
                  return `
          <contextItem>
            <![CDATA[${pageContent?.trim()}]]>
          </contextItem>`;
                })
                .join('');

              return generateContext(contextItems);
            })
            .join('');

    if (useFullContext) {
      const prompt = `${header}

@@ -237,9 +237,41 @@ const formatAgentMessages = (payload) => {
  return messages;
};

/**
 * Formats an array of messages for LangChain, making sure all content fields are strings
 * @param {Array<(HumanMessage|AIMessage|SystemMessage|ToolMessage)>} payload - The array of messages to format.
 * @returns {Array<(HumanMessage|AIMessage|SystemMessage|ToolMessage)>} - The array of formatted LangChain messages, including ToolMessages for tool calls.
 */
const formatContentStrings = (payload) => {
  for (const message of payload) {
    if (typeof message.content === 'string') {
      continue;
    }

    if (!Array.isArray(message.content)) {
      continue;
    }

    // Reduce text types to a single string, ignore all other types
    const content = message.content.reduce((acc, curr) => {
      if (curr.type === ContentTypes.TEXT) {
        return `${acc}${curr[ContentTypes.TEXT]}\n`;
      }
      return acc;
    }, '');

    message.content = content.trim();
  }

  // Content is normalized in place; return the payload so the result matches the JSDoc.
  return payload;
};

module.exports = {
  formatMessage,
  formatFromLangChain,
  formatAgentMessages,
  formatContentStrings,
  formatLangChainMessages,
};
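
A small usage sketch of formatContentStrings above (not part of the diff); the require path is an assumption based on the prompts index shown here:

const { HumanMessage } = require('@langchain/core/messages');
const { formatContentStrings } = require('~/app/clients/prompts');

const payload = [
  new HumanMessage({
    content: [
      { type: 'text', text: 'First part.' },
      { type: 'image_url', image_url: { url: 'https://example.com/img.png' } }, // non-text: ignored
      { type: 'text', text: 'Second part.' },
    ],
  }),
];

formatContentStrings(payload);
console.log(payload[0].content); // 'First part.\nSecond part.'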

@@ -245,7 +245,7 @@ describe('AnthropicClient', () => {
  });

  describe('Claude 4 model headers', () => {
    it('should add "prompt-caching" and "context-1m" beta headers for claude-sonnet-4 model', () => {
    it('should add "prompt-caching" beta header for claude-sonnet-4 model', () => {
      const client = new AnthropicClient('test-api-key');
      const modelOptions = {
        model: 'claude-sonnet-4-20250514',
@@ -255,30 +255,10 @@ describe('AnthropicClient', () => {
      expect(anthropicClient._options.defaultHeaders).toBeDefined();
      expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
      expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
        'prompt-caching-2024-07-31,context-1m-2025-08-07',
        'prompt-caching-2024-07-31',
      );
    });

    it('should add "prompt-caching" and "context-1m" beta headers for claude-sonnet-4 model formats', () => {
      const client = new AnthropicClient('test-api-key');
      const modelVariations = [
        'claude-sonnet-4-20250514',
        'claude-sonnet-4-latest',
        'anthropic/claude-sonnet-4-20250514',
      ];

      modelVariations.forEach((model) => {
        const modelOptions = { model };
        client.setOptions({ modelOptions, promptCache: true });
        const anthropicClient = client.getClient(modelOptions);
        expect(anthropicClient._options.defaultHeaders).toBeDefined();
        expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
        expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
          'prompt-caching-2024-07-31,context-1m-2025-08-07',
        );
      });
    });

    it('should add "prompt-caching" beta header for claude-opus-4 model', () => {
      const client = new AnthropicClient('test-api-key');
      const modelOptions = {
@@ -293,6 +273,20 @@ describe('AnthropicClient', () => {
      );
    });

    it('should add "prompt-caching" beta header for claude-4-sonnet model', () => {
      const client = new AnthropicClient('test-api-key');
      const modelOptions = {
        model: 'claude-4-sonnet-20250514',
      };
      client.setOptions({ modelOptions, promptCache: true });
      const anthropicClient = client.getClient(modelOptions);
      expect(anthropicClient._options.defaultHeaders).toBeDefined();
      expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
      expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
        'prompt-caching-2024-07-31',
      );
    });

    it('should add "prompt-caching" beta header for claude-4-opus model', () => {
      const client = new AnthropicClient('test-api-key');
      const modelOptions = {
@@ -315,7 +309,7 @@ describe('AnthropicClient', () => {
      };
      client.setOptions({ modelOptions, promptCache: true });
      const anthropicClient = client.getClient(modelOptions);
      expect(anthropicClient._options.defaultHeaders).toBeUndefined();
      expect(anthropicClient.defaultHeaders).not.toHaveProperty('anthropic-beta');
    });

    it('should not add beta header for other models', () => {
@@ -326,7 +320,7 @@ describe('AnthropicClient', () => {
        },
      });
      const anthropicClient = client.getClient();
      expect(anthropicClient._options.defaultHeaders).toBeUndefined();
      expect(anthropicClient.defaultHeaders).not.toHaveProperty('anthropic-beta');
    });
  });

@@ -2,14 +2,6 @@ const { Constants } = require('librechat-data-provider');
const { initializeFakeClient } = require('./FakeClient');

jest.mock('~/db/connect');
jest.mock('~/server/services/Config', () => ({
  getAppConfig: jest.fn().mockResolvedValue({
    // Default app config for tests
    paths: { uploads: '/tmp' },
    fileStrategy: 'local',
    memory: { disabled: false },
  }),
}));
jest.mock('~/models', () => ({
  User: jest.fn(),
  Key: jest.fn(),
@@ -41,9 +33,7 @@ jest.mock('~/models', () => ({
const { getConvo, saveConvo } = require('~/models');

jest.mock('@librechat/agents', () => {
  const { Providers } = jest.requireActual('@librechat/agents');
  return {
    Providers,
    ChatOpenAI: jest.fn().mockImplementation(() => {
      return {};
    }),
@@ -430,46 +420,6 @@ describe('BaseClient', () => {
    expect(response).toEqual(expectedResult);
  });

  test('should replace responseMessageId with new UUID when isRegenerate is true and messageId ends with underscore', async () => {
    const mockCrypto = require('crypto');
    const newUUID = 'new-uuid-1234';
    jest.spyOn(mockCrypto, 'randomUUID').mockReturnValue(newUUID);

    const opts = {
      isRegenerate: true,
      responseMessageId: 'existing-message-id_',
    };

    await TestClient.setMessageOptions(opts);

    expect(TestClient.responseMessageId).toBe(newUUID);
    expect(TestClient.responseMessageId).not.toBe('existing-message-id_');

    mockCrypto.randomUUID.mockRestore();
  });

  test('should not replace responseMessageId when isRegenerate is false', async () => {
    const opts = {
      isRegenerate: false,
      responseMessageId: 'existing-message-id_',
    };

    await TestClient.setMessageOptions(opts);

    expect(TestClient.responseMessageId).toBe('existing-message-id_');
  });

  test('should not replace responseMessageId when it does not end with underscore', async () => {
    const opts = {
      isRegenerate: true,
      responseMessageId: 'existing-message-id',
    };

    await TestClient.setMessageOptions(opts);

    expect(TestClient.responseMessageId).toBe('existing-message-id');
  });

  test('sendMessage should work with provided conversationId and parentMessageId', async () => {
    const userMessage = 'Second message in the conversation';
    const opts = {
@@ -587,8 +537,6 @@ describe('BaseClient', () => {
    expect(onStart).toHaveBeenCalledWith(
      expect.objectContaining({ text: 'Hello, world!' }),
      expect.any(String),
      /** `isNewConvo` */
      true,
    );
  });

@@ -1,5 +1,5 @@
const { getModelMaxTokens } = require('@librechat/api');
const BaseClient = require('../BaseClient');
const { getModelMaxTokens } = require('../../../utils');

class FakeClient extends BaseClient {
  constructor(apiKey, options = {}) {

@@ -531,6 +531,44 @@ describe('OpenAIClient', () => {
    });
  });

  describe('sendMessage/getCompletion/chatCompletion', () => {
    afterEach(() => {
      delete process.env.AZURE_OPENAI_DEFAULT_MODEL;
      delete process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
    });

    it('should call getCompletion and fetchEventSource when using a text/instruct model', async () => {
      const model = 'text-davinci-003';
      const onProgress = jest.fn().mockImplementation(() => ({}));

      const testClient = new OpenAIClient('test-api-key', {
        ...defaultOptions,
        modelOptions: { model },
      });

      const getCompletion = jest.spyOn(testClient, 'getCompletion');
      await testClient.sendMessage('Hi mom!', { onProgress });

      expect(getCompletion).toHaveBeenCalled();
      expect(getCompletion.mock.calls.length).toBe(1);

      expect(getCompletion.mock.calls[0][0]).toBe('||>User:\nHi mom!\n||>Assistant:\n');

      expect(fetchEventSource).toHaveBeenCalled();
      expect(fetchEventSource.mock.calls.length).toBe(1);

      // Check if the first argument (url) is correct
      const firstCallArgs = fetchEventSource.mock.calls[0];

      const expectedURL = 'https://api.openai.com/v1/completions';
      expect(firstCallArgs[0]).toBe(expectedURL);

      const requestBody = JSON.parse(firstCallArgs[1].body);
      expect(requestBody).toHaveProperty('model');
      expect(requestBody.model).toBe(model);
    });
  });

  describe('checkVisionRequest functionality', () => {
    let client;
    const attachments = [{ type: 'image/png' }];

api/app/clients/specs/PluginsClient.test.js (Normal file, 314 lines)
@@ -0,0 +1,314 @@
|
||||
const crypto = require('crypto');
const { Constants } = require('librechat-data-provider');
const { HumanMessage, AIMessage } = require('@langchain/core/messages');
const PluginsClient = require('../PluginsClient');

jest.mock('~/db/connect');
jest.mock('~/models/Conversation', () => {
  return function () {
    return {
      save: jest.fn(),
      deleteConvos: jest.fn(),
    };
  };
});

const defaultAzureOptions = {
  azureOpenAIApiInstanceName: 'your-instance-name',
  azureOpenAIApiDeploymentName: 'your-deployment-name',
  azureOpenAIApiVersion: '2020-07-01-preview',
};

describe('PluginsClient', () => {
  let TestAgent;
  let options = {
    tools: [],
    modelOptions: {
      model: 'gpt-3.5-turbo',
      temperature: 0,
      max_tokens: 2,
    },
    agentOptions: {
      model: 'gpt-3.5-turbo',
    },
  };
  let parentMessageId;
  let conversationId;
  const fakeMessages = [];
  const userMessage = 'Hello, ChatGPT!';
  const apiKey = 'fake-api-key';

  beforeEach(() => {
    TestAgent = new PluginsClient(apiKey, options);
    TestAgent.loadHistory = jest
      .fn()
      .mockImplementation((conversationId, parentMessageId = null) => {
        if (!conversationId) {
          TestAgent.currentMessages = [];
          return Promise.resolve([]);
        }

        const orderedMessages = TestAgent.constructor.getMessagesForConversation({
          messages: fakeMessages,
          parentMessageId,
        });

        const chatMessages = orderedMessages.map((msg) =>
          msg?.isCreatedByUser || msg?.role?.toLowerCase() === 'user'
            ? new HumanMessage(msg.text)
            : new AIMessage(msg.text),
        );

        TestAgent.currentMessages = orderedMessages;
        return Promise.resolve(chatMessages);
      });
    TestAgent.sendMessage = jest.fn().mockImplementation(async (message, opts = {}) => {
      if (opts && typeof opts === 'object') {
        TestAgent.setOptions(opts);
      }
      const conversationId = opts.conversationId || crypto.randomUUID();
      const parentMessageId = opts.parentMessageId || Constants.NO_PARENT;
      const userMessageId = opts.overrideParentMessageId || crypto.randomUUID();
      this.pastMessages = await TestAgent.loadHistory(
        conversationId,
        TestAgent.options?.parentMessageId,
      );

      const userMessage = {
        text: message,
        sender: 'ChatGPT',
        isCreatedByUser: true,
        messageId: userMessageId,
        parentMessageId,
        conversationId,
      };

      const response = {
        sender: 'ChatGPT',
        text: 'Hello, User!',
        isCreatedByUser: false,
        messageId: crypto.randomUUID(),
        parentMessageId: userMessage.messageId,
        conversationId,
      };

      fakeMessages.push(userMessage);
      fakeMessages.push(response);
      return response;
    });
  });

  test('initializes PluginsClient without crashing', () => {
    expect(TestAgent).toBeInstanceOf(PluginsClient);
  });

  test('check setOptions function', () => {
    expect(TestAgent.agentIsGpt3).toBe(true);
  });

  describe('sendMessage', () => {
    test('sendMessage should return a response message', async () => {
      const expectedResult = expect.objectContaining({
        sender: 'ChatGPT',
        text: expect.any(String),
        isCreatedByUser: false,
        messageId: expect.any(String),
        parentMessageId: expect.any(String),
        conversationId: expect.any(String),
      });

      const response = await TestAgent.sendMessage(userMessage);
      parentMessageId = response.messageId;
      conversationId = response.conversationId;
      expect(response).toEqual(expectedResult);
    });

    test('sendMessage should work with provided conversationId and parentMessageId', async () => {
      const userMessage = 'Second message in the conversation';
      const opts = {
        conversationId,
        parentMessageId,
      };

      const expectedResult = expect.objectContaining({
        sender: 'ChatGPT',
        text: expect.any(String),
        isCreatedByUser: false,
        messageId: expect.any(String),
        parentMessageId: expect.any(String),
        conversationId: opts.conversationId,
      });

      const response = await TestAgent.sendMessage(userMessage, opts);
      parentMessageId = response.messageId;
      expect(response.conversationId).toEqual(conversationId);
      expect(response).toEqual(expectedResult);
    });

    test('should return chat history', async () => {
      const chatMessages = await TestAgent.loadHistory(conversationId, parentMessageId);
      expect(TestAgent.currentMessages).toHaveLength(4);
      expect(chatMessages[0].text).toEqual(userMessage);
    });
  });

  describe('getFunctionModelName', () => {
    let client;

    beforeEach(() => {
      client = new PluginsClient('dummy_api_key');
    });

    test('should return the input when it includes a dash followed by four digits', () => {
      expect(client.getFunctionModelName('-1234')).toBe('-1234');
      expect(client.getFunctionModelName('gpt-4-5678-preview')).toBe('gpt-4-5678-preview');
    });

    test('should return the input for all function-capable models (`0613` models and above)', () => {
      expect(client.getFunctionModelName('gpt-4-0613')).toBe('gpt-4-0613');
      expect(client.getFunctionModelName('gpt-4-32k-0613')).toBe('gpt-4-32k-0613');
      expect(client.getFunctionModelName('gpt-3.5-turbo-0613')).toBe('gpt-3.5-turbo-0613');
      expect(client.getFunctionModelName('gpt-3.5-turbo-16k-0613')).toBe('gpt-3.5-turbo-16k-0613');
      expect(client.getFunctionModelName('gpt-3.5-turbo-1106')).toBe('gpt-3.5-turbo-1106');
      expect(client.getFunctionModelName('gpt-4-1106-preview')).toBe('gpt-4-1106-preview');
      expect(client.getFunctionModelName('gpt-4-1106')).toBe('gpt-4-1106');
    });

    test('should return the corresponding model if input is non-function capable (`0314` models)', () => {
      expect(client.getFunctionModelName('gpt-4-0314')).toBe('gpt-4');
      expect(client.getFunctionModelName('gpt-4-32k-0314')).toBe('gpt-4');
      expect(client.getFunctionModelName('gpt-3.5-turbo-0314')).toBe('gpt-3.5-turbo');
      expect(client.getFunctionModelName('gpt-3.5-turbo-16k-0314')).toBe('gpt-3.5-turbo');
    });

    test('should return "gpt-3.5-turbo" when the input includes "gpt-3.5-turbo"', () => {
      expect(client.getFunctionModelName('test gpt-3.5-turbo model')).toBe('gpt-3.5-turbo');
    });

    test('should return "gpt-4" when the input includes "gpt-4"', () => {
      expect(client.getFunctionModelName('testing gpt-4')).toBe('gpt-4');
    });

    test('should return "gpt-3.5-turbo" for input that does not meet any specific condition', () => {
      expect(client.getFunctionModelName('random string')).toBe('gpt-3.5-turbo');
      expect(client.getFunctionModelName('')).toBe('gpt-3.5-turbo');
    });
  });

  describe('Azure OpenAI tests specific to Plugins', () => {
    // TODO: add more tests for Azure OpenAI integration with Plugins
    // let client;
    // beforeEach(() => {
    //   client = new PluginsClient('dummy_api_key');
    // });

    test('should not call getFunctionModelName when azure options are set', () => {
      const spy = jest.spyOn(PluginsClient.prototype, 'getFunctionModelName');
      const model = 'gpt-4-turbo';

      // note, without the azure change in PR #1766, `getFunctionModelName` is called twice
      const testClient = new PluginsClient('dummy_api_key', {
        agentOptions: {
          model,
          agent: 'functions',
        },
        azure: defaultAzureOptions,
      });

      expect(spy).not.toHaveBeenCalled();
      expect(testClient.agentOptions.model).toBe(model);

      spy.mockRestore();
    });
  });

  describe('sendMessage with filtered tools', () => {
    let TestAgent;
    const apiKey = 'fake-api-key';
    const mockTools = [{ name: 'tool1' }, { name: 'tool2' }, { name: 'tool3' }, { name: 'tool4' }];

    beforeEach(() => {
      TestAgent = new PluginsClient(apiKey, {
        tools: mockTools,
        modelOptions: {
          model: 'gpt-3.5-turbo',
          temperature: 0,
          max_tokens: 2,
        },
        agentOptions: {
          model: 'gpt-3.5-turbo',
        },
      });

      TestAgent.options.req = {
        app: {
          locals: {},
        },
      };

      TestAgent.sendMessage = jest.fn().mockImplementation(async () => {
        const { filteredTools = [], includedTools = [] } = TestAgent.options.req.app.locals;

        if (includedTools.length > 0) {
          const tools = TestAgent.options.tools.filter((plugin) =>
            includedTools.includes(plugin.name),
          );
          TestAgent.options.tools = tools;
        } else {
          const tools = TestAgent.options.tools.filter(
            (plugin) => !filteredTools.includes(plugin.name),
          );
          TestAgent.options.tools = tools;
        }

        return {
          text: 'Mocked response',
          tools: TestAgent.options.tools,
        };
      });
    });

    test('should filter out tools when filteredTools is provided', async () => {
      TestAgent.options.req.app.locals.filteredTools = ['tool1', 'tool3'];
      const response = await TestAgent.sendMessage('Test message');
      expect(response.tools).toHaveLength(2);
      expect(response.tools).toEqual(
        expect.arrayContaining([
          expect.objectContaining({ name: 'tool2' }),
          expect.objectContaining({ name: 'tool4' }),
        ]),
      );
    });

    test('should only include specified tools when includedTools is provided', async () => {
      TestAgent.options.req.app.locals.includedTools = ['tool2', 'tool4'];
      const response = await TestAgent.sendMessage('Test message');
      expect(response.tools).toHaveLength(2);
      expect(response.tools).toEqual(
        expect.arrayContaining([
          expect.objectContaining({ name: 'tool2' }),
          expect.objectContaining({ name: 'tool4' }),
        ]),
      );
    });

    test('should prioritize includedTools over filteredTools', async () => {
      TestAgent.options.req.app.locals.filteredTools = ['tool1', 'tool3'];
      TestAgent.options.req.app.locals.includedTools = ['tool1', 'tool2'];
      const response = await TestAgent.sendMessage('Test message');
      expect(response.tools).toHaveLength(2);
      expect(response.tools).toEqual(
        expect.arrayContaining([
          expect.objectContaining({ name: 'tool1' }),
          expect.objectContaining({ name: 'tool2' }),
        ]),
      );
    });

    test('should not modify tools when no filters are provided', async () => {
      const response = await TestAgent.sendMessage('Test message');
      expect(response.tools).toHaveLength(4);
      expect(response.tools).toEqual(expect.arrayContaining(mockTools));
    });
  });
});
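The "filtered tools" block exercises a precedence rule: an includedTools allowlist wins outright, and filteredTools only applies when no allowlist is set. Extracted as a standalone sketch (the tests encode this inline in their mock):

// Editor's sketch of the selection logic the tests assert:
function selectTools(tools, { filteredTools = [], includedTools = [] } = {}) {
  if (includedTools.length > 0) {
    return tools.filter((t) => includedTools.includes(t.name));
  }
  return tools.filter((t) => !filteredTools.includes(t.name));
}

// Matches the "prioritize includedTools" expectation:
// selectTools(mockTools, { filteredTools: ['tool1', 'tool3'], includedTools: ['tool1', 'tool2'] })
// -> [{ name: 'tool1' }, { name: 'tool2' }]
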
184 api/app/clients/tools/dynamic/OpenAPIPlugin.js Normal file
@@ -0,0 +1,184 @@
require('dotenv').config();
const fs = require('fs');
const { z } = require('zod');
const path = require('path');
const yaml = require('js-yaml');
const { createOpenAPIChain } = require('langchain/chains');
const { DynamicStructuredTool } = require('@langchain/core/tools');
const { ChatPromptTemplate, HumanMessagePromptTemplate } = require('@langchain/core/prompts');
const { logger } = require('~/config');

function addLinePrefix(text, prefix = '// ') {
  return text
    .split('\n')
    .map((line) => prefix + line)
    .join('\n');
}

function createPrompt(name, functions) {
  const prefix = `// The ${name} tool has the following functions. Determine the desired or most optimal function for the user's query:`;
  const functionDescriptions = functions
    .map((func) => `// - ${func.name}: ${func.description}`)
    .join('\n');
  return `${prefix}\n${functionDescriptions}
// You are an expert manager and scrum master. You must provide a detailed intent to better execute the function.
// Always format as such: {{"func": "function_name", "intent": "intent and expected result"}}`;
}

const AuthBearer = z
  .object({
    type: z.string().includes('service_http'),
    authorization_type: z.string().includes('bearer'),
    verification_tokens: z.object({
      openai: z.string(),
    }),
  })
  .catch(() => false);

const AuthDefinition = z
  .object({
    type: z.string(),
    authorization_type: z.string(),
    verification_tokens: z.object({
      openai: z.string(),
    }),
  })
  .catch(() => false);

async function readSpecFile(filePath) {
  try {
    const fileContents = await fs.promises.readFile(filePath, 'utf8');
    if (path.extname(filePath) === '.json') {
      return JSON.parse(fileContents);
    }
    return yaml.load(fileContents);
  } catch (e) {
    logger.error('[readSpecFile] error', e);
    return false;
  }
}

async function getSpec(url) {
  const RegularUrl = z
    .string()
    .url()
    .catch(() => false);

  if (RegularUrl.parse(url) && path.extname(url) === '.json') {
    const response = await fetch(url);
    return await response.json();
  }

  const ValidSpecPath = z
    .string()
    .url()
    .catch(async () => {
      const spec = path.join(__dirname, '..', '.well-known', 'openapi', url);
      if (!fs.existsSync(spec)) {
        return false;
      }

      return await readSpecFile(spec);
    });

  return ValidSpecPath.parse(url);
}

async function createOpenAPIPlugin({ data, llm, user, message, memory, signal }) {
  let spec;
  try {
    spec = await getSpec(data.api.url);
  } catch (error) {
    logger.error('[createOpenAPIPlugin] getSpec error', error);
    return null;
  }

  if (!spec) {
    logger.warn('[createOpenAPIPlugin] No spec found');
    return null;
  }

  const headers = {};
  const { auth, name_for_model, description_for_model, description_for_human } = data;
  if (auth && AuthDefinition.parse(auth)) {
    logger.debug('[createOpenAPIPlugin] auth detected', auth);
    const { openai } = auth.verification_tokens;
    if (AuthBearer.parse(auth)) {
      headers.authorization = `Bearer ${openai}`;
      logger.debug('[createOpenAPIPlugin] added auth bearer', headers);
    }
  }

  const chainOptions = { llm };

  if (data.headers && data.headers['librechat_user_id']) {
    logger.debug('[createOpenAPIPlugin] id detected', headers);
    headers[data.headers['librechat_user_id']] = user;
  }

  if (Object.keys(headers).length > 0) {
    logger.debug('[createOpenAPIPlugin] headers detected', headers);
    chainOptions.headers = headers;
  }

  if (data.params) {
    logger.debug('[createOpenAPIPlugin] params detected', data.params);
    chainOptions.params = data.params;
  }

  let history = '';
  if (memory) {
    logger.debug('[createOpenAPIPlugin] openAPI chain: memory detected', memory);
    const { history: chat_history } = await memory.loadMemoryVariables({});
    history = chat_history?.length > 0 ? `\n\n## Chat History:\n${chat_history}\n` : '';
  }

  chainOptions.prompt = ChatPromptTemplate.fromMessages([
    HumanMessagePromptTemplate.fromTemplate(
      `# Use the provided API's to respond to this query:\n\n{query}\n\n## Instructions:\n${addLinePrefix(
        description_for_model,
      )}${history}`,
    ),
  ]);

  const chain = await createOpenAPIChain(spec, chainOptions);

  const { functions } = chain.chains[0].lc_kwargs.llmKwargs;

  return new DynamicStructuredTool({
    name: name_for_model,
    description_for_model: `${addLinePrefix(description_for_human)}${createPrompt(
      name_for_model,
      functions,
    )}`,
    description: `${description_for_human}`,
    schema: z.object({
      func: z
        .string()
        .describe(
          `The function to invoke. The functions available are: ${functions
            .map((func) => func.name)
            .join(', ')}`,
        ),
      intent: z
        .string()
        .describe('Describe your intent with the function and your expected result'),
    }),
    func: async ({ func = '', intent = '' }) => {
      const filteredFunctions = functions.filter((f) => f.name === func);
      chain.chains[0].lc_kwargs.llmKwargs.functions = filteredFunctions;
      const query = `${message}${func?.length > 0 ? `\n// Intent: ${intent}` : ''}`;
      const result = await chain.call({
        query,
        signal,
      });
      return result.response;
    },
  });
}

module.exports = {
  getSpec,
  readSpecFile,
  createOpenAPIPlugin,
};
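A hedged usage sketch for the exported helpers above: the manifest shape (data.api.url, name_for_model, and so on) follows the ai-plugin.json convention that getSpec and createOpenAPIPlugin read, while the llm argument (a LangChain chat model) and the loadPluginTool wrapper are assumptions for illustration.

// Editor's sketch, not part of the diff:
const { createOpenAPIPlugin } = require('./OpenAPIPlugin');

async function loadPluginTool({ manifest, llm, userId, userMessage }) {
  const controller = new AbortController();
  const pluginTool = await createOpenAPIPlugin({
    data: manifest, // parsed ai-plugin.json, including data.api.url
    llm,
    user: userId,
    message: userMessage,
    signal: controller.signal,
  });
  // Returns a DynamicStructuredTool, or null when the spec cannot be resolved.
  return pluginTool;
}
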
72 api/app/clients/tools/dynamic/OpenAPIPlugin.spec.js Normal file
@@ -0,0 +1,72 @@
const fs = require('fs');
const { createOpenAPIPlugin, getSpec, readSpecFile } = require('./OpenAPIPlugin');

global.fetch = jest.fn().mockImplementationOnce(() => {
  return new Promise((resolve) => {
    resolve({
      ok: true,
      json: () => Promise.resolve({ key: 'value' }),
    });
  });
});
jest.mock('fs', () => ({
  promises: {
    readFile: jest.fn(),
  },
  existsSync: jest.fn(),
}));

describe('readSpecFile', () => {
  it('reads JSON file correctly', async () => {
    fs.promises.readFile.mockResolvedValue(JSON.stringify({ test: 'value' }));
    const result = await readSpecFile('test.json');
    expect(result).toEqual({ test: 'value' });
  });

  it('reads YAML file correctly', async () => {
    fs.promises.readFile.mockResolvedValue('test: value');
    const result = await readSpecFile('test.yaml');
    expect(result).toEqual({ test: 'value' });
  });

  it('handles error correctly', async () => {
    fs.promises.readFile.mockRejectedValue(new Error('test error'));
    const result = await readSpecFile('test.json');
    expect(result).toBe(false);
  });
});

describe('getSpec', () => {
  it('fetches spec from url correctly', async () => {
    const parsedJson = await getSpec('https://www.instacart.com/.well-known/ai-plugin.json');
    const isObject = typeof parsedJson === 'object';
    expect(isObject).toEqual(true);
  });

  it('reads spec from file correctly', async () => {
    fs.existsSync.mockReturnValue(true);
    fs.promises.readFile.mockResolvedValue(JSON.stringify({ test: 'value' }));
    const result = await getSpec('test.json');
    expect(result).toEqual({ test: 'value' });
  });

  it('returns false when file does not exist', async () => {
    fs.existsSync.mockReturnValue(false);
    const result = await getSpec('test.json');
    expect(result).toBe(false);
  });
});

describe('createOpenAPIPlugin', () => {
  it('returns null when getSpec throws an error', async () => {
    const result = await createOpenAPIPlugin({ data: { api: { url: 'invalid' } } });
    expect(result).toBe(null);
  });

  it('returns null when no spec is found', async () => {
    const result = await createOpenAPIPlugin({});
    expect(result).toBe(null);
  });

  // Add more tests here for different scenarios
});
@@ -1,4 +1,4 @@
const manifest = require('./manifest');
const availableTools = require('./manifest.json');

// Structured Tools
const DALLE3 = require('./structured/DALLE3');
@@ -13,8 +13,23 @@ const TraversaalSearch = require('./structured/TraversaalSearch');
const createOpenAIImageTools = require('./structured/OpenAIImageTools');
const TavilySearchResults = require('./structured/TavilySearchResults');

/** @type {Record<string, TPlugin | undefined>} */
const manifestToolMap = {};

/** @type {Array<TPlugin>} */
const toolkits = [];

availableTools.forEach((tool) => {
  manifestToolMap[tool.pluginKey] = tool;
  if (tool.toolkit === true) {
    toolkits.push(tool);
  }
});

module.exports = {
  ...manifest,
  toolkits,
  availableTools,
  manifestToolMap,
  // Structured Tools
  DALLE3,
  FluxAPI,

@@ -1,20 +0,0 @@
const availableTools = require('./manifest.json');

/** @type {Record<string, TPlugin | undefined>} */
const manifestToolMap = {};

/** @type {Array<TPlugin>} */
const toolkits = [];

availableTools.forEach((tool) => {
  manifestToolMap[tool.pluginKey] = tool;
  if (tool.toolkit === true) {
    toolkits.push(tool);
  }
});

module.exports = {
  toolkits,
  availableTools,
  manifestToolMap,
};
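One side of this change keeps the manifest indexing in a standalone manifest.js while the other inlines it into the tools index. Either way, consumers resolve a plugin's manifest entry by pluginKey. A hypothetical consumer sketch (the require path is an assumption, not from this diff):

// Editor's sketch:
const { manifestToolMap, toolkits } = require('~/app/clients/tools');

const entry = manifestToolMap['web-browser'];
if (entry) {
  console.log(entry.name, entry.description); // e.g. 'Browser', 'Scrape and summarize webpage data'
}
console.log('toolkits:', toolkits.map((t) => t.pluginKey).join(', '));
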
@@ -49,7 +49,7 @@
    "pluginKey": "image_gen_oai",
    "toolkit": true,
    "description": "Image Generation and Editing using OpenAI's latest state-of-the-art models",
    "icon": "assets/image_gen_oai.png",
    "icon": "/assets/image_gen_oai.png",
    "authConfig": [
      {
        "authField": "IMAGE_GEN_OAI_API_KEY",
@@ -75,7 +75,7 @@
    "name": "Browser",
    "pluginKey": "web-browser",
    "description": "Scrape and summarize webpage data",
    "icon": "assets/web-browser.svg",
    "icon": "/assets/web-browser.svg",
    "authConfig": [
      {
        "authField": "OPENAI_API_KEY",
@@ -170,7 +170,7 @@
    "name": "OpenWeather",
    "pluginKey": "open_weather",
    "description": "Get weather forecasts and historical data from the OpenWeather API",
    "icon": "assets/openweather.png",
    "icon": "/assets/openweather.png",
    "authConfig": [
      {
        "authField": "OPENWEATHER_API_KEY",

@@ -1,7 +1,7 @@
const { z } = require('zod');
const { Tool } = require('@langchain/core/tools');
const { logger } = require('@librechat/data-schemas');
const { SearchClient, AzureKeyCredential } = require('@azure/search-documents');
const { logger } = require('~/config');

class AzureAISearch extends Tool {
  // Constants for default values
@@ -18,7 +18,7 @@ class AzureAISearch extends Tool {
    super();
    this.name = 'azure-ai-search';
    this.description =
      "Use the 'azure-ai-search' tool to retrieve search results relevant to your input";
      'Use the \'azure-ai-search\' tool to retrieve search results relevant to your input';
    /* Used to initialize the Tool without necessary variables. */
    this.override = fields.override ?? false;

@@ -1,16 +1,17 @@
const { z } = require('zod');
const path = require('path');
const OpenAI = require('openai');
const fetch = require('node-fetch');
const { v4: uuidv4 } = require('uuid');
const { ProxyAgent, fetch } = require('undici');
const { Tool } = require('@langchain/core/tools');
const { logger } = require('@librechat/data-schemas');
const { getImageBasename } = require('@librechat/api');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { FileContext, ContentTypes } = require('librechat-data-provider');
const { getImageBasename } = require('~/server/services/Files/images');
const extractBaseURL = require('~/utils/extractBaseURL');
const { logger } = require('~/config');

const displayMessage =
  "DALL-E displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
  'DALL-E displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';
class DALLE3 extends Tool {
  constructor(fields = {}) {
    super();
@@ -45,10 +46,7 @@ class DALLE3 extends Tool {
    }

    if (process.env.PROXY) {
      const proxyAgent = new ProxyAgent(process.env.PROXY);
      config.fetchOptions = {
        dispatcher: proxyAgent,
      };
      config.httpAgent = new HttpsProxyAgent(process.env.PROXY);
    }

    /** @type {OpenAI} */
@@ -165,8 +163,7 @@ Error Message: ${error.message}`);
    if (this.isAgent) {
      let fetchOptions = {};
      if (process.env.PROXY) {
        const proxyAgent = new ProxyAgent(process.env.PROXY);
        fetchOptions.dispatcher = proxyAgent;
        fetchOptions.agent = new HttpsProxyAgent(process.env.PROXY);
      }
      const imageResponse = await fetch(theImageUrl, fetchOptions);
      const arrayBuffer = await imageResponse.arrayBuffer();

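The DALLE3 hunks above show two proxy wirings side by side: https-proxy-agent's httpAgent field and undici's ProxyAgent passed through fetchOptions.dispatcher. A minimal sketch of the dispatcher variant, assuming only what the hunks themselves show:

// Editor's sketch:
const OpenAI = require('openai');
const { ProxyAgent } = require('undici');

const config = { apiKey: process.env.DALLE_API_KEY };
if (process.env.PROXY) {
  // An undici dispatcher routes the SDK's fetch-based requests through the proxy.
  config.fetchOptions = { dispatcher: new ProxyAgent(process.env.PROXY) };
}
const openai = new OpenAI(config);
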
@@ -3,12 +3,12 @@ const axios = require('axios');
const fetch = require('node-fetch');
const { v4: uuidv4 } = require('uuid');
const { Tool } = require('@langchain/core/tools');
const { logger } = require('@librechat/data-schemas');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { FileContext, ContentTypes } = require('librechat-data-provider');
const { logger } = require('~/config');

const displayMessage =
  "Flux displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
  'Flux displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';

/**
 * FluxAPI - A tool for generating high-quality images from text prompts using the Flux API.

@@ -1,19 +1,70 @@
const { z } = require('zod');
const axios = require('axios');
const { v4 } = require('uuid');
const OpenAI = require('openai');
const FormData = require('form-data');
const { ProxyAgent } = require('undici');
const { tool } = require('@langchain/core/tools');
const { logger } = require('@librechat/data-schemas');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { logAxiosError, oaiToolkit } = require('@librechat/api');
const { ContentTypes, EImageOutputType } = require('librechat-data-provider');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const extractBaseURL = require('~/utils/extractBaseURL');
const { logAxiosError, extractBaseURL } = require('~/utils');
const { getFiles } = require('~/models/File');
const { logger } = require('~/config');

/** Default descriptions for image generation tool */
const DEFAULT_IMAGE_GEN_DESCRIPTION = `
Generates high-quality, original images based solely on text, not using any uploaded reference images.

When to use \`image_gen_oai\`:
- To create entirely new images from detailed text descriptions that do NOT reference any image files.

When NOT to use \`image_gen_oai\`:
- If the user has uploaded any images and requests modifications, enhancements, or remixing based on those uploads → use \`image_edit_oai\` instead.

Generated image IDs will be returned in the response, so you can refer to them in future requests made to \`image_edit_oai\`.
`.trim();

/** Default description for image editing tool */
const DEFAULT_IMAGE_EDIT_DESCRIPTION =
  `Generates high-quality, original images based on text and one or more uploaded/referenced images.

When to use \`image_edit_oai\`:
- The user wants to modify, extend, or remix one **or more** uploaded images, either:
  - Previously generated, or in the current request (both to be included in the \`image_ids\` array).
- Always when the user refers to uploaded images for editing, enhancement, remixing, style transfer, or combining elements.
- Any current or existing images are to be used as visual guides.
- If there are any files in the current request, they are more likely than not expected as references for image edit requests.

When NOT to use \`image_edit_oai\`:
- Brand-new generations that do not rely on an existing image → use \`image_gen_oai\` instead.

Both generated and referenced image IDs will be returned in the response, so you can refer to them in future requests made to \`image_edit_oai\`.
`.trim();

/** Default prompt descriptions */
const DEFAULT_IMAGE_GEN_PROMPT_DESCRIPTION = `Describe the image you want in detail.
Be highly specific—break your idea into layers:
(1) main concept and subject,
(2) composition and position,
(3) lighting and mood,
(4) style, medium, or camera details,
(5) important features (age, expression, clothing, etc.),
(6) background.
Use positive, descriptive language and specify what should be included, not what to avoid.
List number and characteristics of people/objects, and mention style/technical requirements (e.g., "DSLR photo, 85mm lens, golden hour").
Do not reference any uploaded images—use for new image creation from text only.`;

const DEFAULT_IMAGE_EDIT_PROMPT_DESCRIPTION = `Describe the changes, enhancements, or new ideas to apply to the uploaded image(s).
Be highly specific—break your request into layers:
(1) main concept or transformation,
(2) specific edits/replacements or composition guidance,
(3) desired style, mood, or technique,
(4) features/items to keep, change, or add (such as objects, people, clothing, lighting, etc.).
Use positive, descriptive language and clarify what should be included or changed, not what to avoid.
Always base this prompt on the most recently uploaded reference images.`;

const displayMessage =
  "The tool displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
  'The tool displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';

/**
 * Replaces unwanted characters from the input string

@@ -39,11 +90,21 @@ function returnValue(value) {
  return value;
}

function createAbortHandler() {
  return function () {
    logger.debug('[ImageGenOAI] Image generation aborted');
  };
}
const getImageGenDescription = () => {
  return process.env.IMAGE_GEN_OAI_DESCRIPTION || DEFAULT_IMAGE_GEN_DESCRIPTION;
};

const getImageEditDescription = () => {
  return process.env.IMAGE_EDIT_OAI_DESCRIPTION || DEFAULT_IMAGE_EDIT_DESCRIPTION;
};

const getImageGenPromptDescription = () => {
  return process.env.IMAGE_GEN_OAI_PROMPT_DESCRIPTION || DEFAULT_IMAGE_GEN_PROMPT_DESCRIPTION;
};

const getImageEditPromptDescription = () => {
  return process.env.IMAGE_EDIT_OAI_PROMPT_DESCRIPTION || DEFAULT_IMAGE_EDIT_PROMPT_DESCRIPTION;
};

/**
 * Creates OpenAI Image tools (generation and editing)
@@ -53,9 +114,7 @@ function createAbortHandler() {
 * @param {string} fields.IMAGE_GEN_OAI_API_KEY - The OpenAI API key
 * @param {boolean} [fields.override] - Whether to override the API key check, necessary for app initialization
 * @param {MongoFile[]} [fields.imageFiles] - The images to be used for editing
 * @param {string} [fields.imageOutputType] - The image output type configuration
 * @param {string} [fields.fileStrategy] - The file storage strategy
 * @returns {Array<ReturnType<tool>>} - Array of image tools
 * @returns {Array} - Array of image tools
 */
function createOpenAIImageTools(fields = {}) {
  /** @type {boolean} Used to initialize the Tool without necessary variables. */
@@ -65,8 +124,8 @@ function createOpenAIImageTools(fields = {}) {
    throw new Error('This tool is only available for agents.');
  }
  const { req } = fields;
  const imageOutputType = fields.imageOutputType || EImageOutputType.PNG;
  const appFileStrategy = fields.fileStrategy;
  const imageOutputType = req?.app.locals.imageOutputType || EImageOutputType.PNG;
  const appFileStrategy = req?.app.locals.fileStrategy;

  const getApiKey = () => {
    const apiKey = process.env.IMAGE_GEN_OAI_API_KEY ?? '';
@@ -123,10 +182,7 @@ function createOpenAIImageTools(fields = {}) {
    }
    const clientConfig = { ...closureConfig };
    if (process.env.PROXY) {
      const proxyAgent = new ProxyAgent(process.env.PROXY);
      clientConfig.fetchOptions = {
        dispatcher: proxyAgent,
      };
      clientConfig.httpAgent = new HttpsProxyAgent(process.env.PROXY);
    }

    /** @type {OpenAI} */
@@ -144,18 +200,10 @@ function createOpenAIImageTools(fields = {}) {
    }

    let resp;
    /** @type {AbortSignal} */
    let derivedSignal = null;
    /** @type {() => void} */
    let abortHandler = null;

    try {
      if (runnableConfig?.signal) {
        derivedSignal = AbortSignal.any([runnableConfig.signal]);
        abortHandler = createAbortHandler();
        derivedSignal.addEventListener('abort', abortHandler, { once: true });
      }

      const derivedSignal = runnableConfig?.signal
        ? AbortSignal.any([runnableConfig.signal])
        : undefined;
      resp = await openai.images.generate(
        {
          model: 'gpt-image-1',
@@ -179,10 +227,6 @@ function createOpenAIImageTools(fields = {}) {
      logAxiosError({ error, message });
      return returnValue(`Something went wrong when trying to generate the image. The OpenAI API may be unavailable:
Error Message: ${error.message}`);
    } finally {
      if (abortHandler && derivedSignal) {
        derivedSignal.removeEventListener('abort', abortHandler);
      }
    }

    if (!resp) {

@@ -219,7 +263,46 @@ Error Message: ${error.message}`);
      ];
      return [response, { content, file_ids }];
    },
    oaiToolkit.image_gen_oai,
    {
      name: 'image_gen_oai',
      description: getImageGenDescription(),
      schema: z.object({
        prompt: z.string().max(32000).describe(getImageGenPromptDescription()),
        background: z
          .enum(['transparent', 'opaque', 'auto'])
          .optional()
          .describe(
            'Sets transparency for the background. Must be one of transparent, opaque or auto (default). When transparent, the output format should be png or webp.',
          ),
        /*
        n: z
          .number()
          .int()
          .min(1)
          .max(10)
          .optional()
          .describe('The number of images to generate. Must be between 1 and 10.'),
        output_compression: z
          .number()
          .int()
          .min(0)
          .max(100)
          .optional()
          .describe('The compression level (0-100%) for webp or jpeg formats. Defaults to 100.'),
        */
        quality: z
          .enum(['auto', 'high', 'medium', 'low'])
          .optional()
          .describe('The quality of the image. One of auto (default), high, medium, or low.'),
        size: z
          .enum(['auto', '1024x1024', '1536x1024', '1024x1536'])
          .optional()
          .describe(
            'The size of the generated image. One of 1024x1024, 1536x1024 (landscape), 1024x1536 (portrait), or auto (default).',
          ),
      }),
      responseFormat: 'content_and_artifact',
    },
  );

  /**
@@ -233,10 +316,7 @@ Error Message: ${error.message}`);

    const clientConfig = { ...closureConfig };
    if (process.env.PROXY) {
      const proxyAgent = new ProxyAgent(process.env.PROXY);
      clientConfig.fetchOptions = {
        dispatcher: proxyAgent,
      };
      clientConfig.httpAgent = new HttpsProxyAgent(process.env.PROXY);
    }

    const formData = new FormData();
@@ -328,17 +408,10 @@ Error Message: ${error.message}`);
      headers['Authorization'] = `Bearer ${apiKey}`;
    }

    /** @type {AbortSignal} */
    let derivedSignal = null;
    /** @type {() => void} */
    let abortHandler = null;

    try {
      if (runnableConfig?.signal) {
        derivedSignal = AbortSignal.any([runnableConfig.signal]);
        abortHandler = createAbortHandler();
        derivedSignal.addEventListener('abort', abortHandler, { once: true });
      }
      const derivedSignal = runnableConfig?.signal
        ? AbortSignal.any([runnableConfig.signal])
        : undefined;

      /** @type {import('axios').AxiosRequestConfig} */
      const axiosConfig = {
@@ -348,10 +421,6 @@ Error Message: ${error.message}`);
        baseURL,
      };

      if (process.env.PROXY) {
        axiosConfig.httpsAgent = new HttpsProxyAgent(process.env.PROXY);
      }

      if (process.env.IMAGE_GEN_OAI_AZURE_API_VERSION && process.env.IMAGE_GEN_OAI_BASEURL) {
        axiosConfig.params = {
          'api-version': process.env.IMAGE_GEN_OAI_AZURE_API_VERSION,
@@ -397,13 +466,50 @@ Error Message: ${error.message}`);
      logAxiosError({ error, message });
      return returnValue(`Something went wrong when trying to edit the image. The OpenAI API may be unavailable:
Error Message: ${error.message || 'Unknown error'}`);
    } finally {
      if (abortHandler && derivedSignal) {
        derivedSignal.removeEventListener('abort', abortHandler);
      }
    }
  },
    oaiToolkit.image_edit_oai,
    {
      name: 'image_edit_oai',
      description: getImageEditDescription(),
      schema: z.object({
        image_ids: z
          .array(z.string())
          .min(1)
          .describe(
            `
IDs (image ID strings) of previously generated or uploaded images that should guide the edit.

Guidelines:
- If the user's request depends on any prior image(s), copy their image IDs into the \`image_ids\` array (in the same order the user refers to them).
- Never invent or hallucinate IDs; only use IDs that are still visible in the conversation context.
- If no earlier image is relevant, omit the field entirely.
`.trim(),
          ),
        prompt: z.string().max(32000).describe(getImageEditPromptDescription()),
        /*
        n: z
          .number()
          .int()
          .min(1)
          .max(10)
          .optional()
          .describe('The number of images to generate. Must be between 1 and 10. Defaults to 1.'),
        */
        quality: z
          .enum(['auto', 'high', 'medium', 'low'])
          .optional()
          .describe(
            'The quality of the image. One of auto (default), high, medium, or low. High/medium/low only supported for gpt-image-1.',
          ),
        size: z
          .enum(['auto', '1024x1024', '1536x1024', '1024x1536', '256x256', '512x512'])
          .optional()
          .describe(
            'The size of the generated images. For gpt-image-1: auto (default), 1024x1024, 1536x1024, 1024x1536. For dall-e-2: 256x256, 512x512, 1024x1024.',
          ),
      }),
      responseFormat: 'content_and_artifact',
    },
  );

  return [imageGenTool, imageEditTool];

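One side of these hunks replaces the manual abort-listener bookkeeping (createAbortHandler plus addEventListener/removeEventListener in finally) with a signal derived only when one is provided and handed straight to the request. A condensed, direction-neutral sketch of that pattern:

// Editor's sketch; assumes the per-request options of openai.images.generate
// accept a signal, as the call shape in the hunk above implies.
async function generateImage(openai, params, runnableConfig) {
  const derivedSignal = runnableConfig?.signal
    ? AbortSignal.any([runnableConfig.signal])
    : undefined;
  return openai.images.generate(params, { signal: derivedSignal });
}
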
@@ -6,19 +6,19 @@ const axios = require('axios');
const sharp = require('sharp');
const { v4: uuidv4 } = require('uuid');
const { Tool } = require('@langchain/core/tools');
const { logger } = require('@librechat/data-schemas');
const { FileContext, ContentTypes } = require('librechat-data-provider');
const paths = require('~/config/paths');
const { logger } = require('~/config');

const displayMessage =
  "Stable Diffusion displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
  'Stable Diffusion displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';

class StableDiffusionAPI extends Tool {
  constructor(fields) {
    super();
    /** @type {string} User ID */
    this.userId = fields.userId;
    /** @type {ServerRequest | undefined} Express Request object, only provided by ToolService */
    /** @type {Express.Request | undefined} Express Request object, only provided by ToolService */
    this.req = fields.req;
    /** @type {boolean} Used to initialize the Tool without necessary variables. */
    this.override = fields.override ?? false;
@@ -44,7 +44,7 @@ class StableDiffusionAPI extends Tool {
    // "negative_prompt":"semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime, out of frame, low quality, ugly, mutation, deformed"
    // - Generate images only once per human query unless explicitly requested by the user`;
    this.description =
      "You can generate images using text with 'stable-diffusion'. This tool is exclusively for visual content.";
      'You can generate images using text with \'stable-diffusion\'. This tool is exclusively for visual content.';
    this.schema = z.object({
      prompt: z
        .string()

@@ -1,7 +1,7 @@
const { z } = require('zod');
const { Tool } = require('@langchain/core/tools');
const { logger } = require('@librechat/data-schemas');
const { getEnvironmentVariable } = require('@langchain/core/utils/env');
const { logger } = require('~/config');

/**
 * Tool for the Traversaal AI search API, Ares.
@@ -21,7 +21,7 @@ class TraversaalSearch extends Tool {
      query: z
        .string()
        .describe(
          "A properly written sentence to be interpreted by an AI to search the web according to the user's request.",
          'A properly written sentence to be interpreted by an AI to search the web according to the user\'s request.',
        ),
    });

@@ -38,6 +38,7 @@ class TraversaalSearch extends Tool {
    return apiKey;
  }

  // eslint-disable-next-line no-unused-vars
  async _call({ query }, _runManager) {
    const body = {
      query: [query],

@@ -1,8 +1,8 @@
/* eslint-disable no-useless-escape */
const { z } = require('zod');
const axios = require('axios');
const { z } = require('zod');
const { Tool } = require('@langchain/core/tools');
const { logger } = require('@librechat/data-schemas');
const { logger } = require('~/config');

class WolframAlphaAPI extends Tool {
  constructor(fields) {

@@ -1,9 +1,9 @@
const { ytToolkit } = require('@librechat/api');
const { z } = require('zod');
const { tool } = require('@langchain/core/tools');
const { youtube } = require('@googleapis/youtube');
const { logger } = require('@librechat/data-schemas');
const { YoutubeTranscript } = require('youtube-transcript');
const { getApiKey } = require('./credentials');
const { logger } = require('~/config');

function extractVideoId(url) {
  const rawIdRegex = /^[a-zA-Z0-9_-]{11}$/;
@@ -29,7 +29,7 @@ function parseTranscript(transcriptResponse) {
    .map((entry) => entry.text.trim())
    .filter((text) => text)
    .join(' ')
    .replaceAll('&#39;', "'");
    .replaceAll('&#39;', '\'');
}

function createYouTubeTools(fields = {}) {
@@ -42,94 +42,160 @@ function createYouTubeTools(fields = {}) {
    auth: apiKey,
  });

  const searchTool = tool(async ({ query, maxResults = 5 }) => {
    const response = await youtubeClient.search.list({
      part: 'snippet',
      q: query,
      type: 'video',
      maxResults: maxResults || 5,
    });
    const result = response.data.items.map((item) => ({
      title: item.snippet.title,
      description: item.snippet.description,
      url: `https://www.youtube.com/watch?v=${item.id.videoId}`,
    }));
    return JSON.stringify(result, null, 2);
  }, ytToolkit.youtube_search);
  const searchTool = tool(
    async ({ query, maxResults = 5 }) => {
      const response = await youtubeClient.search.list({
        part: 'snippet',
        q: query,
        type: 'video',
        maxResults: maxResults || 5,
      });
      const result = response.data.items.map((item) => ({
        title: item.snippet.title,
        description: item.snippet.description,
        url: `https://www.youtube.com/watch?v=${item.id.videoId}`,
      }));
      return JSON.stringify(result, null, 2);
    },
    {
      name: 'youtube_search',
      description: `Search for YouTube videos by keyword or phrase.
- Required: query (search terms to find videos)
- Optional: maxResults (number of videos to return, 1-50, default: 5)
- Returns: List of videos with titles, descriptions, and URLs
- Use for: Finding specific videos, exploring content, research
Example: query="cooking pasta tutorials" maxResults=3`,
      schema: z.object({
        query: z.string().describe('Search query terms'),
        maxResults: z.number().int().min(1).max(50).optional().describe('Number of results (1-50)'),
      }),
    },
  );

  const infoTool = tool(async ({ url }) => {
    const videoId = extractVideoId(url);
    if (!videoId) {
      throw new Error('Invalid YouTube URL or video ID');
    }
  const infoTool = tool(
    async ({ url }) => {
      const videoId = extractVideoId(url);
      if (!videoId) {
        throw new Error('Invalid YouTube URL or video ID');
      }

    const response = await youtubeClient.videos.list({
      part: 'snippet,statistics',
      id: videoId,
    });
      const response = await youtubeClient.videos.list({
        part: 'snippet,statistics',
        id: videoId,
      });

    if (!response.data.items?.length) {
      throw new Error('Video not found');
    }
    const video = response.data.items[0];
      if (!response.data.items?.length) {
        throw new Error('Video not found');
      }
      const video = response.data.items[0];

    const result = {
      title: video.snippet.title,
      description: video.snippet.description,
      views: video.statistics.viewCount,
      likes: video.statistics.likeCount,
      comments: video.statistics.commentCount,
    };
    return JSON.stringify(result, null, 2);
  }, ytToolkit.youtube_info);
      const result = {
        title: video.snippet.title,
        description: video.snippet.description,
        views: video.statistics.viewCount,
        likes: video.statistics.likeCount,
        comments: video.statistics.commentCount,
      };
      return JSON.stringify(result, null, 2);
    },
    {
      name: 'youtube_info',
      description: `Get detailed metadata and statistics for a specific YouTube video.
- Required: url (full YouTube URL or video ID)
- Returns: Video title, description, view count, like count, comment count
- Use for: Getting video metrics and basic metadata
- DO NOT USE FOR VIDEO SUMMARIES, USE TRANSCRIPTS FOR COMPREHENSIVE ANALYSIS
- Accepts both full URLs and video IDs
Example: url="https://youtube.com/watch?v=abc123" or url="abc123"`,
      schema: z.object({
        url: z.string().describe('YouTube video URL or ID'),
      }),
    },
  );

  const commentsTool = tool(async ({ url, maxResults = 10 }) => {
    const videoId = extractVideoId(url);
    if (!videoId) {
      throw new Error('Invalid YouTube URL or video ID');
    }
  const commentsTool = tool(
    async ({ url, maxResults = 10 }) => {
      const videoId = extractVideoId(url);
      if (!videoId) {
        throw new Error('Invalid YouTube URL or video ID');
      }

    const response = await youtubeClient.commentThreads.list({
      part: 'snippet',
      videoId,
      maxResults: maxResults || 10,
    });
      const response = await youtubeClient.commentThreads.list({
        part: 'snippet',
        videoId,
        maxResults: maxResults || 10,
      });

    const result = response.data.items.map((item) => ({
      author: item.snippet.topLevelComment.snippet.authorDisplayName,
      text: item.snippet.topLevelComment.snippet.textDisplay,
      likes: item.snippet.topLevelComment.snippet.likeCount,
    }));
    return JSON.stringify(result, null, 2);
  }, ytToolkit.youtube_comments);
      const result = response.data.items.map((item) => ({
        author: item.snippet.topLevelComment.snippet.authorDisplayName,
        text: item.snippet.topLevelComment.snippet.textDisplay,
        likes: item.snippet.topLevelComment.snippet.likeCount,
      }));
      return JSON.stringify(result, null, 2);
    },
    {
      name: 'youtube_comments',
      description: `Retrieve top-level comments from a YouTube video.
- Required: url (full YouTube URL or video ID)
- Optional: maxResults (number of comments, 1-50, default: 10)
- Returns: Comment text, author names, like counts
- Use for: Sentiment analysis, audience feedback, engagement review
Example: url="abc123" maxResults=20`,
      schema: z.object({
        url: z.string().describe('YouTube video URL or ID'),
        maxResults: z
          .number()
          .int()
          .min(1)
          .max(50)
          .optional()
          .describe('Number of comments to retrieve'),
      }),
    },
  );

  const transcriptTool = tool(async ({ url }) => {
    const videoId = extractVideoId(url);
    if (!videoId) {
      throw new Error('Invalid YouTube URL or video ID');
    }

    try {
      try {
        const transcript = await YoutubeTranscript.fetchTranscript(videoId, { lang: 'en' });
        return parseTranscript(transcript);
      } catch (e) {
        logger.error(e);
  const transcriptTool = tool(
    async ({ url }) => {
      const videoId = extractVideoId(url);
      if (!videoId) {
        throw new Error('Invalid YouTube URL or video ID');
      }

      try {
        const transcript = await YoutubeTranscript.fetchTranscript(videoId, { lang: 'de' });
        return parseTranscript(transcript);
      } catch (e) {
        logger.error(e);
      }
      try {
        const transcript = await YoutubeTranscript.fetchTranscript(videoId, { lang: 'en' });
        return parseTranscript(transcript);
      } catch (e) {
        logger.error(e);
      }

      const transcript = await YoutubeTranscript.fetchTranscript(videoId);
      return parseTranscript(transcript);
    } catch (error) {
      throw new Error(`Failed to fetch transcript: ${error.message}`);
    }
  }, ytToolkit.youtube_transcript);
      try {
        const transcript = await YoutubeTranscript.fetchTranscript(videoId, { lang: 'de' });
        return parseTranscript(transcript);
      } catch (e) {
        logger.error(e);
      }

      const transcript = await YoutubeTranscript.fetchTranscript(videoId);
      return parseTranscript(transcript);
    } catch (error) {
      throw new Error(`Failed to fetch transcript: ${error.message}`);
    }
    },
    {
      name: 'youtube_transcript',
      description: `Fetch and parse the transcript/captions of a YouTube video.
- Required: url (full YouTube URL or video ID)
- Returns: Full video transcript as plain text
- Use for: Content analysis, summarization, translation reference
- This is the "Go-to" tool for analyzing actual video content
- Attempts to fetch English first, then German, then any available language
Example: url="https://youtube.com/watch?v=abc123"`,
      schema: z.object({
        url: z.string().describe('YouTube video URL or ID'),
      }),
    },
  );

  return [searchTool, infoTool, commentsTool, transcriptTool];
}

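The transcript tool's fallback chain (one fixed language, then another, then whatever is available) reads more easily as a loop. A direction-neutral sketch of the behavior in the hunk above:

// Editor's sketch:
const { YoutubeTranscript } = require('youtube-transcript');

async function fetchTranscriptWithFallback(videoId, preferredLangs = ['de', 'en']) {
  for (const lang of preferredLangs) {
    try {
      return await YoutubeTranscript.fetchTranscript(videoId, { lang });
    } catch (e) {
      // try the next preferred language
    }
  }
  // Last resort: let the library pick any available transcript language.
  return YoutubeTranscript.fetchTranscript(videoId);
}
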
@@ -1,60 +0,0 @@
const DALLE3 = require('../DALLE3');
const { ProxyAgent } = require('undici');

jest.mock('tiktoken');
const processFileURL = jest.fn();

describe('DALLE3 Proxy Configuration', () => {
  let originalEnv;

  beforeAll(() => {
    originalEnv = { ...process.env };
  });

  beforeEach(() => {
    jest.resetModules();
    process.env = { ...originalEnv };
  });

  afterEach(() => {
    process.env = originalEnv;
  });

  it('should configure ProxyAgent in fetchOptions.dispatcher when PROXY env is set', () => {
    // Set proxy environment variable
    process.env.PROXY = 'http://proxy.example.com:8080';
    process.env.DALLE_API_KEY = 'test-api-key';

    // Create instance
    const dalleWithProxy = new DALLE3({ processFileURL });

    // Check that the openai client exists
    expect(dalleWithProxy.openai).toBeDefined();

    // Check that _options exists and has fetchOptions with a dispatcher
    expect(dalleWithProxy.openai._options).toBeDefined();
    expect(dalleWithProxy.openai._options.fetchOptions).toBeDefined();
    expect(dalleWithProxy.openai._options.fetchOptions.dispatcher).toBeDefined();
    expect(dalleWithProxy.openai._options.fetchOptions.dispatcher).toBeInstanceOf(ProxyAgent);
  });

  it('should not configure ProxyAgent when PROXY env is not set', () => {
    // Ensure PROXY is not set
    delete process.env.PROXY;
    process.env.DALLE_API_KEY = 'test-api-key';

    // Create instance
    const dalleWithoutProxy = new DALLE3({ processFileURL });

    // Check that the openai client exists
    expect(dalleWithoutProxy.openai).toBeDefined();

    // Check that _options exists but fetchOptions either doesn't exist or doesn't have a dispatcher
    expect(dalleWithoutProxy.openai._options).toBeDefined();

    // fetchOptions should either not exist or not have a dispatcher
    if (dalleWithoutProxy.openai._options.fetchOptions) {
      expect(dalleWithoutProxy.openai._options.fetchOptions.dispatcher).toBeUndefined();
    }
  });
});
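The deleted spec above pinned down a single behavior: when the `PROXY` environment variable is set, the DALLE3 wrapper hands its OpenAI client an undici `ProxyAgent` as the fetch dispatcher. A minimal sketch of the wiring those assertions imply (a sketch, not the removed implementation):

const OpenAI = require('openai');
const { ProxyAgent } = require('undici');

const fetchOptions = {};
if (process.env.PROXY) {
  // Route every request from this client through the configured proxy.
  fetchOptions.dispatcher = new ProxyAgent(process.env.PROXY);
}
const openai = new OpenAI({ apiKey: process.env.DALLE_API_KEY, fetchOptions });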
@@ -1,30 +1,31 @@
const OpenAI = require('openai');
const { logger } = require('@librechat/data-schemas');
const DALLE3 = require('../DALLE3');

jest.mock('openai');
jest.mock('@librechat/data-schemas', () => {
  return {
    logger: {
      info: jest.fn(),
      warn: jest.fn(),
      debug: jest.fn(),
      error: jest.fn(),
    },
  };
});
const { logger } = require('~/config');

jest.mock('tiktoken', () => {
  return {
    encoding_for_model: jest.fn().mockReturnValue({
      encode: jest.fn(),
      decode: jest.fn(),
    }),
  };
});
jest.mock('openai');

const processFileURL = jest.fn();

jest.mock('~/server/services/Files/images', () => ({
  getImageBasename: jest.fn().mockImplementation((url) => {
    // Split the URL by '/'
    const parts = url.split('/');

    // Get the last part of the URL
    const lastPart = parts.pop();

    // Check if the last part of the URL matches the image extension regex
    const imageExtensionRegex = /\.(jpg|jpeg|png|gif|bmp|tiff|svg)$/i;
    if (imageExtensionRegex.test(lastPart)) {
      return lastPart;
    }

    // If the regex test fails, return an empty string
    return '';
  }),
}));

const generate = jest.fn();
OpenAI.mockImplementation(() => ({
  images: {
@@ -36,11 +37,6 @@ jest.mock('fs', () => {
  return {
    existsSync: jest.fn(),
    mkdirSync: jest.fn(),
    promises: {
      writeFile: jest.fn(),
      readFile: jest.fn(),
      unlink: jest.fn(),
    },
  };
});
@@ -1,46 +1,26 @@
const { z } = require('zod');
const axios = require('axios');
const { tool } = require('@langchain/core/tools');
const { logger } = require('@librechat/data-schemas');
const { generateShortLivedToken } = require('@librechat/api');
const { Tools, EToolResources } = require('librechat-data-provider');
const { filterFilesByAgentAccess } = require('~/server/services/Files/permissions');
const { getFiles } = require('~/models/File');
const { logger } = require('~/config');

/**
 *
 * @param {Object} options
 * @param {ServerRequest} options.req
 * @param {Agent['tool_resources']} options.tool_resources
 * @param {string} [options.agentId] - The agent ID for file access control
 * @returns {Promise<{
 *   files: Array<{ file_id: string; filename: string }>,
 *   toolContext: string
 * }>}
 */
const primeFiles = async (options) => {
  const { tool_resources, req, agentId } = options;
  const { tool_resources } = options;
  const file_ids = tool_resources?.[EToolResources.file_search]?.file_ids ?? [];
  const agentResourceIds = new Set(file_ids);
  const resourceFiles = tool_resources?.[EToolResources.file_search]?.files ?? [];

  // Get all files first
  const allFiles = (await getFiles({ file_id: { $in: file_ids } }, null, { text: 0 })) ?? [];

  // Filter by access if user and agent are provided
  let dbFiles;
  if (req?.user?.id && agentId) {
    dbFiles = await filterFilesByAgentAccess({
      files: allFiles,
      userId: req.user.id,
      role: req.user.role,
      agentId,
    });
  } else {
    dbFiles = allFiles;
  }

  dbFiles = dbFiles.concat(resourceFiles);
  const dbFiles = ((await getFiles({ file_id: { $in: file_ids } })) ?? []).concat(resourceFiles);

  let toolContext = `- Note: Semantic search is available through the ${Tools.file_search} tool but no files are currently loaded. Request the user to upload documents to search through.`;

@@ -68,19 +48,18 @@ const primeFiles = async (options) => {
/**
 *
 * @param {Object} options
 * @param {string} options.userId
 * @param {ServerRequest} options.req
 * @param {Array<{ file_id: string; filename: string }>} options.files
 * @param {string} [options.entity_id]
 * @param {boolean} [options.fileCitations=false] - Whether to include citation instructions
 * @returns
 */
const createFileSearchTool = async ({ userId, files, entity_id, fileCitations = false }) => {
const createFileSearchTool = async ({ req, files, entity_id }) => {
  return tool(
    async ({ query }) => {
      if (files.length === 0) {
        return 'No files to search. Instruct the user to add files for the search.';
      }
      const jwtToken = generateShortLivedToken(userId);
      const jwtToken = req.headers.authorization.split(' ')[1];
      if (!jwtToken) {
        return 'There was an error authenticating the file search request.';
      }
@@ -126,13 +105,11 @@ const createFileSearchTool = async ({ userId, files, entity_id, fileCitations =
      }

      const formattedResults = validResults
        .flatMap((result, fileIndex) =>
        .flatMap((result) =>
          result.data.map(([docInfo, distance]) => ({
            filename: docInfo.metadata.source.split('/').pop(),
            content: docInfo.page_content,
            distance,
            file_id: files[fileIndex]?.file_id,
            page: docInfo.metadata.page || null,
          })),
        )
        // TODO: results should be sorted by relevance, not distance
@@ -142,46 +119,23 @@ const createFileSearchTool = async ({ userId, files, entity_id, fileCitations =

      const formattedString = formattedResults
        .map(
          (result, index) =>
            `File: ${result.filename}${
              fileCitations ? `\nAnchor: \\ue202turn0file${index} (${result.filename})` : ''
            }\nRelevance: ${(1.0 - result.distance).toFixed(4)}\nContent: ${result.content}\n`,
          (result) =>
            `File: ${result.filename}\nRelevance: ${1.0 - result.distance.toFixed(4)}\nContent: ${
              result.content
            }\n`,
        )
        .join('\n---\n');

      const sources = formattedResults.map((result) => ({
        type: 'file',
        fileId: result.file_id,
        content: result.content,
        fileName: result.filename,
        relevance: 1.0 - result.distance,
        pages: result.page ? [result.page] : [],
        pageRelevance: result.page ? { [result.page]: 1.0 - result.distance } : {},
      }));

      return [formattedString, { [Tools.file_search]: { sources, fileCitations } }];
      return formattedString;
    },
    {
      name: Tools.file_search,
      responseFormat: 'content_and_artifact',
      description: `Performs semantic search across attached "${Tools.file_search}" documents using natural language queries. This tool analyzes the content of uploaded files to find relevant information, quotes, and passages that best match your query. Use this to extract specific information or find relevant sections within the available documents.${
        fileCitations
          ? `

**CITE FILE SEARCH RESULTS:**
Use anchor markers immediately after statements derived from file content. Reference the filename in your text:
- File citation: "The document.pdf states that... \\ue202turn0file0"
- Page reference: "According to report.docx... \\ue202turn0file1"
- Multi-file: "Multiple sources confirm... \\ue200\\ue202turn0file0\\ue202turn0file1\\ue201"

**ALWAYS mention the filename in your text before the citation marker. NEVER use markdown links or footnotes.**`
          : ''
      }`,
      description: `Performs semantic search across attached "${Tools.file_search}" documents using natural language queries. This tool analyzes the content of uploaded files to find relevant information, quotes, and passages that best match your query. Use this to extract specific information or find relevant sections within the available documents.`,
      schema: z.object({
        query: z
          .string()
          .describe(
            "A natural language query to search for relevant information in the files. Be specific and use keywords related to the information you're looking for. The query will be used for semantic similarity matching against the file contents.",
            'A natural language query to search for relevant information in the files. Be specific and use keywords related to the information you\'re looking for. The query will be used for semantic similarity matching against the file contents.',
          ),
      }),
    },
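One detail in the two variants of the relevance line above is operator precedence: `(1.0 - result.distance).toFixed(4)` subtracts first and formats the result, while `1.0 - result.distance.toFixed(4)` calls `toFixed` on the distance alone and then relies on JavaScript coercing the string back to a number. A quick illustration:

const distance = 0.15;
`${(1.0 - distance).toFixed(4)}`; // "0.8500" – subtract, then format
`${1.0 - distance.toFixed(4)}`;   // "0.85"  – toFixed ran on the wrong operand, so the output is unformatted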
@@ -1,5 +1,5 @@
const OpenAI = require('openai');
const { logger } = require('@librechat/data-schemas');
const { logger } = require('~/config');

/**
 * Handles errors that may occur when making requests to OpenAI's API.
@@ -1,21 +1,14 @@
const { logger } = require('@librechat/data-schemas');
const { SerpAPI } = require('@langchain/community/tools/serpapi');
const { Calculator } = require('@langchain/community/tools/calculator');
const { EnvVar, createCodeExecutionTool, createSearchTool } = require('@librechat/agents');
const {
  checkAccess,
  createSafeUser,
  mcpToolPattern,
  loadWebSearchAuth,
} = require('@librechat/api');
const {
  Tools,
  Constants,
  Permissions,
  EToolResources,
  PermissionTypes,
  loadWebSearchAuth,
  replaceSpecialVars,
} = require('librechat-data-provider');
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const {
  availableTools,
  manifestToolMap,
@@ -35,11 +28,11 @@ const {
} = require('../');
const { primeFiles: primeCodeFiles } = require('~/server/services/Files/Code/process');
const { createFileSearchTool, primeFiles: primeSearchFiles } = require('./fileSearch');
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const { createMCPTool, createMCPTools } = require('~/server/services/MCP');
const { loadAuthValues } = require('~/server/services/Tools/credentials');
const { getMCPServerTools } = require('~/server/services/Config');
const { getRoleByName } = require('~/models/Role');
const { createMCPTool } = require('~/server/services/MCP');
const { logger } = require('~/config');

const mcpToolPattern = new RegExp(`^.+${Constants.mcp_delimiter}.+$`);

/**
 * Validates the availability and authentication of tools for a user based on environment variables or user-specific plugin authentication values.
@@ -100,7 +93,7 @@ const validateTools = async (user, tools = []) => {
    return Array.from(validToolsSet.values());
  } catch (err) {
    logger.error('[validateTools] There was a problem validating tools', err);
    throw new Error(err);
    throw new Error('There was a problem validating tools');
  }
};

@@ -134,37 +127,27 @@ const getAuthFields = (toolKey) => {

/**
 *
 * @param {object} params
 * @param {string} params.user
 * @param {Record<string, Record<string, string>>} [object.userMCPAuthMap]
 * @param {AbortSignal} [object.signal]
 * @param {Pick<Agent, 'id' | 'provider' | 'model'>} [params.agent]
 * @param {string} [params.model]
 * @param {EModelEndpoint} [params.endpoint]
 * @param {LoadToolOptions} [params.options]
 * @param {boolean} [params.useSpecs]
 * @param {Array<string>} params.tools
 * @param {boolean} [params.functions]
 * @param {boolean} [params.returnMap]
 * @param {AppConfig['webSearch']} [params.webSearch]
 * @param {AppConfig['fileStrategy']} [params.fileStrategy]
 * @param {AppConfig['imageOutputType']} [params.imageOutputType]
 * @param {object} object
 * @param {string} object.user
 * @param {Pick<Agent, 'id' | 'provider' | 'model'>} [object.agent]
 * @param {string} [object.model]
 * @param {EModelEndpoint} [object.endpoint]
 * @param {LoadToolOptions} [object.options]
 * @param {boolean} [object.useSpecs]
 * @param {Array<string>} object.tools
 * @param {boolean} [object.functions]
 * @param {boolean} [object.returnMap]
 * @returns {Promise<{ loadedTools: Tool[], toolContextMap: Object<string, any> } | Record<string,Tool>>}
 */
const loadTools = async ({
  user,
  agent,
  model,
  signal,
  endpoint,
  userMCPAuthMap,
  tools = [],
  options = {},
  functions = true,
  returnMap = false,
  webSearch,
  fileStrategy,
  imageOutputType,
}) => {
  const toolConstructors = {
    flux: FluxAPI,
@@ -223,8 +206,6 @@ const loadTools = async ({
        ...authValues,
        isAgent: !!agent,
        req: options.req,
        imageOutputType,
        fileStrategy,
        imageFiles,
      });
    },
@@ -240,7 +221,7 @@ const loadTools = async ({
  const imageGenOptions = {
    isAgent: !!agent,
    req: options.req,
    fileStrategy,
    fileStrategy: options.fileStrategy,
    processFileURL: options.processFileURL,
    returnMetadata: options.returnMetadata,
    uploadImageBuffer: options.uploadImageBuffer,
@@ -255,7 +236,7 @@ const loadTools = async ({

  /** @type {Record<string, string>} */
  const toolContextMap = {};
  const requestedMCPTools = {};
  const appTools = options.req?.app?.locals?.availableTools ?? {};

  for (const tool of tools) {
    if (tool === Tools.execute_code) {
@@ -265,13 +246,7 @@ const loadTools = async ({
        authFields: [EnvVar.CODE_API_KEY],
      });
      const codeApiKey = authValues[EnvVar.CODE_API_KEY];
      const { files, toolContext } = await primeCodeFiles(
        {
          ...options,
          agentId: agent?.id,
        },
        codeApiKey,
      );
      const { files, toolContext } = await primeCodeFiles(options, codeApiKey);
      if (toolContext) {
        toolContextMap[tool] = toolContext;
      }
@@ -286,43 +261,19 @@ const loadTools = async ({
      continue;
    } else if (tool === Tools.file_search) {
      requestedTools[tool] = async () => {
        const { files, toolContext } = await primeSearchFiles({
          ...options,
          agentId: agent?.id,
        });
        const { files, toolContext } = await primeSearchFiles(options);
        if (toolContext) {
          toolContextMap[tool] = toolContext;
        }

        /** @type {boolean | undefined} Check if user has FILE_CITATIONS permission */
        let fileCitations;
        if (fileCitations == null && options.req?.user != null) {
          try {
            fileCitations = await checkAccess({
              user: options.req.user,
              permissionType: PermissionTypes.FILE_CITATIONS,
              permissions: [Permissions.USE],
              getRoleByName,
            });
          } catch (error) {
            logger.error('[handleTools] FILE_CITATIONS permission check failed:', error);
            fileCitations = false;
          }
        }

        return createFileSearchTool({
          userId: user,
          files,
          entity_id: agent?.id,
          fileCitations,
        });
        return createFileSearchTool({ req: options.req, files, entity_id: agent?.id });
      };
      continue;
    } else if (tool === Tools.web_search) {
      const webSearchConfig = options?.req?.app?.locals?.webSearch;
      const result = await loadWebSearchAuth({
        userId: user,
        loadAuthValues,
        webSearchConfig: webSearch,
        webSearchConfig,
      });
      const { onSearchResults, onGetHighlights } = options?.[Tools.web_search] ?? {};
      requestedTools[tool] = async () => {
@@ -344,34 +295,14 @@ Current Date & Time: ${replaceSpecialVars({ text: '{{iso_datetime}}' })}
        });
      };
      continue;
    } else if (tool && mcpToolPattern.test(tool)) {
      const [toolName, serverName] = tool.split(Constants.mcp_delimiter);
      if (toolName === Constants.mcp_server) {
        /** Placeholder used for UI purposes */
        continue;
      }
      if (serverName && options.req?.config?.mcpConfig?.[serverName] == null) {
        logger.warn(
          `MCP server "${serverName}" for "${toolName}" tool is not configured${agent?.id != null && agent.id ? ` but attached to "${agent.id}"` : ''}`,
        );
        continue;
      }
      if (toolName === Constants.mcp_all) {
        requestedMCPTools[serverName] = [
          {
            type: 'all',
            serverName,
          },
        ];
        continue;
      }

      requestedMCPTools[serverName] = requestedMCPTools[serverName] || [];
      requestedMCPTools[serverName].push({
        type: 'single',
        toolKey: tool,
        serverName,
      });
    } else if (tool && appTools[tool] && mcpToolPattern.test(tool)) {
      requestedTools[tool] = async () =>
        createMCPTool({
          req: options.req,
          toolKey: tool,
          model: agent?.model ?? model,
          provider: agent?.provider ?? endpoint,
        });
      continue;
    }

@@ -411,75 +342,6 @@ Current Date & Time: ${replaceSpecialVars({ text: '{{iso_datetime}}' })}
  }

  const loadedTools = (await Promise.all(toolPromises)).flatMap((plugin) => plugin || []);
  const mcpToolPromises = [];
  /** MCP server tools are initialized sequentially by server */
  let index = -1;
  const failedMCPServers = new Set();
  const safeUser = createSafeUser(options.req?.user);
  for (const [serverName, toolConfigs] of Object.entries(requestedMCPTools)) {
    index++;
    /** @type {LCAvailableTools} */
    let availableTools;
    for (const config of toolConfigs) {
      try {
        if (failedMCPServers.has(serverName)) {
          continue;
        }
        const mcpParams = {
          index,
          signal,
          user: safeUser,
          userMCPAuthMap,
          res: options.res,
          model: agent?.model ?? model,
          serverName: config.serverName,
          provider: agent?.provider ?? endpoint,
        };

        if (config.type === 'all' && toolConfigs.length === 1) {
          /** Handle async loading for single 'all' tool config */
          mcpToolPromises.push(
            createMCPTools(mcpParams).catch((error) => {
              logger.error(`Error loading ${serverName} tools:`, error);
              return null;
            }),
          );
          continue;
        }
        if (!availableTools) {
          try {
            availableTools = await getMCPServerTools(serverName);
          } catch (error) {
            logger.error(`Error fetching available tools for MCP server ${serverName}:`, error);
          }
        }

        /** Handle synchronous loading */
        const mcpTool =
          config.type === 'all'
            ? await createMCPTools(mcpParams)
            : await createMCPTool({
                ...mcpParams,
                availableTools,
                toolKey: config.toolKey,
              });

        if (Array.isArray(mcpTool)) {
          loadedTools.push(...mcpTool);
        } else if (mcpTool) {
          loadedTools.push(mcpTool);
        } else {
          failedMCPServers.add(serverName);
          logger.warn(
            `MCP tool creation failed for "${config.toolKey}", server may be unavailable or unauthenticated.`,
          );
        }
      } catch (error) {
        logger.error(`Error loading MCP tool for server ${serverName}:`, error);
      }
    }
  }
  loadedTools.push(...(await Promise.all(mcpToolPromises)).flatMap((plugin) => plugin || []));
  return { loadedTools, toolContextMap };
};
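For orientation, here is how a caller might invoke the newer `loadTools` signature shown above, inside an async context; the surrounding variables (`req`, `res`, `agent`, `appConfig`, `abortController`) and the tool keys are hypothetical stand-ins, not code from the branch:

const { loadedTools, toolContextMap } = await loadTools({
  user: req.user.id,
  agent, // Pick<Agent, 'id' | 'provider' | 'model'>
  signal: abortController.signal,
  tools: ['execute_code', 'file_search', 'web_search'],
  options: { req, res },
  webSearch: appConfig.webSearch,
  fileStrategy: appConfig.fileStrategy,
  imageOutputType: appConfig.imageOutputType,
});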
@@ -1,5 +1,8 @@
const mongoose = require('mongoose');
const { MongoMemoryServer } = require('mongodb-memory-server');
const mockUser = {
  _id: 'fakeId',
  save: jest.fn(),
  findByIdAndDelete: jest.fn(),
};

const mockPluginService = {
  updateUserPluginAuth: jest.fn(),
@@ -7,38 +10,29 @@ const mockPluginService = {
  getUserPluginAuthValue: jest.fn(),
};

jest.mock('~/server/services/PluginService', () => mockPluginService);

jest.mock('~/server/services/Config', () => ({
  getAppConfig: jest.fn().mockResolvedValue({
    // Default app config for tool tests
    paths: { uploads: '/tmp' },
    fileStrategy: 'local',
    filteredTools: [],
    includedTools: [],
  }),
  getCachedTools: jest.fn().mockResolvedValue({
    // Default cached tools for tests
    dalle: {
      type: 'function',
      function: {
        name: 'dalle',
        description: 'DALL-E image generation',
        parameters: {},
      },
    },
  }),
const mockModels = {
  User: mockUser,
};
jest.mock('~/db/connect', () => {
  return {
    connectDb: jest.fn(),
    User: mockModels.mockUser,
  };
});
jest.mock('~/models/File', () => ({
  File: jest.fn(),
}));

jest.mock('~/server/services/PluginService', () => mockPluginService);

const { BaseLLM } = require('@langchain/openai');
const { Calculator } = require('@langchain/community/tools/calculator');

const { User } = require('~/db/models');
const PluginService = require('~/server/services/PluginService');
const { validateTools, loadTools, loadToolWithAuth } = require('./handleTools');
const { StructuredSD, availableTools, DALLE3 } = require('../');

describe('Tool Handlers', () => {
  let mongoServer;
  let fakeUser;
  const pluginKey = 'dalle';
  const pluginKey2 = 'wolfram';
@@ -49,9 +43,7 @@ describe('Tool Handlers', () => {
  const authConfigs = mainPlugin.authConfig;

  beforeAll(async () => {
    mongoServer = await MongoMemoryServer.create();
    const mongoUri = mongoServer.getUri();
    await mongoose.connect(mongoUri);
    mockUser.save.mockResolvedValue(undefined);

    const userAuthValues = {};
    mockPluginService.getUserPluginAuthValue.mockImplementation((userId, authField) => {
@@ -66,7 +58,7 @@ describe('Tool Handlers', () => {
      },
    );

    fakeUser = new User({
    fakeUser = await mockModels.User.createUser({
      name: 'Fake User',
      username: 'fakeuser',
      email: 'fakeuser@example.com',
@@ -92,36 +84,9 @@ describe('Tool Handlers', () => {
  });

  afterAll(async () => {
    await mongoose.disconnect();
    await mongoServer.stop();
  });

  beforeEach(async () => {
    // Clear mocks but not the database since we need the user to persist
    jest.clearAllMocks();

    // Reset the mock implementations
    const userAuthValues = {};
    mockPluginService.getUserPluginAuthValue.mockImplementation((userId, authField) => {
      return userAuthValues[`${userId}-${authField}`];
    });
    mockPluginService.updateUserPluginAuth.mockImplementation(
      (userId, authField, _pluginKey, credential) => {
        const fields = authField.split('||');
        fields.forEach((field) => {
          userAuthValues[`${userId}-${field}`] = credential;
        });
      },
    );

    // Re-add the auth configs for the user
    await mockUser.findByIdAndDelete(fakeUser._id);
    for (const authConfig of authConfigs) {
      await PluginService.updateUserPluginAuth(
        fakeUser._id,
        authConfig.authField,
        pluginKey,
        mockCredential,
      );
      await PluginService.deleteUserPluginAuth(fakeUser._id, authConfig.authField);
    }
  });

@@ -171,6 +136,7 @@ describe('Tool Handlers', () => {
  beforeAll(async () => {
    const toolMap = await loadTools({
      user: fakeUser._id,
      model: BaseLLM,
      tools: sampleTools,
      returnMap: true,
      useSpecs: true,
@@ -264,6 +230,7 @@ describe('Tool Handlers', () => {
  it('returns an empty object when no tools are requested', async () => {
    toolFunctions = await loadTools({
      user: fakeUser._id,
      model: BaseLLM,
      returnMap: true,
      useSpecs: true,
    });
@@ -273,6 +240,7 @@ describe('Tool Handlers', () => {
    process.env.SD_WEBUI_URL = mockCredential;
    toolFunctions = await loadTools({
      user: fakeUser._id,
      model: BaseLLM,
      tools: ['stable-diffusion'],
      functions: true,
      returnMap: true,
api/cache/banViolation.js
@@ -1,8 +1,7 @@
const { logger } = require('@librechat/data-schemas');
const { isEnabled, math } = require('@librechat/api');
const { ViolationTypes } = require('librechat-data-provider');
const { isEnabled, math, removePorts } = require('~/server/utils');
const { deleteAllUserSessions } = require('~/models');
const { removePorts } = require('~/server/utils');
const getLogStores = require('./getLogStores');

const { BAN_VIOLATIONS, BAN_INTERVAL } = process.env ?? {};
api/cache/banViolation.spec.js
@@ -1,28 +1,69 @@
const mongoose = require('mongoose');
const { MongoMemoryServer } = require('mongodb-memory-server');
const banViolation = require('./banViolation');

// Mock deleteAllUserSessions since we're testing ban logic, not session deletion
jest.mock('~/models', () => ({
  ...jest.requireActual('~/models'),
  deleteAllUserSessions: jest.fn().mockResolvedValue(true),
const mockModels = {
  Session: {
    deleteAllUserSessions: jest.fn(),
  },
};

jest.mock('~/db/connect', () => {
  return {
    connectDb: jest.fn(),
    get models() {
      return mockModels;
    },
  };
});

jest.mock('~/server/utils', () => ({
  isEnabled: jest.fn(() => true), // defaults to true, override per test if needed
  math: jest.fn(() => 20), // defaults to 20, override per test if needed
  removePorts: jest.fn(),
}));

jest.mock('keyv');
// jest.mock('../models/Session');
// Mocking the getLogStores function
jest.mock('./getLogStores', () => {
  return jest.fn().mockImplementation(() => {
    const EventEmitter = require('events');
    const { CacheKeys } = require('librechat-data-provider');
    const math = require('../server/utils/math');
    const mockGet = jest.fn();
    const mockSet = jest.fn();
    class KeyvMongo extends EventEmitter {
      constructor(url = 'mongodb://127.0.0.1:27017', options) {
        super();
        this.ttlSupport = false;
        url = url ?? {};
        if (typeof url === 'string') {
          url = { url };
        }
        if (url.uri) {
          url = { url: url.uri, ...url };
        }
        this.opts = {
          url,
          collection: 'keyv',
          ...url,
          ...options,
        };
      }

      get = mockGet;
      set = mockSet;
    }

    return new KeyvMongo('', {
      namespace: CacheKeys.BANS,
      ttl: math(process.env.BAN_DURATION, 7200000),
    });
  });
});

describe('banViolation', () => {
  let mongoServer;
  let req, res, errorMessage;

  beforeAll(async () => {
    mongoServer = await MongoMemoryServer.create();
    const mongoUri = mongoServer.getUri();
    await mongoose.connect(mongoUri);
  });

  afterAll(async () => {
    await mongoose.disconnect();
    await mongoServer.stop();
  });

  beforeEach(() => {
    req = {
      ip: '127.0.0.1',
@@ -35,7 +76,7 @@ describe('banViolation', () => {
    };
    errorMessage = {
      type: 'someViolation',
      user_id: new mongoose.Types.ObjectId().toString(), // Use valid ObjectId
      user_id: '12345',
      prev_count: 0,
      violation_count: 0,
    };
api/cache/clearPendingReq.js
@@ -1,5 +1,5 @@
const { isEnabled } = require('@librechat/api');
const { Time, CacheKeys } = require('librechat-data-provider');
const { isEnabled } = require('~/server/utils');
const getLogStores = require('./getLogStores');

const { USE_REDIS, LIMIT_CONCURRENT_MESSAGES } = process.env ?? {};
api/cache/getLogStores.js
@@ -1,56 +1,108 @@
const { Keyv } = require('keyv');
const { Time, CacheKeys, ViolationTypes } = require('librechat-data-provider');
const {
  logFile,
  keyvMongo,
  cacheConfig,
  sessionCache,
  standardCache,
  violationCache,
} = require('@librechat/api');
const { CacheKeys, ViolationTypes, Time } = require('librechat-data-provider');
const { logFile, violationFile } = require('./keyvFiles');
const { math, isEnabled } = require('~/server/utils');
const keyvRedis = require('./keyvRedis');
const keyvMongo = require('./keyvMongo');

const { BAN_DURATION, USE_REDIS, DEBUG_MEMORY_CACHE, CI } = process.env ?? {};

const duration = math(BAN_DURATION, 7200000);
const isRedisEnabled = isEnabled(USE_REDIS);
const debugMemoryCache = isEnabled(DEBUG_MEMORY_CACHE);

const createViolationInstance = (namespace) => {
  const config = isRedisEnabled ? { store: keyvRedis } : { store: violationFile, namespace };
  return new Keyv(config);
};

// Serve cache from memory so no need to clear it on startup/exit
const pending_req = isRedisEnabled
  ? new Keyv({ store: keyvRedis })
  : new Keyv({ namespace: CacheKeys.PENDING_REQ });

const config = isRedisEnabled
  ? new Keyv({ store: keyvRedis })
  : new Keyv({ namespace: CacheKeys.CONFIG_STORE });

const roles = isRedisEnabled
  ? new Keyv({ store: keyvRedis })
  : new Keyv({ namespace: CacheKeys.ROLES });

const audioRuns = isRedisEnabled
  ? new Keyv({ store: keyvRedis, ttl: Time.TEN_MINUTES })
  : new Keyv({ namespace: CacheKeys.AUDIO_RUNS, ttl: Time.TEN_MINUTES });

const messages = isRedisEnabled
  ? new Keyv({ store: keyvRedis, ttl: Time.ONE_MINUTE })
  : new Keyv({ namespace: CacheKeys.MESSAGES, ttl: Time.ONE_MINUTE });

const flows = isRedisEnabled
  ? new Keyv({ store: keyvRedis, ttl: Time.TWO_MINUTES })
  : new Keyv({ namespace: CacheKeys.FLOWS, ttl: Time.ONE_MINUTE * 3 });

const tokenConfig = isRedisEnabled
  ? new Keyv({ store: keyvRedis, ttl: Time.THIRTY_MINUTES })
  : new Keyv({ namespace: CacheKeys.TOKEN_CONFIG, ttl: Time.THIRTY_MINUTES });

const genTitle = isRedisEnabled
  ? new Keyv({ store: keyvRedis, ttl: Time.TWO_MINUTES })
  : new Keyv({ namespace: CacheKeys.GEN_TITLE, ttl: Time.TWO_MINUTES });

const s3ExpiryInterval = isRedisEnabled
  ? new Keyv({ store: keyvRedis, ttl: Time.THIRTY_MINUTES })
  : new Keyv({ namespace: CacheKeys.S3_EXPIRY_INTERVAL, ttl: Time.THIRTY_MINUTES });

const modelQueries = isEnabled(process.env.USE_REDIS)
  ? new Keyv({ store: keyvRedis })
  : new Keyv({ namespace: CacheKeys.MODEL_QUERIES });

const abortKeys = isRedisEnabled
  ? new Keyv({ store: keyvRedis })
  : new Keyv({ namespace: CacheKeys.ABORT_KEYS, ttl: Time.TEN_MINUTES });

const openIdExchangedTokensCache = isRedisEnabled
  ? new Keyv({ store: keyvRedis, ttl: Time.TEN_MINUTES })
  : new Keyv({ namespace: CacheKeys.OPENID_EXCHANGED_TOKENS, ttl: Time.TEN_MINUTES });

const namespaces = {
  [ViolationTypes.GENERAL]: new Keyv({ store: logFile, namespace: 'violations' }),
  [ViolationTypes.LOGINS]: violationCache(ViolationTypes.LOGINS),
  [ViolationTypes.CONCURRENT]: violationCache(ViolationTypes.CONCURRENT),
  [ViolationTypes.NON_BROWSER]: violationCache(ViolationTypes.NON_BROWSER),
  [ViolationTypes.MESSAGE_LIMIT]: violationCache(ViolationTypes.MESSAGE_LIMIT),
  [ViolationTypes.REGISTRATIONS]: violationCache(ViolationTypes.REGISTRATIONS),
  [ViolationTypes.TOKEN_BALANCE]: violationCache(ViolationTypes.TOKEN_BALANCE),
  [ViolationTypes.TTS_LIMIT]: violationCache(ViolationTypes.TTS_LIMIT),
  [ViolationTypes.STT_LIMIT]: violationCache(ViolationTypes.STT_LIMIT),
  [ViolationTypes.CONVO_ACCESS]: violationCache(ViolationTypes.CONVO_ACCESS),
  [ViolationTypes.TOOL_CALL_LIMIT]: violationCache(ViolationTypes.TOOL_CALL_LIMIT),
  [ViolationTypes.FILE_UPLOAD_LIMIT]: violationCache(ViolationTypes.FILE_UPLOAD_LIMIT),
  [ViolationTypes.VERIFY_EMAIL_LIMIT]: violationCache(ViolationTypes.VERIFY_EMAIL_LIMIT),
  [ViolationTypes.RESET_PASSWORD_LIMIT]: violationCache(ViolationTypes.RESET_PASSWORD_LIMIT),
  [ViolationTypes.ILLEGAL_MODEL_REQUEST]: violationCache(ViolationTypes.ILLEGAL_MODEL_REQUEST),
  [ViolationTypes.BAN]: new Keyv({
  [CacheKeys.ROLES]: roles,
  [CacheKeys.CONFIG_STORE]: config,
  [CacheKeys.PENDING_REQ]: pending_req,
  [ViolationTypes.BAN]: new Keyv({ store: keyvMongo, namespace: CacheKeys.BANS, ttl: duration }),
  [CacheKeys.ENCODED_DOMAINS]: new Keyv({
    store: keyvMongo,
    namespace: CacheKeys.BANS,
    ttl: cacheConfig.BAN_DURATION,
    namespace: CacheKeys.ENCODED_DOMAINS,
    ttl: 0,
  }),

  [CacheKeys.OPENID_SESSION]: sessionCache(CacheKeys.OPENID_SESSION),
  [CacheKeys.SAML_SESSION]: sessionCache(CacheKeys.SAML_SESSION),

  [CacheKeys.ROLES]: standardCache(CacheKeys.ROLES),
  [CacheKeys.APP_CONFIG]: standardCache(CacheKeys.APP_CONFIG),
  [CacheKeys.CONFIG_STORE]: standardCache(CacheKeys.CONFIG_STORE),
  [CacheKeys.PENDING_REQ]: standardCache(CacheKeys.PENDING_REQ),
  [CacheKeys.ENCODED_DOMAINS]: new Keyv({ store: keyvMongo, namespace: CacheKeys.ENCODED_DOMAINS }),
  [CacheKeys.ABORT_KEYS]: standardCache(CacheKeys.ABORT_KEYS, Time.TEN_MINUTES),
  [CacheKeys.TOKEN_CONFIG]: standardCache(CacheKeys.TOKEN_CONFIG, Time.THIRTY_MINUTES),
  [CacheKeys.GEN_TITLE]: standardCache(CacheKeys.GEN_TITLE, Time.TWO_MINUTES),
  [CacheKeys.S3_EXPIRY_INTERVAL]: standardCache(CacheKeys.S3_EXPIRY_INTERVAL, Time.THIRTY_MINUTES),
  [CacheKeys.MODEL_QUERIES]: standardCache(CacheKeys.MODEL_QUERIES),
  [CacheKeys.AUDIO_RUNS]: standardCache(CacheKeys.AUDIO_RUNS, Time.TEN_MINUTES),
  [CacheKeys.MESSAGES]: standardCache(CacheKeys.MESSAGES, Time.ONE_MINUTE),
  [CacheKeys.FLOWS]: standardCache(CacheKeys.FLOWS, Time.ONE_MINUTE * 3),
  [CacheKeys.OPENID_EXCHANGED_TOKENS]: standardCache(
    CacheKeys.OPENID_EXCHANGED_TOKENS,
    Time.TEN_MINUTES,
  general: new Keyv({ store: logFile, namespace: 'violations' }),
  concurrent: createViolationInstance('concurrent'),
  non_browser: createViolationInstance('non_browser'),
  message_limit: createViolationInstance('message_limit'),
  token_balance: createViolationInstance(ViolationTypes.TOKEN_BALANCE),
  registrations: createViolationInstance('registrations'),
  [ViolationTypes.TTS_LIMIT]: createViolationInstance(ViolationTypes.TTS_LIMIT),
  [ViolationTypes.STT_LIMIT]: createViolationInstance(ViolationTypes.STT_LIMIT),
  [ViolationTypes.CONVO_ACCESS]: createViolationInstance(ViolationTypes.CONVO_ACCESS),
  [ViolationTypes.TOOL_CALL_LIMIT]: createViolationInstance(ViolationTypes.TOOL_CALL_LIMIT),
  [ViolationTypes.FILE_UPLOAD_LIMIT]: createViolationInstance(ViolationTypes.FILE_UPLOAD_LIMIT),
  [ViolationTypes.VERIFY_EMAIL_LIMIT]: createViolationInstance(ViolationTypes.VERIFY_EMAIL_LIMIT),
  [ViolationTypes.RESET_PASSWORD_LIMIT]: createViolationInstance(
    ViolationTypes.RESET_PASSWORD_LIMIT,
  ),
  [ViolationTypes.ILLEGAL_MODEL_REQUEST]: createViolationInstance(
    ViolationTypes.ILLEGAL_MODEL_REQUEST,
  ),
  logins: createViolationInstance('logins'),
  [CacheKeys.ABORT_KEYS]: abortKeys,
  [CacheKeys.TOKEN_CONFIG]: tokenConfig,
  [CacheKeys.GEN_TITLE]: genTitle,
  [CacheKeys.S3_EXPIRY_INTERVAL]: s3ExpiryInterval,
  [CacheKeys.MODEL_QUERIES]: modelQueries,
  [CacheKeys.AUDIO_RUNS]: audioRuns,
  [CacheKeys.MESSAGES]: messages,
  [CacheKeys.FLOWS]: flows,
  [CacheKeys.OPENID_EXCHANGED_TOKENS]: openIdExchangedTokensCache,
};

/**
@@ -59,10 +111,7 @@ const namespaces = {
 */
function getTTLStores() {
  return Object.values(namespaces).filter(
    (store) =>
      store instanceof Keyv &&
      parseInt(store.opts?.ttl ?? '0') > 0 &&
      !store.opts?.store?.constructor?.name?.includes('Redis'), // Only include non-Redis stores
    (store) => store instanceof Keyv && typeof store.opts?.ttl === 'number' && store.opts.ttl > 0,
  );
}

@@ -98,18 +147,18 @@ async function clearExpiredFromCache(cache) {
      if (data?.expires && data.expires <= expiryTime) {
        const deleted = await cache.opts.store.delete(key);
        if (!deleted) {
          cacheConfig.DEBUG_MEMORY_CACHE &&
          debugMemoryCache &&
            console.warn(`[Cache] Error deleting entry: ${key} from ${cache.opts.namespace}`);
          continue;
        }
        cleared++;
      }
    } catch (error) {
      cacheConfig.DEBUG_MEMORY_CACHE &&
      debugMemoryCache &&
        console.log(`[Cache] Error processing entry from ${cache.opts.namespace}:`, error);
      const deleted = await cache.opts.store.delete(key);
      if (!deleted) {
        cacheConfig.DEBUG_MEMORY_CACHE &&
        debugMemoryCache &&
          console.warn(`[Cache] Error deleting entry: ${key} from ${cache.opts.namespace}`);
        continue;
      }
@@ -118,7 +167,7 @@ async function clearExpiredFromCache(cache) {
  }

  if (cleared > 0) {
    cacheConfig.DEBUG_MEMORY_CACHE &&
    debugMemoryCache &&
      console.log(
        `[Cache] Cleared ${cleared} entries older than ${ttl}ms from ${cache.opts.namespace}`,
      );
@@ -159,7 +208,7 @@ async function clearAllExpiredFromCache() {
  }
}

if (!cacheConfig.USE_REDIS && !cacheConfig.CI) {
if (!isRedisEnabled && !isEnabled(CI)) {
  /** @type {Set<NodeJS.Timeout>} */
  const cleanupIntervals = new Set();

@@ -170,7 +219,7 @@ if (!cacheConfig.USE_REDIS && !cacheConfig.CI) {

  cleanupIntervals.add(cleanup);

  if (cacheConfig.DEBUG_MEMORY_CACHE) {
  if (debugMemoryCache) {
    const monitor = setInterval(() => {
      const ttlStores = getTTLStores();
      const memory = process.memoryUsage();
@@ -191,13 +240,13 @@ if (!cacheConfig.USE_REDIS && !cacheConfig.CI) {
  }

  const dispose = () => {
    cacheConfig.DEBUG_MEMORY_CACHE && console.log('[Cache] Cleaning up and shutting down...');
    debugMemoryCache && console.log('[Cache] Cleaning up and shutting down...');
    cleanupIntervals.forEach((interval) => clearInterval(interval));
    cleanupIntervals.clear();

    // One final cleanup before exit
    clearAllExpiredFromCache().then(() => {
      cacheConfig.DEBUG_MEMORY_CACHE && console.log('[Cache] Final cleanup completed');
      debugMemoryCache && console.log('[Cache] Final cleanup completed');
      process.exit(0);
    });
  };
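For orientation, a minimal sketch of how this registry is consumed, assuming `getLogStores(key)` returns `namespaces[key]` as the call sites in logViolation.js below suggest (keys shown exist in the map above; the demo values are illustrative):

const getLogStores = require('./getLogStores');
const { CacheKeys, ViolationTypes } = require('librechat-data-provider');

async function demo() {
  const banCache = getLogStores(ViolationTypes.BAN); // Keyv over MongoDB, TTL = BAN_DURATION
  const messageCache = getLogStores(CacheKeys.MESSAGES); // Redis or in-memory, one-minute TTL
  await messageCache.set('message-id', { text: 'hello' });
  console.log(await messageCache.get('message-id'));
  await banCache.delete('some-user-id'); // illustrative key
}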
api/cache/index.js
@@ -1,4 +1,5 @@
const keyvFiles = require('./keyvFiles');
const getLogStores = require('./getLogStores');
const logViolation = require('./logViolation');

module.exports = { getLogStores, logViolation };
module.exports = { ...keyvFiles, getLogStores, logViolation };
api/cache/ioredisClient.js
@@ -0,0 +1,92 @@
const fs = require('fs');
const Redis = require('ioredis');
const { isEnabled } = require('~/server/utils');
const logger = require('~/config/winston');

const { REDIS_URI, USE_REDIS, USE_REDIS_CLUSTER, REDIS_CA, REDIS_MAX_LISTENERS } = process.env;

/** @type {import('ioredis').Redis | import('ioredis').Cluster} */
let ioredisClient;
const redis_max_listeners = Number(REDIS_MAX_LISTENERS) || 40;

function mapURI(uri) {
  const regex =
    /^(?:(?<scheme>\w+):\/\/)?(?:(?<user>[^:@]+)(?::(?<password>[^@]+))?@)?(?<host>[\w.-]+)(?::(?<port>\d{1,5}))?$/;
  const match = uri.match(regex);

  if (match) {
    const { scheme, user, password, host, port } = match.groups;

    return {
      scheme: scheme || 'none',
      user: user || null,
      password: password || null,
      host: host || null,
      port: port || null,
    };
  } else {
    const parts = uri.split(':');
    if (parts.length === 2) {
      return {
        scheme: 'none',
        user: null,
        password: null,
        host: parts[0],
        port: parts[1],
      };
    }

    return {
      scheme: 'none',
      user: null,
      password: null,
      host: uri,
      port: null,
    };
  }
}

if (REDIS_URI && isEnabled(USE_REDIS)) {
  let redisOptions = null;

  if (REDIS_CA) {
    const ca = fs.readFileSync(REDIS_CA);
    redisOptions = { tls: { ca } };
  }

  if (isEnabled(USE_REDIS_CLUSTER)) {
    const hosts = REDIS_URI.split(',').map((item) => {
      var value = mapURI(item);

      return {
        host: value.host,
        port: value.port,
      };
    });
    ioredisClient = new Redis.Cluster(hosts, { redisOptions });
  } else {
    ioredisClient = new Redis(REDIS_URI, redisOptions);
  }

  ioredisClient.on('ready', () => {
    logger.info('IoRedis connection ready');
  });
  ioredisClient.on('reconnecting', () => {
    logger.info('IoRedis connection reconnecting');
  });
  ioredisClient.on('end', () => {
    logger.info('IoRedis connection ended');
  });
  ioredisClient.on('close', () => {
    logger.info('IoRedis connection closed');
  });
  ioredisClient.on('error', (err) => logger.error('IoRedis connection error:', err));
  ioredisClient.setMaxListeners(redis_max_listeners);
  logger.info(
    '[Optional] IoRedis initialized for rate limiters. If you have issues, disable Redis or restart the server.',
  );
} else {
  logger.info('[Optional] IoRedis not initialized for rate limiters.');
}

module.exports = ioredisClient;
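The `mapURI` helper accepts full Redis URLs as well as bare `host:port` pairs; a few illustrative inputs and the objects they produce (hostnames are made up):

mapURI('redis://user:secret@cache.internal:6379');
// → { scheme: 'redis', user: 'user', password: 'secret', host: 'cache.internal', port: '6379' }

mapURI('cache.internal:6379');
// → { scheme: 'none', user: null, password: null, host: 'cache.internal', port: '6379' }

mapURI('cache.internal');
// → { scheme: 'none', user: null, password: null, host: 'cache.internal', port: null }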
api/cache/keyvFiles.js
@@ -0,0 +1,9 @@
const { KeyvFile } = require('keyv-file');

const logFile = new KeyvFile({ filename: './data/logs.json' }).setMaxListeners(20);
const violationFile = new KeyvFile({ filename: './data/violations.json' }).setMaxListeners(20);

module.exports = {
  logFile,
  violationFile,
};
@@ -1,69 +1,65 @@
import mongoose from 'mongoose';
import { EventEmitter } from 'events';
import { GridFSBucket } from 'mongodb';
import { logger } from '@librechat/data-schemas';
import type { Db, ReadPreference, Collection } from 'mongodb';
// api/cache/keyvMongo.js
const mongoose = require('mongoose');
const EventEmitter = require('events');
const { GridFSBucket } = require('mongodb');
const { logger } = require('~/config');

interface KeyvMongoOptions {
  url?: string;
  collection?: string;
  useGridFS?: boolean;
  readPreference?: ReadPreference;
}

interface GridFSClient {
  bucket: GridFSBucket;
  store: Collection;
  db: Db;
}

interface CollectionClient {
  store: Collection;
  db: Db;
}

type Client = GridFSClient | CollectionClient;

const storeMap = new Map<string, Client>();
const storeMap = new Map();

class KeyvMongoCustom extends EventEmitter {
  private opts: KeyvMongoOptions;
  public ttlSupport: boolean;
  public namespace?: string;

  constructor(options: KeyvMongoOptions = {}) {
  constructor(url, options = {}) {
    super();

    url = url || {};
    if (typeof url === 'string') {
      url = { url };
    }
    if (url.uri) {
      url = { url: url.uri, ...url };
    }

    this.opts = {
      url: 'mongodb://127.0.0.1:27017',
      collection: 'keyv',
      ...url,
      ...options,
    };

    this.ttlSupport = false;

    // Filter valid options
    const keyvMongoKeys = new Set([
      'url',
      'collection',
      'namespace',
      'serialize',
      'deserialize',
      'uri',
      'useGridFS',
      'dialect',
    ]);
    this.opts = Object.fromEntries(Object.entries(this.opts).filter(([k]) => keyvMongoKeys.has(k)));
  }

  // Helper to access the store WITHOUT storing a promise on the instance
  private async _getClient(): Promise<Client> {
  _getClient() {
    const storeKey = `${this.opts.collection}:${this.opts.useGridFS ? 'gridfs' : 'collection'}`;

    // If we already have the store initialized, return it directly
    if (storeMap.has(storeKey)) {
      return storeMap.get(storeKey)!;
      return Promise.resolve(storeMap.get(storeKey));
    }

    // Check mongoose connection state
    if (mongoose.connection.readyState !== 1) {
      throw new Error('Mongoose connection not ready. Ensure connectDb() is called first.');
      return Promise.reject(
        new Error('Mongoose connection not ready. Ensure connectDb() is called first.'),
      );
    }

    try {
      const db = mongoose.connection.db as unknown as Db | undefined;
      if (!db) {
        throw new Error('MongoDB database not available');
      }

      let client: Client;
      const db = mongoose.connection.db;
      let client;

      if (this.opts.useGridFS) {
        const bucket = new GridFSBucket(db, {
@@ -79,17 +75,17 @@ class KeyvMongoCustom extends EventEmitter {
      }

      storeMap.set(storeKey, client);
      return client;
      return Promise.resolve(client);
    } catch (error) {
      this.emit('error', error);
      throw error;
      return Promise.reject(error);
    }
  }

  async get(key: string): Promise<unknown> {
  async get(key) {
    const client = await this._getClient();

    if (this.opts.useGridFS && this.isGridFSClient(client)) {
    if (this.opts.useGridFS) {
      await client.store.updateOne(
        {
          filename: key,
@@ -104,7 +100,7 @@ class KeyvMongoCustom extends EventEmitter {
      const stream = client.bucket.openDownloadStreamByName(key);

      return new Promise((resolve) => {
        const resp: Uint8Array[] = [];
        const resp = [];
        stream.on('error', () => {
          resolve(undefined);
        });
@@ -114,7 +110,7 @@ class KeyvMongoCustom extends EventEmitter {
          resolve(data);
        });

        stream.on('data', (chunk: Uint8Array) => {
        stream.on('data', (chunk) => {
          resp.push(chunk);
        });
      });
@@ -129,7 +125,7 @@ class KeyvMongoCustom extends EventEmitter {
    return document.value;
  }

  async getMany(keys: string[]): Promise<unknown[]> {
  async getMany(keys) {
    const client = await this._getClient();

    if (this.opts.useGridFS) {
@@ -139,9 +135,9 @@ class KeyvMongoCustom extends EventEmitter {
      }

      const values = await Promise.allSettled(promises);
      const data: unknown[] = [];
      const data = [];
      for (const value of values) {
        data.push(value.status === 'fulfilled' ? value.value : undefined);
        data.push(value.value);
      }

      return data;
@@ -152,7 +148,7 @@ class KeyvMongoCustom extends EventEmitter {
      .project({ _id: 0, value: 1, key: 1 })
      .toArray();

    const results: unknown[] = [...keys];
    const results = [...keys];
    let i = 0;
    for (const key of keys) {
      const rowIndex = values.findIndex((row) => row.key === key);
@@ -163,11 +159,11 @@ class KeyvMongoCustom extends EventEmitter {
    return results;
  }

  async set(key: string, value: string, ttl?: number): Promise<unknown> {
  async set(key, value, ttl) {
    const client = await this._getClient();
    const expiresAt = typeof ttl === 'number' ? new Date(Date.now() + ttl) : null;

    if (this.opts.useGridFS && this.isGridFSClient(client)) {
    if (this.opts.useGridFS) {
      const stream = client.bucket.openUploadStream(key, {
        metadata: {
          expiresAt,
@@ -190,18 +186,20 @@ class KeyvMongoCustom extends EventEmitter {
    );
  }

  async delete(key: string): Promise<boolean> {
  async delete(key) {
    if (typeof key !== 'string') {
      return false;
    }

    const client = await this._getClient();

    if (this.opts.useGridFS && this.isGridFSClient(client)) {
    if (this.opts.useGridFS) {
      try {
        const bucket = new GridFSBucket(client.db, {
          bucketName: this.opts.collection,
        });
        const files = await bucket.find({ filename: key }).toArray();
        if (files.length > 0) {
          await client.bucket.delete(files[0]._id);
        }
        await client.bucket.delete(files[0]._id);
        return true;
      } catch {
        return false;
@@ -212,10 +210,10 @@ class KeyvMongoCustom extends EventEmitter {
    return object.deletedCount > 0;
  }

  async deleteMany(keys: string[]): Promise<boolean> {
  async deleteMany(keys) {
    const client = await this._getClient();

    if (this.opts.useGridFS && this.isGridFSClient(client)) {
    if (this.opts.useGridFS) {
      const bucket = new GridFSBucket(client.db, {
        bucketName: this.opts.collection,
      });
@@ -232,17 +230,15 @@ class KeyvMongoCustom extends EventEmitter {
    return object.deletedCount > 0;
  }

  async clear(): Promise<void> {
  async clear() {
    const client = await this._getClient();

    if (this.opts.useGridFS && this.isGridFSClient(client)) {
    if (this.opts.useGridFS) {
      try {
        await client.bucket.drop();
      } catch (error: unknown) {
      } catch (error) {
        // Throw error if not "namespace not found" error
        const errorCode =
          error instanceof Error && 'code' in error ? (error as { code?: number }).code : undefined;
        if (errorCode !== 26) {
        if (!(error.code === 26)) {
          throw error;
        }
      }
@@ -253,7 +249,7 @@ class KeyvMongoCustom extends EventEmitter {
    });
  }

  async has(key: string): Promise<boolean> {
  async has(key) {
    const client = await this._getClient();
    const filter = { [this.opts.useGridFS ? 'filename' : 'key']: { $eq: key } };
    const document = await client.store.countDocuments(filter, { limit: 1 });
@@ -261,14 +257,10 @@ class KeyvMongoCustom extends EventEmitter {
  }

  // No-op disconnect
  async disconnect(): Promise<boolean> {
  async disconnect() {
    // This is a no-op since we don't want to close the shared mongoose connection
    return true;
  }

  private isGridFSClient(client: Client): client is GridFSClient {
    return (client as GridFSClient).bucket != null;
  }
}

const keyvMongo = new KeyvMongoCustom({
@@ -277,4 +269,4 @@ const keyvMongo = new KeyvMongoCustom({

keyvMongo.on('error', (err) => logger.error('KeyvMongo connection error:', err));

export default keyvMongo;
module.exports = keyvMongo;
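A minimal usage sketch of the exported instance; it assumes `connectDb()` has already established the mongoose connection, since `_getClient` refuses to operate otherwise:

const keyvMongo = require('./keyvMongo');

async function demo() {
  await keyvMongo.set('greeting', 'hello', 60_000); // ttl in ms, stored as an expiresAt timestamp
  console.log(await keyvMongo.get('greeting')); // 'hello' until the TTL elapses
  await keyvMongo.delete('greeting');
}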
api/cache/keyvRedis.js
@@ -0,0 +1,109 @@
const fs = require('fs');
const ioredis = require('ioredis');
const KeyvRedis = require('@keyv/redis').default;
const { isEnabled } = require('~/server/utils');
const logger = require('~/config/winston');

const { REDIS_URI, USE_REDIS, USE_REDIS_CLUSTER, REDIS_CA, REDIS_KEY_PREFIX, REDIS_MAX_LISTENERS } =
  process.env;

let keyvRedis;
const redis_prefix = REDIS_KEY_PREFIX || '';
const redis_max_listeners = Number(REDIS_MAX_LISTENERS) || 40;

function mapURI(uri) {
  const regex =
    /^(?:(?<scheme>\w+):\/\/)?(?:(?<user>[^:@]+)(?::(?<password>[^@]+))?@)?(?<host>[\w.-]+)(?::(?<port>\d{1,5}))?$/;
  const match = uri.match(regex);

  if (match) {
    const { scheme, user, password, host, port } = match.groups;

    return {
      scheme: scheme || 'none',
      user: user || null,
      password: password || null,
      host: host || null,
      port: port || null,
    };
  } else {
    const parts = uri.split(':');
    if (parts.length === 2) {
      return {
        scheme: 'none',
        user: null,
        password: null,
        host: parts[0],
        port: parts[1],
      };
    }

    return {
      scheme: 'none',
      user: null,
      password: null,
      host: uri,
      port: null,
    };
  }
}

if (REDIS_URI && isEnabled(USE_REDIS)) {
  let redisOptions = null;
  /** @type {import('@keyv/redis').KeyvRedisOptions} */
  let keyvOpts = {
    useRedisSets: false,
    keyPrefix: redis_prefix,
  };

  if (REDIS_CA) {
    const ca = fs.readFileSync(REDIS_CA);
    redisOptions = { tls: { ca } };
  }

  if (isEnabled(USE_REDIS_CLUSTER)) {
    const hosts = REDIS_URI.split(',').map((item) => {
      var value = mapURI(item);

      return {
        host: value.host,
        port: value.port,
      };
    });
    const cluster = new ioredis.Cluster(hosts, { redisOptions });
    keyvRedis = new KeyvRedis(cluster, keyvOpts);
  } else {
    keyvRedis = new KeyvRedis(REDIS_URI, keyvOpts);
  }

  const pingInterval = setInterval(
    () => {
      logger.debug('KeyvRedis ping');
      keyvRedis.client.ping().catch((err) => logger.error('Redis keep-alive ping failed:', err));
    },
    5 * 60 * 1000,
  );

  keyvRedis.on('ready', () => {
    logger.info('KeyvRedis connection ready');
  });
  keyvRedis.on('reconnecting', () => {
    logger.info('KeyvRedis connection reconnecting');
  });
  keyvRedis.on('end', () => {
    logger.info('KeyvRedis connection ended');
  });
  keyvRedis.on('close', () => {
    clearInterval(pingInterval);
    logger.info('KeyvRedis connection closed');
  });
  keyvRedis.on('error', (err) => logger.error('KeyvRedis connection error:', err));
  keyvRedis.setMaxListeners(redis_max_listeners);
  logger.info(
    '[Optional] Redis initialized. If you have issues, or seeing older values, disable it or flush cache to refresh values.',
  );
} else {
  logger.info('[Optional] Redis not initialized.');
}

module.exports = keyvRedis;
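Consumers treat this export as an optional Keyv store, as the conditionals in getLogStores.js above show; a minimal sketch of that pattern (the namespace string is illustrative):

const { Keyv } = require('keyv');
const keyvRedis = require('./keyvRedis');

// Fall back to an in-memory namespace when Redis is not configured.
const cache = keyvRedis ? new Keyv({ store: keyvRedis }) : new Keyv({ namespace: 'example' });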
api/cache/logViolation.js
@@ -1,5 +1,4 @@
const { isEnabled } = require('@librechat/api');
const { ViolationTypes } = require('librechat-data-provider');
const { isEnabled } = require('~/server/utils');
const getLogStores = require('./getLogStores');
const banViolation = require('./banViolation');

@@ -10,14 +9,14 @@ const banViolation = require('./banViolation');
 * @param {Object} res - Express response object.
 * @param {string} type - The type of violation.
 * @param {Object} errorMessage - The error message to log.
 * @param {number | string} [score=1] - The severity of the violation. Defaults to 1
 * @param {number} [score=1] - The severity of the violation. Defaults to 1
 */
const logViolation = async (req, res, type, errorMessage, score = 1) => {
  const userId = req.user?.id ?? req.user?._id;
  if (!userId) {
    return;
  }
  const logs = getLogStores(ViolationTypes.GENERAL);
  const logs = getLogStores('general');
  const violationLogs = getLogStores(type);
  const key = isEnabled(process.env.USE_REDIS) ? `${type}:${userId}` : userId;

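The key shape produced by the last line above depends on whether the store is shared Redis. Roughly, as a simplified sketch (the real check goes through `isEnabled`, which also accepts variants like `TRUE`):

```js
const type = 'concurrent'; // illustrative violation type
const userId = 'user123';
// Type-prefixed when the store is shared Redis, plain userId otherwise.
const key = process.env.USE_REDIS === 'true' ? `${type}:${userId}` : userId;
// -> 'concurrent:user123' with Redis, 'user123' without
```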
@@ -1,13 +1,28 @@
const axios = require('axios');
const { EventSource } = require('eventsource');
const { Time } = require('librechat-data-provider');
const { MCPManager, FlowStateManager, OAuthReconnectionManager } = require('@librechat/api');
const { Time, CacheKeys } = require('librechat-data-provider');
const { MCPManager, FlowStateManager } = require('librechat-mcp');
const logger = require('./winston');

global.EventSource = EventSource;

/** @type {MCPManager} */
let mcpManager = null;
let flowManager = null;

/**
 * @param {string} [userId] - Optional user ID, to avoid disconnecting the current user.
 * @returns {MCPManager}
 */
function getMCPManager(userId) {
  if (!mcpManager) {
    mcpManager = MCPManager.getInstance(logger);
  } else {
    mcpManager.checkIdleConnections(userId);
  }
  return mcpManager;
}

/**
 * @param {Keyv} flowsCache
 * @returns {FlowStateManager}
@@ -16,16 +31,66 @@ function getFlowStateManager(flowsCache) {
  if (!flowManager) {
    flowManager = new FlowStateManager(flowsCache, {
      ttl: Time.ONE_MINUTE * 3,
      logger,
    });
  }
  return flowManager;
}

/**
 * Sends message data in Server Sent Events format.
 * @param {ServerResponse} res - The server response.
 * @param {{ data: string | Record<string, unknown>, event?: string }} event - The message event.
 * @param {string} event.event - The type of event.
 * @param {string} event.data - The message to be sent.
 */
const sendEvent = (res, event) => {
  if (typeof event.data === 'string' && event.data.length === 0) {
    return;
  }
  res.write(`event: message\ndata: ${JSON.stringify(event)}\n\n`);
};

/**
 * Creates and configures an Axios instance with optional proxy settings.
 *
 * @typedef {import('axios').AxiosInstance} AxiosInstance
 * @typedef {import('axios').AxiosProxyConfig} AxiosProxyConfig
 *
 * @returns {AxiosInstance} A configured Axios instance
 * @throws {Error} If there's an issue creating the Axios instance or parsing the proxy URL
 */
function createAxiosInstance() {
  const instance = axios.create();

  if (process.env.proxy) {
    try {
      const url = new URL(process.env.proxy);

      /** @type {AxiosProxyConfig} */
      const proxyConfig = {
        host: url.hostname.replace(/^\[|\]$/g, ''),
        protocol: url.protocol.replace(':', ''),
      };

      if (url.port) {
        proxyConfig.port = parseInt(url.port, 10);
      }

      instance.defaults.proxy = proxyConfig;
    } catch (error) {
      console.error('Error parsing proxy URL:', error);
      throw new Error(`Invalid proxy URL: ${process.env.proxy}`);
    }
  }

  return instance;
}

module.exports = {
  logger,
  createMCPManager: MCPManager.createInstance,
  getMCPManager: MCPManager.getInstance,
  sendEvent,
  getMCPManager,
  createAxiosInstance,
  getFlowStateManager,
  createOAuthReconnectionManager: OAuthReconnectionManager.createInstance,
  getOAuthReconnectionManager: OAuthReconnectionManager.getInstance,
};

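As a quick sanity check of the proxy parsing in `createAxiosInstance`, the following sketch shows what a typical `proxy` value resolves to (the URL is made up; the lowercase `proxy` env var convention comes from the module above):

```js
// Illustrative only.
process.env.proxy = 'http://10.0.0.5:3128';

const { createAxiosInstance } = require('~/config');

const instance = createAxiosInstance();
// instance.defaults.proxy -> { host: '10.0.0.5', protocol: 'http', port: 3128 }
```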
@@ -1,6 +1,7 @@
import axios from 'axios';
import { createAxiosInstance } from './axios';
const axios = require('axios');
const { createAxiosInstance } = require('./index');

// Mock axios
jest.mock('axios', () => ({
  interceptors: {
    request: { use: jest.fn(), eject: jest.fn() },
@@ -19,13 +20,7 @@ jest.mock('axios', () => ({
  post: jest.fn().mockResolvedValue({ data: {} }),
  put: jest.fn().mockResolvedValue({ data: {} }),
  delete: jest.fn().mockResolvedValue({ data: {} }),
  reset: jest.fn().mockImplementation(function (this: {
    get: jest.Mock;
    post: jest.Mock;
    put: jest.Mock;
    delete: jest.Mock;
    create: jest.Mock;
  }) {
  reset: jest.fn().mockImplementation(function () {
    this.get.mockClear();
    this.post.mockClear();
    this.put.mockClear();

@@ -1,34 +1,11 @@
require('dotenv').config();
const { isEnabled } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');

const mongoose = require('mongoose');
const MONGO_URI = process.env.MONGO_URI;

if (!MONGO_URI) {
  throw new Error('Please define the MONGO_URI environment variable');
}
/** The maximum number of connections in the connection pool. */
const maxPoolSize = parseInt(process.env.MONGO_MAX_POOL_SIZE) || undefined;
/** The minimum number of connections in the connection pool. */
const minPoolSize = parseInt(process.env.MONGO_MIN_POOL_SIZE) || undefined;
/** The maximum number of connections that may be in the process of being established concurrently by the connection pool. */
const maxConnecting = parseInt(process.env.MONGO_MAX_CONNECTING) || undefined;
/** The maximum number of milliseconds that a connection can remain idle in the pool before being removed and closed. */
const maxIdleTimeMS = parseInt(process.env.MONGO_MAX_IDLE_TIME_MS) || undefined;
/** The maximum time in milliseconds that a thread can wait for a connection to become available. */
const waitQueueTimeoutMS = parseInt(process.env.MONGO_WAIT_QUEUE_TIMEOUT_MS) || undefined;
/** Set to false to disable automatic index creation for all models associated with this connection. */
const autoIndex =
  process.env.MONGO_AUTO_INDEX != undefined
    ? isEnabled(process.env.MONGO_AUTO_INDEX) || false
    : undefined;

/** Set to `false` to disable Mongoose automatically calling `createCollection()` on every model created on this connection. */
const autoCreate =
  process.env.MONGO_AUTO_CREATE != undefined
    ? isEnabled(process.env.MONGO_AUTO_CREATE) || false
    : undefined;
/**
 * Global is used here to maintain a cached connection across hot reloads
 * in development. This prevents connections growing exponentially
@@ -49,21 +26,13 @@ async function connectDb() {
  if (!cached.promise || disconnected) {
    const opts = {
      bufferCommands: false,
      ...(maxPoolSize ? { maxPoolSize } : {}),
      ...(minPoolSize ? { minPoolSize } : {}),
      ...(maxConnecting ? { maxConnecting } : {}),
      ...(maxIdleTimeMS ? { maxIdleTimeMS } : {}),
      ...(waitQueueTimeoutMS ? { waitQueueTimeoutMS } : {}),
      ...(autoIndex != undefined ? { autoIndex } : {}),
      ...(autoCreate != undefined ? { autoCreate } : {}),
      // useNewUrlParser: true,
      // useUnifiedTopology: true,
      // bufferMaxEntries: 0,
      // useFindAndModify: true,
      // useCreateIndex: true
    };
    logger.info('Mongo Connection options');
    logger.info(JSON.stringify(opts, null, 2));

    mongoose.set('strictQuery', true);
    cached.promise = mongoose.connect(MONGO_URI, opts).then((mongoose) => {
      return mongoose;

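How the `MONGO_*` variables above land in the Mongoose options, with made-up values (unset vars parse to `NaN`, and `NaN || undefined` yields `undefined`, so the conditional spreads drop those keys entirely):

```js
// Illustrative resolution, assuming MONGO_MAX_POOL_SIZE=25 and
// MONGO_AUTO_INDEX=false are set while the other MONGO_* vars are unset:
const opts = {
  bufferCommands: false,
  maxPoolSize: 25,  // parseInt('25') -> 25
  autoIndex: false, // isEnabled('false') -> false; defined, so it is passed through
  // minPoolSize, maxConnecting, etc. are omitted:
  // parseInt(undefined) -> NaN, NaN || undefined -> undefined
};
```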
@@ -1,9 +1,8 @@
const mongoose = require('mongoose');
const { MeiliSearch } = require('meilisearch');
const { logger } = require('@librechat/data-schemas');
const { CacheKeys } = require('librechat-data-provider');
const { isEnabled, FlowStateManager } = require('@librechat/api');
const { getLogStores } = require('~/cache');

const { isEnabled } = require('~/server/utils');

const Conversation = mongoose.models.Conversation;
const Message = mongoose.models.Message;
@@ -29,165 +28,10 @@ class MeiliSearchClient {
  }
}

/**
 * Deletes documents from MeiliSearch index that are missing the user field
 * @param {import('meilisearch').Index} index - MeiliSearch index instance
 * @param {string} indexName - Name of the index for logging
 * @returns {Promise<number>} - Number of documents deleted
 */
async function deleteDocumentsWithoutUserField(index, indexName) {
  let deletedCount = 0;
  let offset = 0;
  const batchSize = 1000;

  try {
    while (true) {
      const searchResult = await index.search('', {
        limit: batchSize,
        offset: offset,
      });

      if (searchResult.hits.length === 0) {
        break;
      }

      const idsToDelete = searchResult.hits.filter((hit) => !hit.user).map((hit) => hit.id);

      if (idsToDelete.length > 0) {
        logger.info(
          `[indexSync] Deleting ${idsToDelete.length} documents without user field from ${indexName} index`,
        );
        await index.deleteDocuments(idsToDelete);
        deletedCount += idsToDelete.length;
      }

      if (searchResult.hits.length < batchSize) {
        break;
      }

      offset += batchSize;
    }

    if (deletedCount > 0) {
      logger.info(`[indexSync] Deleted ${deletedCount} orphaned documents from ${indexName} index`);
    }
  } catch (error) {
    logger.error(`[indexSync] Error deleting documents from ${indexName}:`, error);
async function indexSync() {
  if (!searchEnabled) {
    return;
  }

  return deletedCount;
}

/**
 * Ensures indexes have proper filterable attributes configured and checks if documents have user field
 * @param {MeiliSearch} client - MeiliSearch client instance
 * @returns {Promise<{settingsUpdated: boolean, orphanedDocsFound: boolean}>} - Status of what was done
 */
async function ensureFilterableAttributes(client) {
  let settingsUpdated = false;
  let hasOrphanedDocs = false;

  try {
    // Check and update messages index
    try {
      const messagesIndex = client.index('messages');
      const settings = await messagesIndex.getSettings();

      if (!settings.filterableAttributes || !settings.filterableAttributes.includes('user')) {
        logger.info('[indexSync] Configuring messages index to filter by user...');
        await messagesIndex.updateSettings({
          filterableAttributes: ['user'],
        });
        logger.info('[indexSync] Messages index configured for user filtering');
        settingsUpdated = true;
      }

      // Check if existing documents have user field indexed
      try {
        const searchResult = await messagesIndex.search('', { limit: 1 });
        if (searchResult.hits.length > 0 && !searchResult.hits[0].user) {
          logger.info(
            '[indexSync] Existing messages missing user field, will clean up orphaned documents...',
          );
          hasOrphanedDocs = true;
        }
      } catch (searchError) {
        logger.debug('[indexSync] Could not check message documents:', searchError.message);
      }
    } catch (error) {
      if (error.code !== 'index_not_found') {
        logger.warn('[indexSync] Could not check/update messages index settings:', error.message);
      }
    }

    // Check and update conversations index
    try {
      const convosIndex = client.index('convos');
      const settings = await convosIndex.getSettings();

      if (!settings.filterableAttributes || !settings.filterableAttributes.includes('user')) {
        logger.info('[indexSync] Configuring convos index to filter by user...');
        await convosIndex.updateSettings({
          filterableAttributes: ['user'],
        });
        logger.info('[indexSync] Convos index configured for user filtering');
        settingsUpdated = true;
      }

      // Check if existing documents have user field indexed
      try {
        const searchResult = await convosIndex.search('', { limit: 1 });
        if (searchResult.hits.length > 0 && !searchResult.hits[0].user) {
          logger.info(
            '[indexSync] Existing conversations missing user field, will clean up orphaned documents...',
          );
          hasOrphanedDocs = true;
        }
      } catch (searchError) {
        logger.debug('[indexSync] Could not check conversation documents:', searchError.message);
      }
    } catch (error) {
      if (error.code !== 'index_not_found') {
        logger.warn('[indexSync] Could not check/update convos index settings:', error.message);
      }
    }

    // If either index has orphaned documents, clean them up (but don't force resync)
    if (hasOrphanedDocs) {
      try {
        const messagesIndex = client.index('messages');
        await deleteDocumentsWithoutUserField(messagesIndex, 'messages');
      } catch (error) {
        logger.debug('[indexSync] Could not clean up messages:', error.message);
      }

      try {
        const convosIndex = client.index('convos');
        await deleteDocumentsWithoutUserField(convosIndex, 'convos');
      } catch (error) {
        logger.debug('[indexSync] Could not clean up convos:', error.message);
      }

      logger.info('[indexSync] Orphaned documents cleaned up without forcing resync.');
    }

    if (settingsUpdated) {
      logger.info('[indexSync] Index settings updated. Full re-sync will be triggered.');
    }
  } catch (error) {
    logger.error('[indexSync] Error ensuring filterable attributes:', error);
  }

  return { settingsUpdated, orphanedDocsFound: hasOrphanedDocs };
}

/**
 * Performs the actual sync operations for messages and conversations
 * @param {FlowStateManager} flowManager - Flow state manager instance
 * @param {string} flowId - Flow identifier
 * @param {string} flowType - Flow type
 */
async function performSync(flowManager, flowId, flowType) {
  try {
    const client = MeiliSearchClient.getInstance();

@@ -198,142 +42,29 @@ async function performSync(flowManager, flowId, flowType) {

    if (indexingDisabled === true) {
      logger.info('[indexSync] Indexing is disabled, skipping...');
      return { messagesSync: false, convosSync: false };
    }

    /** Ensures indexes have proper filterable attributes configured */
    const { settingsUpdated, orphanedDocsFound: _orphanedDocsFound } =
      await ensureFilterableAttributes(client);

    let messagesSync = false;
    let convosSync = false;

    // Only reset flags if settings were actually updated (not just for orphaned doc cleanup)
    if (settingsUpdated) {
      logger.info(
        '[indexSync] Settings updated. Forcing full re-sync to reindex with new configuration...',
      );

      // Reset sync flags to force full re-sync
      await Message.collection.updateMany({ _meiliIndex: true }, { $set: { _meiliIndex: false } });
      await Conversation.collection.updateMany(
        { _meiliIndex: true },
        { $set: { _meiliIndex: false } },
      );
    }

    // Check if we need to sync messages
    const messageProgress = await Message.getSyncProgress();
    if (!messageProgress.isComplete || settingsUpdated) {
      logger.info(
        `[indexSync] Messages need syncing: ${messageProgress.totalProcessed}/${messageProgress.totalDocuments} indexed`,
      );

      // Check if we should do a full sync or incremental
      const messageCount = await Message.countDocuments();
      const messagesIndexed = messageProgress.totalProcessed;
      const syncThreshold = parseInt(process.env.MEILI_SYNC_THRESHOLD || '1000', 10);

      if (messageCount - messagesIndexed > syncThreshold) {
        logger.info('[indexSync] Starting full message sync due to large difference');
        await Message.syncWithMeili();
        messagesSync = true;
      } else if (messageCount !== messagesIndexed) {
        logger.warn('[indexSync] Messages out of sync, performing incremental sync');
        await Message.syncWithMeili();
        messagesSync = true;
      }
    } else {
      logger.info(
        `[indexSync] Messages are fully synced: ${messageProgress.totalProcessed}/${messageProgress.totalDocuments}`,
      );
    }

    // Check if we need to sync conversations
    const convoProgress = await Conversation.getSyncProgress();
    if (!convoProgress.isComplete || settingsUpdated) {
      logger.info(
        `[indexSync] Conversations need syncing: ${convoProgress.totalProcessed}/${convoProgress.totalDocuments} indexed`,
      );

      const convoCount = await Conversation.countDocuments();
      const convosIndexed = convoProgress.totalProcessed;
      const syncThreshold = parseInt(process.env.MEILI_SYNC_THRESHOLD || '1000', 10);

      if (convoCount - convosIndexed > syncThreshold) {
        logger.info('[indexSync] Starting full conversation sync due to large difference');
        await Conversation.syncWithMeili();
        convosSync = true;
      } else if (convoCount !== convosIndexed) {
        logger.warn('[indexSync] Convos out of sync, performing incremental sync');
        await Conversation.syncWithMeili();
        convosSync = true;
      }
    } else {
      logger.info(
        `[indexSync] Conversations are fully synced: ${convoProgress.totalProcessed}/${convoProgress.totalDocuments}`,
      );
    }

    return { messagesSync, convosSync };
  } finally {
    if (indexingDisabled === true) {
      logger.info('[indexSync] Indexing is disabled, skipping cleanup...');
    } else if (flowManager && flowId && flowType) {
      try {
        await flowManager.deleteFlow(flowId, flowType);
        logger.debug('[indexSync] Flow state cleaned up');
      } catch (cleanupErr) {
        logger.debug('[indexSync] Could not clean up flow state:', cleanupErr.message);
      }
    }
  }
}

/**
 * Main index sync function that uses FlowStateManager to prevent concurrent execution
 */
async function indexSync() {
  if (!searchEnabled) {
    return;
  }

  logger.info('[indexSync] Starting index synchronization check...');

  // Get or create FlowStateManager instance
  const flowsCache = getLogStores(CacheKeys.FLOWS);
  if (!flowsCache) {
    logger.warn('[indexSync] Flows cache not available, falling back to direct sync');
    return await performSync(null, null, null);
  }

  const flowManager = new FlowStateManager(flowsCache, {
    ttl: 60000 * 10, // 10 minutes TTL for sync operations
  });

  // Use a unique flow ID for the sync operation
  const flowId = 'meili-index-sync';
  const flowType = 'MEILI_SYNC';

  try {
    // This will only execute the handler if no other instance is running the sync
    const result = await flowManager.createFlowWithHandler(flowId, flowType, () =>
      performSync(flowManager, flowId, flowType),
    );

    if (result.messagesSync || result.convosSync) {
      logger.info('[indexSync] Sync completed successfully');
    } else {
      logger.debug('[indexSync] No sync was needed');
    }

    return result;
  } catch (err) {
    if (err.message.includes('flow already exists')) {
      logger.info('[indexSync] Sync already running on another instance');
      return;
    }

    const messageCount = await Message.countDocuments();
    const convoCount = await Conversation.countDocuments();
    const messages = await client.index('messages').getStats();
    const convos = await client.index('convos').getStats();
    const messagesIndexed = messages.numberOfDocuments;
    const convosIndexed = convos.numberOfDocuments;

    logger.debug(`[indexSync] There are ${messageCount} messages and ${messagesIndexed} indexed`);
    logger.debug(`[indexSync] There are ${convoCount} convos and ${convosIndexed} indexed`);

    if (messageCount !== messagesIndexed) {
      logger.debug('[indexSync] Messages out of sync, indexing');
      Message.syncWithMeili();
    }

    if (convoCount !== convosIndexed) {
      logger.debug('[indexSync] Convos out of sync, indexing');
      Conversation.syncWithMeili();
    }
  } catch (err) {
    if (err.message.includes('not found')) {
      logger.debug('[indexSync] Creating indices...');
      currentTimeout = setTimeout(async () => {

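The concurrency guard used by `indexSync` boils down to this pattern. A sketch under the assumption, borne out by the catch branch above, that `createFlowWithHandler` rejects with a "flow already exists" error when another instance holds the flow:

```js
const { FlowStateManager } = require('@librechat/api');

async function runExclusively(flowsCache, handler) {
  const flows = new FlowStateManager(flowsCache, { ttl: 60000 * 10 });
  try {
    // Only one caller per (flowId, flowType) executes the handler.
    return await flows.createFlowWithHandler('meili-index-sync', 'MEILI_SYNC', handler);
  } catch (err) {
    if (err.message.includes('flow already exists')) {
      return null; // another instance is already running the sync
    }
    throw err;
  }
}
```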
@@ -3,7 +3,6 @@ module.exports = {
  clearMocks: true,
  roots: ['<rootDir>'],
  coverageDirectory: 'coverage',
  testTimeout: 30000, // 30 seconds timeout for all tests
  setupFiles: [
    './test/jestSetup.js',
    './test/__mocks__/logger.js',

@@ -1,4 +1,5 @@
const { Action } = require('~/db/models');
const mongoose = require('mongoose');
const Action = require('~/db/models').Action;

/**
 * Update an action with new data without overwriting existing properties,

@@ -1,19 +1,20 @@
const mongoose = require('mongoose');
const crypto = require('node:crypto');
const { logger } = require('@librechat/data-schemas');
const { ResourceType, SystemRoles, Tools, actionDelimiter } = require('librechat-data-provider');
const { GLOBAL_PROJECT_NAME, EPHEMERAL_AGENT_ID, mcp_all, mcp_delimiter } =
const { SystemRoles, Tools, actionDelimiter } = require('librechat-data-provider');
const { GLOBAL_PROJECT_NAME, EPHEMERAL_AGENT_ID, mcp_delimiter } =
  require('librechat-data-provider').Constants;
const { CONFIG_STORE, STARTUP_CONFIG } = require('librechat-data-provider').CacheKeys;
const {
  removeAgentFromAllProjects,
  removeAgentIdsFromProject,
  addAgentIdsToProject,
  getProjectByName,
  addAgentIdsToProject,
  removeAgentIdsFromProject,
  removeAgentFromAllProjects,
} = require('./Project');
const { removeAllPermissions } = require('~/server/services/PermissionService');
const { getMCPServerTools } = require('~/server/services/Config');
const getLogStores = require('~/cache/getLogStores');
const { getActions } = require('./Action');
const { Agent } = require('~/db/models');

const Agent = require('~/db/models').Agent;

/**
 * Create an agent with the provided data.
@@ -22,7 +23,7 @@ const { Agent } = require('~/db/models');
 * @throws {Error} If the agent creation fails.
 */
const createAgent = async (agentData) => {
  const { author: _author, ...versionData } = agentData;
  const { author, ...versionData } = agentData;
  const timestamp = new Date();
  const initialAgentData = {
    ...agentData,
@@ -33,9 +34,7 @@ const createAgent = async (agentData) => {
        updatedAt: timestamp,
      },
    ],
    category: agentData.category || 'general',
  };

  return (await Agent.create(initialAgentData)).toObject();
};

@@ -49,72 +48,46 @@ const createAgent = async (agentData) => {
 */
const getAgent = async (searchParameter) => await Agent.findOne(searchParameter).lean();

/**
 * Get multiple agent documents based on the provided search parameters.
 *
 * @param {Object} searchParameter - The search parameters to find agents.
 * @returns {Promise<Agent[]>} Array of agent documents as plain objects.
 */
const getAgents = async (searchParameter) => await Agent.find(searchParameter).lean();

/**
 * Load an agent based on the provided ID
 *
 * @param {Object} params
 * @param {ServerRequest} params.req
 * @param {string} params.spec
 * @param {string} params.agent_id
 * @param {string} params.endpoint
 * @param {import('@librechat/agents').ClientOptions} [params.model_parameters]
 * @returns {Promise<Agent|null>} The agent document as a plain object, or null if not found.
 * @returns {Agent|null} The agent document as a plain object, or null if not found.
 */
const loadEphemeralAgent = async ({ req, spec, agent_id, endpoint, model_parameters: _m }) => {
const loadEphemeralAgent = ({ req, agent_id, endpoint, model_parameters: _m }) => {
  const { model, ...model_parameters } = _m;
  const modelSpecs = req.config?.modelSpecs?.list;
  /** @type {TModelSpec | null} */
  let modelSpec = null;
  if (spec != null && spec !== '') {
    modelSpec = modelSpecs?.find((s) => s.name === spec) || null;
  }
  /** @type {Record<string, FunctionTool>} */
  const availableTools = req.app.locals.availableTools;
  /** @type {TEphemeralAgent | null} */
  const ephemeralAgent = req.body.ephemeralAgent;
  const mcpServers = new Set(ephemeralAgent?.mcp);
  if (modelSpec?.mcpServers) {
    for (const mcpServer of modelSpec.mcpServers) {
      mcpServers.add(mcpServer);
    }
  }
  /** @type {string[]} */
  const tools = [];
  if (ephemeralAgent?.execute_code === true || modelSpec?.executeCode === true) {
  if (ephemeralAgent?.execute_code === true) {
    tools.push(Tools.execute_code);
  }
  if (ephemeralAgent?.file_search === true || modelSpec?.fileSearch === true) {
    tools.push(Tools.file_search);
  }
  if (ephemeralAgent?.web_search === true || modelSpec?.webSearch === true) {
  if (ephemeralAgent?.web_search === true) {
    tools.push(Tools.web_search);
  }

  const addedServers = new Set();
  if (mcpServers.size > 0) {
    for (const mcpServer of mcpServers) {
      if (addedServers.has(mcpServer)) {
    for (const toolName of Object.keys(availableTools)) {
      if (!toolName.includes(mcp_delimiter)) {
        continue;
      }
      const serverTools = await getMCPServerTools(mcpServer);
      if (!serverTools) {
        tools.push(`${mcp_all}${mcp_delimiter}${mcpServer}`);
        addedServers.add(mcpServer);
        continue;
      const mcpServer = toolName.split(mcp_delimiter)?.[1];
      if (mcpServer && mcpServers.has(mcpServer)) {
        tools.push(toolName);
      }
      tools.push(...Object.keys(serverTools));
      addedServers.add(mcpServer);
    }
  }

  const instructions = req.body.promptPrefix;
  const result = {
  return {
    id: agent_id,
    instructions,
    provider: endpoint,
@@ -122,11 +95,6 @@ const loadEphemeralAgent = async ({ req, spec, agent_id, endpoint, model_paramet
    model,
    tools,
  };

  if (ephemeralAgent?.artifacts != null && ephemeralAgent.artifacts) {
    result.artifacts = ephemeralAgent.artifacts;
  }
  return result;
};

/**
@@ -134,18 +102,17 @@ const loadEphemeralAgent = async ({ req, spec, agent_id, endpoint, model_paramet
 *
 * @param {Object} params
 * @param {ServerRequest} params.req
 * @param {string} params.spec
 * @param {string} params.agent_id
 * @param {string} params.endpoint
 * @param {import('@librechat/agents').ClientOptions} [params.model_parameters]
 * @returns {Promise<Agent|null>} The agent document as a plain object, or null if not found.
 */
const loadAgent = async ({ req, spec, agent_id, endpoint, model_parameters }) => {
const loadAgent = async ({ req, agent_id, endpoint, model_parameters }) => {
  if (!agent_id) {
    return null;
  }
  if (agent_id === EPHEMERAL_AGENT_ID) {
    return await loadEphemeralAgent({ req, spec, agent_id, endpoint, model_parameters });
    return loadEphemeralAgent({ req, agent_id, endpoint, model_parameters });
  }
  const agent = await getAgent({
    id: agent_id,
@@ -156,7 +123,29 @@ const loadAgent = async ({ req, spec, agent_id, endpoint, model_parameters }) =>
  }

  agent.version = agent.versions ? agent.versions.length : 0;
  return agent;

  if (agent.author.toString() === req.user.id) {
    return agent;
  }

  if (!agent.projectIds) {
    return null;
  }

  const cache = getLogStores(CONFIG_STORE);
  /** @type {TStartupConfig} */
  const cachedStartupConfig = await cache.get(STARTUP_CONFIG);
  let { instanceProjectId } = cachedStartupConfig ?? {};
  if (!instanceProjectId) {
    instanceProjectId = (await getProjectByName(GLOBAL_PROJECT_NAME, '_id'))._id.toString();
  }

  for (const projectObjectId of agent.projectIds) {
    const projectId = projectObjectId.toString();
    if (projectId === instanceProjectId) {
      return agent;
    }
  }
};

/**
@@ -182,11 +171,12 @@ const isDuplicateVersion = (updateData, currentData, versions, actionsHash = nul
    'created_at',
    'updated_at',
    '__v',
    'agent_ids',
    'versions',
    'actionsHash', // Exclude actionsHash from direct comparison
  ];

  const { $push: _$push, $pull: _$pull, $addToSet: _$addToSet, ...directUpdates } = updateData;
  const { $push, $pull, $addToSet, ...directUpdates } = updateData;

  if (Object.keys(directUpdates).length === 0 && !actionsHash) {
    return null;
@@ -205,116 +195,54 @@ const isDuplicateVersion = (updateData, currentData, versions, actionsHash = nul

  let isMatch = true;
  for (const field of importantFields) {
    const wouldBeValue = wouldBeVersion[field];
    const lastVersionValue = lastVersion[field];

    // Skip if both are undefined/null
    if (!wouldBeValue && !lastVersionValue) {
    if (!wouldBeVersion[field] && !lastVersion[field]) {
      continue;
    }

    // Handle arrays
    if (Array.isArray(wouldBeValue) || Array.isArray(lastVersionValue)) {
      // Normalize: treat undefined/null as empty array for comparison
      let wouldBeArr;
      if (Array.isArray(wouldBeValue)) {
        wouldBeArr = wouldBeValue;
      } else if (wouldBeValue == null) {
        wouldBeArr = [];
      } else {
        wouldBeArr = [wouldBeValue];
      }

      let lastVersionArr;
      if (Array.isArray(lastVersionValue)) {
        lastVersionArr = lastVersionValue;
      } else if (lastVersionValue == null) {
        lastVersionArr = [];
      } else {
        lastVersionArr = [lastVersionValue];
      }

      if (wouldBeArr.length !== lastVersionArr.length) {
    if (Array.isArray(wouldBeVersion[field]) && Array.isArray(lastVersion[field])) {
      if (wouldBeVersion[field].length !== lastVersion[field].length) {
        isMatch = false;
        break;
      }

      // Special handling for projectIds (MongoDB ObjectIds)
      if (field === 'projectIds') {
        const wouldBeIds = wouldBeArr.map((id) => id.toString()).sort();
        const versionIds = lastVersionArr.map((id) => id.toString()).sort();
        const wouldBeIds = wouldBeVersion[field].map((id) => id.toString()).sort();
        const versionIds = lastVersion[field].map((id) => id.toString()).sort();

        if (!wouldBeIds.every((id, i) => id === versionIds[i])) {
          isMatch = false;
          break;
        }
      }
      // Handle arrays of objects
      else if (
        wouldBeArr.length > 0 &&
        typeof wouldBeArr[0] === 'object' &&
        wouldBeArr[0] !== null
      ) {
        const sortedWouldBe = [...wouldBeArr].map((item) => JSON.stringify(item)).sort();
        const sortedVersion = [...lastVersionArr].map((item) => JSON.stringify(item)).sort();
      // Handle arrays of objects like tool_kwargs
      else if (typeof wouldBeVersion[field][0] === 'object' && wouldBeVersion[field][0] !== null) {
        const sortedWouldBe = [...wouldBeVersion[field]].map((item) => JSON.stringify(item)).sort();
        const sortedVersion = [...lastVersion[field]].map((item) => JSON.stringify(item)).sort();

        if (!sortedWouldBe.every((item, i) => item === sortedVersion[i])) {
          isMatch = false;
          break;
        }
      } else {
        const sortedWouldBe = [...wouldBeArr].sort();
        const sortedVersion = [...lastVersionArr].sort();
        const sortedWouldBe = [...wouldBeVersion[field]].sort();
        const sortedVersion = [...lastVersion[field]].sort();

        if (!sortedWouldBe.every((item, i) => item === sortedVersion[i])) {
          isMatch = false;
          break;
        }
      }
    }
    // Handle objects
    else if (typeof wouldBeValue === 'object' && wouldBeValue !== null) {
      const lastVersionObj =
        typeof lastVersionValue === 'object' && lastVersionValue !== null ? lastVersionValue : {};

      // For empty objects, normalize the comparison
      const wouldBeKeys = Object.keys(wouldBeValue);
      const lastVersionKeys = Object.keys(lastVersionObj);

      // If both are empty objects, they're equal
      if (wouldBeKeys.length === 0 && lastVersionKeys.length === 0) {
        continue;
      }

      // Otherwise do a deep comparison
      if (JSON.stringify(wouldBeValue) !== JSON.stringify(lastVersionObj)) {
        isMatch = false;
        break;
      }
    }
    // Handle primitive values
    else {
      // For primitives, handle the case where one is undefined and the other is a default value
      if (wouldBeValue !== lastVersionValue) {
        // Special handling for boolean false vs undefined
        if (
          typeof wouldBeValue === 'boolean' &&
          wouldBeValue === false &&
          lastVersionValue === undefined
        ) {
          continue;
        }
        // Special handling for empty string vs undefined
        if (
          typeof wouldBeValue === 'string' &&
          wouldBeValue === '' &&
          lastVersionValue === undefined
        ) {
          continue;
        }
    } else if (field === 'model_parameters') {
      const wouldBeParams = wouldBeVersion[field] || {};
      const lastVersionParams = lastVersion[field] || {};
      if (JSON.stringify(wouldBeParams) !== JSON.stringify(lastVersionParams)) {
        isMatch = false;
        break;
      }
    } else if (wouldBeVersion[field] !== lastVersion[field]) {
      isMatch = false;
      break;
    }
  }

@@ -333,24 +261,16 @@ const isDuplicateVersion = (updateData, currentData, versions, actionsHash = nul
 * @param {Object} [options] - Optional configuration object.
 * @param {string} [options.updatingUserId] - The ID of the user performing the update (used for tracking non-author updates).
 * @param {boolean} [options.forceVersion] - Force creation of a new version even if no fields changed.
 * @param {boolean} [options.skipVersioning] - Skip version creation entirely (useful for isolated operations like sharing).
 * @returns {Promise<Agent>} The updated or newly created agent document as a plain object.
 * @throws {Error} If the update would create a duplicate version
 */
const updateAgent = async (searchParameter, updateData, options = {}) => {
  const { updatingUserId = null, forceVersion = false, skipVersioning = false } = options;
  const { updatingUserId = null, forceVersion = false } = options;
  const mongoOptions = { new: true, upsert: false };

  const currentAgent = await Agent.findOne(searchParameter);
  if (currentAgent) {
    const {
      __v,
      _id,
      id: __id,
      versions,
      author: _author,
      ...versionData
    } = currentAgent.toObject();
    const { __v, _id, id, versions, author, ...versionData } = currentAgent.toObject();
    const { $push, $pull, $addToSet, ...directUpdates } = updateData;

    let actionsHash = null;
@@ -382,16 +302,25 @@ const updateAgent = async (searchParameter, updateData, options = {}) => {
    }

    const shouldCreateVersion =
      !skipVersioning &&
      (forceVersion || Object.keys(directUpdates).length > 0 || $push || $pull || $addToSet);
      forceVersion ||
      (versions &&
        versions.length > 0 &&
        (Object.keys(directUpdates).length > 0 || $push || $pull || $addToSet));

    if (shouldCreateVersion) {
      const duplicateVersion = isDuplicateVersion(updateData, versionData, versions, actionsHash);
      if (duplicateVersion && !forceVersion) {
        // No changes detected, return the current agent without creating a new version
        const agentObj = currentAgent.toObject();
        agentObj.version = versions.length;
        return agentObj;
        const error = new Error(
          'Duplicate version: This would create a version identical to an existing one',
        );
        error.statusCode = 409;
        error.details = {
          duplicateVersion,
          versionIndex: versions.findIndex(
            (v) => JSON.stringify(duplicateVersion) === JSON.stringify(v),
          ),
        };
        throw error;
      }
    }

@@ -411,7 +340,7 @@ const updateAgent = async (searchParameter, updateData, options = {}) => {
      versionEntry.updatedBy = new mongoose.Types.ObjectId(updatingUserId);
    }

    if (shouldCreateVersion) {
    if (shouldCreateVersion || forceVersion) {
      updateData.$push = {
        ...($push || {}),
        versions: versionEntry,
@@ -530,117 +459,12 @@ const deleteAgent = async (searchParameter) => {
  const agent = await Agent.findOneAndDelete(searchParameter);
  if (agent) {
    await removeAgentFromAllProjects(agent.id);
    await removeAllPermissions({
      resourceType: ResourceType.AGENT,
      resourceId: agent._id,
    });
  }
  return agent;
};

/**
 * Get agents by accessible IDs with optional cursor-based pagination.
 * @param {Object} params - The parameters for getting accessible agents.
 * @param {Array} [params.accessibleIds] - Array of agent ObjectIds the user has ACL access to.
 * @param {Object} [params.otherParams] - Additional query parameters (including author filter).
 * @param {number} [params.limit] - Number of agents to return (max 100). If not provided, returns all agents.
 * @param {string} [params.after] - Cursor for pagination - get agents after this cursor. // base64 encoded JSON string with updatedAt and _id.
 * @returns {Promise<Object>} A promise that resolves to an object containing the agents data and pagination info.
 */
const getListAgentsByAccess = async ({
  accessibleIds = [],
  otherParams = {},
  limit = null,
  after = null,
}) => {
  const isPaginated = limit !== null && limit !== undefined;
  const normalizedLimit = isPaginated ? Math.min(Math.max(1, parseInt(limit) || 20), 100) : null;

  // Build base query combining ACL accessible agents with other filters
  const baseQuery = { ...otherParams, _id: { $in: accessibleIds } };

  // Add cursor condition
  if (after) {
    try {
      const cursor = JSON.parse(Buffer.from(after, 'base64').toString('utf8'));
      const { updatedAt, _id } = cursor;

      const cursorCondition = {
        $or: [
          { updatedAt: { $lt: new Date(updatedAt) } },
          { updatedAt: new Date(updatedAt), _id: { $gt: new mongoose.Types.ObjectId(_id) } },
        ],
      };

      // Merge cursor condition with base query
      if (Object.keys(baseQuery).length > 0) {
        baseQuery.$and = [{ ...baseQuery }, cursorCondition];
        // Remove the original conditions from baseQuery to avoid duplication
        Object.keys(baseQuery).forEach((key) => {
          if (key !== '$and') delete baseQuery[key];
        });
      } else {
        Object.assign(baseQuery, cursorCondition);
      }
    } catch (error) {
      logger.warn('Invalid cursor:', error.message);
    }
  }

  let query = Agent.find(baseQuery, {
    id: 1,
    _id: 1,
    name: 1,
    avatar: 1,
    author: 1,
    projectIds: 1,
    description: 1,
    updatedAt: 1,
    category: 1,
    support_contact: 1,
    is_promoted: 1,
  }).sort({ updatedAt: -1, _id: 1 });

  // Only apply limit if pagination is requested
  if (isPaginated) {
    query = query.limit(normalizedLimit + 1);
  }

  const agents = await query.lean();

  const hasMore = isPaginated ? agents.length > normalizedLimit : false;
  const data = (isPaginated ? agents.slice(0, normalizedLimit) : agents).map((agent) => {
    if (agent.author) {
      agent.author = agent.author.toString();
    }
    return agent;
  });

  // Generate next cursor only if paginated
  let nextCursor = null;
  if (isPaginated && hasMore && data.length > 0) {
    const lastAgent = agents[normalizedLimit - 1];
    nextCursor = Buffer.from(
      JSON.stringify({
        updatedAt: lastAgent.updatedAt.toISOString(),
        _id: lastAgent._id.toString(),
      }),
    ).toString('base64');
  }

  return {
    object: 'list',
    data,
    first_id: data.length > 0 ? data[0].id : null,
    last_id: data.length > 0 ? data[data.length - 1].id : null,
    has_more: hasMore,
    after: nextCursor,
  };
};

/**
 * Get all agents.
 * @deprecated Use getListAgentsByAccess for ACL-aware agent listing
 * @param {Object} searchParameter - The search parameters to find matching agents.
 * @param {string} searchParameter.author - The user ID of the agent's author.
 * @returns {Promise<Object>} A promise that resolves to an object containing the agents data and pagination info.
@@ -659,15 +483,13 @@ const getListAgents = async (searchParameter) => {
  const agents = (
    await Agent.find(query, {
      id: 1,
      _id: 1,
      _id: 0,
      name: 1,
      avatar: 1,
      author: 1,
      projectIds: 1,
      description: 1,
      // @deprecated - isCollaborative replaced by ACL permissions
      isCollaborative: 1,
      category: 1,
    }).lean()
  ).map((agent) => {
    if (agent.author?.toString() !== author) {
@@ -696,7 +518,7 @@ const getListAgents = async (searchParameter) => {
 * This function also updates the corresponding projects to include or exclude the agent ID.
 *
 * @param {Object} params - Parameters for updating the agent's projects.
 * @param {IUser} params.user - Parameters for updating the agent's projects.
 * @param {MongoUser} params.user - Parameters for updating the agent's projects.
 * @param {string} params.agentId - The ID of the agent to update.
 * @param {string[]} [params.projectIds] - Array of project IDs to add to the agent.
 * @param {string[]} [params.removeProjectIds] - Array of project IDs to remove from the agent.
@@ -729,10 +551,7 @@ const updateAgentProjects = async ({ user, agentId, projectIds, removeProjectIds
    delete updateQuery.author;
  }

  const updatedAgent = await updateAgent(updateQuery, updateOps, {
    updatingUserId: user.id,
    skipVersioning: true,
  });
  const updatedAgent = await updateAgent(updateQuery, updateOps, { updatingUserId: user.id });
  if (updatedAgent) {
    return updatedAgent;
  }
@@ -833,14 +652,6 @@ const generateActionMetadataHash = async (actionIds, actions) => {

  return hashHex;
};
/**
 * Counts the number of promoted agents.
 * @returns {Promise<number>} - The count of promoted agents
 */
const countPromotedAgents = async () => {
  const count = await Agent.countDocuments({ is_promoted: true });
  return count;
};

/**
 * Load a default agent based on the endpoint
@@ -850,7 +661,6 @@ const countPromotedAgents = async () => {

module.exports = {
  getAgent,
  getAgents,
  loadAgent,
  createAgent,
  updateAgent,
@@ -859,8 +669,6 @@ module.exports = {
  revertAgentVersion,
  updateAgentProjects,
  addAgentResourceFile,
  getListAgentsByAccess,
  removeAgentResourceFiles,
  generateActionMetadataHash,
  countPromotedAgents,
};

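The cursor format used by `getListAgentsByAccess` is a base64-encoded JSON anchor; its round trip looks like this (the values are made up, but the field names match the code above):

```js
// Encode: taken from the last item of the previous page.
const after = Buffer.from(
  JSON.stringify({ updatedAt: '2024-01-15T10:30:00.000Z', _id: '65a4f0c2e1b2c3d4e5f6a7b8' }),
).toString('base64');

// Decode: the next request recovers the anchor, and the query then selects
// documents with `updatedAt < anchor`, or `updatedAt == anchor` with a larger `_id`.
const { updatedAt, _id } = JSON.parse(Buffer.from(after, 'base64').toString('utf8'));
```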
File diff suppressed because it is too large
@@ -1,4 +1,5 @@
const { Assistant } = require('~/db/models');
const mongoose = require('mongoose');
const Assistant = require('~/db/models').Assistant;

/**
 * Update an assistant with new data without overwriting existing properties,

@@ -1,5 +1,7 @@
const mongoose = require('mongoose');
const { logger } = require('@librechat/data-schemas');
const { Banner } = require('~/db/models');

const Banner = require('~/db/models').Banner;

/**
 * Retrieves the current active banner.

@@ -1,4 +1,4 @@
const { logger } = require('@librechat/data-schemas');
const { logger } = require('~/config');

const options = [
  {

@@ -1,7 +1,8 @@
const mongoose = require('mongoose');
const { logger } = require('@librechat/data-schemas');
const { createTempChatExpirationDate } = require('@librechat/api');
const { getMessages, deleteMessages } = require('./Message');
const { Conversation } = require('~/db/models');

const Conversation = require('~/db/models').Conversation;

/**
 * Searches for a conversation by conversationId and returns a lean document with only conversationId and user.
@@ -99,15 +100,10 @@ module.exports = {
    update.conversationId = newConversationId;
  }

  if (req?.body?.isTemporary) {
    try {
      const appConfig = req.config;
      update.expiredAt = createTempChatExpirationDate(appConfig?.interfaceConfig);
    } catch (err) {
      logger.error('Error creating temporary chat expiration date:', err);
      logger.info(`---\`saveConvo\` context: ${metadata?.context}`);
      update.expiredAt = null;
    }
  if (req.body.isTemporary) {
    const expiredAt = new Date();
    expiredAt.setDate(expiredAt.getDate() + 30);
    update.expiredAt = expiredAt;
  } else {
    update.expiredAt = null;
  }
@@ -174,7 +170,7 @@ module.exports = {

  if (search) {
    try {
      const meiliResults = await Conversation.meiliSearch(search, { filter: `user = "${user}"` });
      const meiliResults = await Conversation.meiliSearch(search);
      const matchingIds = Array.isArray(meiliResults.hits)
        ? meiliResults.hits.map((result) => result.conversationId)
        : [];

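The retention behavior exercised by the spec below implies roughly this clamp inside `createTempChatExpirationDate`. A sketch inferred from the tests: hours clamp to [1, 8760] and default to 720 (30 days) when no config is provided.

```js
// Sketch only; the real implementation lives in @librechat/api.
function tempChatExpirationDate(interfaceConfig) {
  const hours = interfaceConfig?.temporaryChatRetention ?? 720; // default 30 days
  const clamped = Math.min(Math.max(hours, 1), 8760); // min 1 hour, max 1 year
  return new Date(Date.now() + clamped * 60 * 60 * 1000);
}
```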
@@ -1,570 +0,0 @@
|
||||
const mongoose = require('mongoose');
|
||||
const { v4: uuidv4 } = require('uuid');
|
||||
const { EModelEndpoint } = require('librechat-data-provider');
|
||||
const { MongoMemoryServer } = require('mongodb-memory-server');
|
||||
const {
|
||||
deleteNullOrEmptyConversations,
|
||||
searchConversation,
|
||||
getConvosByCursor,
|
||||
getConvosQueried,
|
||||
getConvoFiles,
|
||||
getConvoTitle,
|
||||
deleteConvos,
|
||||
saveConvo,
|
||||
getConvo,
|
||||
} = require('./Conversation');
|
||||
jest.mock('~/server/services/Config/app');
|
||||
jest.mock('./Message');
|
||||
const { getMessages, deleteMessages } = require('./Message');
|
||||
|
||||
const { Conversation } = require('~/db/models');
|
||||
|
||||
describe('Conversation Operations', () => {
|
||||
let mongoServer;
|
||||
let mockReq;
|
||||
let mockConversationData;
|
||||
|
||||
beforeAll(async () => {
|
||||
mongoServer = await MongoMemoryServer.create();
|
||||
const mongoUri = mongoServer.getUri();
|
||||
await mongoose.connect(mongoUri);
|
||||
});
|
||||
|
||||
afterAll(async () => {
|
||||
await mongoose.disconnect();
|
||||
await mongoServer.stop();
|
||||
});
|
||||
|
||||
beforeEach(async () => {
|
||||
// Clear database
|
||||
await Conversation.deleteMany({});
|
||||
|
||||
// Reset mocks
|
||||
jest.clearAllMocks();
|
||||
|
||||
// Default mock implementations
|
||||
getMessages.mockResolvedValue([]);
|
||||
deleteMessages.mockResolvedValue({ deletedCount: 0 });
|
||||
|
||||
mockReq = {
|
||||
user: { id: 'user123' },
|
||||
body: {},
|
||||
config: {
|
||||
interfaceConfig: {
|
||||
temporaryChatRetention: 24, // Default 24 hours
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
mockConversationData = {
|
||||
conversationId: uuidv4(),
|
||||
title: 'Test Conversation',
|
||||
endpoint: EModelEndpoint.openAI,
|
||||
};
|
||||
});
|
||||
|
||||
describe('saveConvo', () => {
|
||||
it('should save a conversation for an authenticated user', async () => {
|
||||
const result = await saveConvo(mockReq, mockConversationData);
|
||||
|
||||
expect(result.conversationId).toBe(mockConversationData.conversationId);
|
||||
expect(result.user).toBe('user123');
|
||||
expect(result.title).toBe('Test Conversation');
|
||||
expect(result.endpoint).toBe(EModelEndpoint.openAI);
|
||||
|
||||
// Verify the conversation was actually saved to the database
|
||||
const savedConvo = await Conversation.findOne({
|
||||
conversationId: mockConversationData.conversationId,
|
||||
user: 'user123',
|
||||
});
|
||||
expect(savedConvo).toBeTruthy();
|
||||
expect(savedConvo.title).toBe('Test Conversation');
|
||||
});
|
||||
|
||||
it('should query messages when saving a conversation', async () => {
|
||||
// Mock messages as ObjectIds
|
||||
const mongoose = require('mongoose');
|
||||
const mockMessages = [new mongoose.Types.ObjectId(), new mongoose.Types.ObjectId()];
|
||||
getMessages.mockResolvedValue(mockMessages);
|
||||
|
||||
await saveConvo(mockReq, mockConversationData);
|
||||
|
||||
// Verify that getMessages was called with correct parameters
|
||||
expect(getMessages).toHaveBeenCalledWith(
|
||||
{ conversationId: mockConversationData.conversationId },
|
||||
'_id',
|
||||
);
|
||||
});
|
||||
|
||||
it('should handle newConversationId when provided', async () => {
|
||||
const newConversationId = uuidv4();
|
||||
const result = await saveConvo(mockReq, {
|
||||
...mockConversationData,
|
||||
newConversationId,
|
||||
});
|
||||
|
||||
expect(result.conversationId).toBe(newConversationId);
|
||||
});
|
||||
|
||||
it('should handle unsetFields metadata', async () => {
|
||||
const metadata = {
|
||||
unsetFields: { someField: 1 },
|
||||
};
|
||||
|
||||
await saveConvo(mockReq, mockConversationData, metadata);
|
||||
|
||||
const savedConvo = await Conversation.findOne({
|
||||
conversationId: mockConversationData.conversationId,
|
||||
});
|
||||
expect(savedConvo.someField).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('isTemporary conversation handling', () => {
|
||||
it('should save a conversation with expiredAt when isTemporary is true', async () => {
|
||||
// Mock app config with 24 hour retention
|
||||
mockReq.config.interfaceConfig.temporaryChatRetention = 24;
|
||||
|
||||
mockReq.body = { isTemporary: true };
|
||||
|
||||
const beforeSave = new Date();
|
||||
const result = await saveConvo(mockReq, mockConversationData);
|
||||
const afterSave = new Date();
|
||||
|
||||
expect(result.conversationId).toBe(mockConversationData.conversationId);
|
||||
expect(result.expiredAt).toBeDefined();
|
||||
expect(result.expiredAt).toBeInstanceOf(Date);
|
||||
|
||||
// Verify expiredAt is approximately 24 hours in the future
|
||||
const expectedExpirationTime = new Date(beforeSave.getTime() + 24 * 60 * 60 * 1000);
|
||||
const actualExpirationTime = new Date(result.expiredAt);
|
||||
|
||||
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
|
||||
expectedExpirationTime.getTime() - 1000,
|
||||
);
|
||||
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
|
||||
new Date(afterSave.getTime() + 24 * 60 * 60 * 1000 + 1000).getTime(),
|
||||
);
|
||||
});
|
||||
|
||||
    it('should save a conversation without expiredAt when isTemporary is false', async () => {
      mockReq.body = { isTemporary: false };

      const result = await saveConvo(mockReq, mockConversationData);

      expect(result.conversationId).toBe(mockConversationData.conversationId);
      expect(result.expiredAt).toBeNull();
    });

    it('should save a conversation without expiredAt when isTemporary is not provided', async () => {
      // No isTemporary in body
      mockReq.body = {};

      const result = await saveConvo(mockReq, mockConversationData);

      expect(result.conversationId).toBe(mockConversationData.conversationId);
      expect(result.expiredAt).toBeNull();
    });

    it('should use custom retention period from config', async () => {
      // Mock app config with 48 hour retention
      mockReq.config.interfaceConfig.temporaryChatRetention = 48;

      mockReq.body = { isTemporary: true };

      const beforeSave = new Date();
      const result = await saveConvo(mockReq, mockConversationData);

      expect(result.expiredAt).toBeDefined();

      // Verify expiredAt is approximately 48 hours in the future
      const expectedExpirationTime = new Date(beforeSave.getTime() + 48 * 60 * 60 * 1000);
      const actualExpirationTime = new Date(result.expiredAt);

      expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
        expectedExpirationTime.getTime() - 1000,
      );
      expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
        expectedExpirationTime.getTime() + 1000,
      );
    });

    it('should handle minimum retention period (1 hour)', async () => {
      // Mock app config with less than minimum retention
      mockReq.config.interfaceConfig.temporaryChatRetention = 0.5; // Half hour - should be clamped to 1 hour

      mockReq.body = { isTemporary: true };

      const beforeSave = new Date();
      const result = await saveConvo(mockReq, mockConversationData);

      expect(result.expiredAt).toBeDefined();

      // Verify expiredAt is approximately 1 hour in the future (minimum)
      const expectedExpirationTime = new Date(beforeSave.getTime() + 1 * 60 * 60 * 1000);
      const actualExpirationTime = new Date(result.expiredAt);

      expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
        expectedExpirationTime.getTime() - 1000,
      );
      expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
        expectedExpirationTime.getTime() + 1000,
      );
    });

    it('should handle maximum retention period (8760 hours)', async () => {
      // Mock app config with more than maximum retention
      mockReq.config.interfaceConfig.temporaryChatRetention = 10000; // Should be clamped to 8760 hours

      mockReq.body = { isTemporary: true };

      const beforeSave = new Date();
      const result = await saveConvo(mockReq, mockConversationData);

      expect(result.expiredAt).toBeDefined();

      // Verify expiredAt is approximately 8760 hours (1 year) in the future
      const expectedExpirationTime = new Date(beforeSave.getTime() + 8760 * 60 * 60 * 1000);
      const actualExpirationTime = new Date(result.expiredAt);

      expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
        expectedExpirationTime.getTime() - 1000,
      );
      expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
        expectedExpirationTime.getTime() + 1000,
      );
    });

    it('should handle missing config gracefully', async () => {
      // Simulate missing config - should use default retention period
      delete mockReq.config;

      mockReq.body = { isTemporary: true };

      const beforeSave = new Date();
      const result = await saveConvo(mockReq, mockConversationData);
      const afterSave = new Date();

      // Should still save the conversation with default retention period (30 days)
      expect(result.conversationId).toBe(mockConversationData.conversationId);
      expect(result.expiredAt).toBeDefined();
      expect(result.expiredAt).toBeInstanceOf(Date);

      // Verify expiredAt is approximately 30 days in the future (720 hours)
      const expectedExpirationTime = new Date(beforeSave.getTime() + 720 * 60 * 60 * 1000);
      const actualExpirationTime = new Date(result.expiredAt);

      expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
        expectedExpirationTime.getTime() - 1000,
      );
      expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
        new Date(afterSave.getTime() + 720 * 60 * 60 * 1000 + 1000).getTime(),
      );
    });

    it('should use default retention when config is not provided', async () => {
      // Mock getAppConfig to return empty config
      mockReq.config = {}; // Empty config

      mockReq.body = { isTemporary: true };

      const beforeSave = new Date();
      const result = await saveConvo(mockReq, mockConversationData);

      expect(result.expiredAt).toBeDefined();

      // Default retention is 30 days (720 hours)
      const expectedExpirationTime = new Date(beforeSave.getTime() + 30 * 24 * 60 * 60 * 1000);
      const actualExpirationTime = new Date(result.expiredAt);

      expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
        expectedExpirationTime.getTime() - 1000,
      );
      expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
        expectedExpirationTime.getTime() + 1000,
      );
    });
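
    // NOTE (editor): the retention tests above pin down the clamping behavior the
    // `createTempChatExpirationDate` helper from `@librechat/api` is expected to
    // implement: configurable hours, a 1-hour floor, an 8760-hour ceiling, and a
    // 720-hour (30-day) default when no config is present. A minimal sketch of
    // such a helper, with hypothetical names, for illustration only:
    const HOUR_MS = 60 * 60 * 1000;
    const clampedExpirationDate = (interfaceConfig) => {
      const configured = interfaceConfig?.temporaryChatRetention ?? 720; // default: 30 days
      const hours = Math.min(Math.max(configured, 1), 8760); // clamp to [1 hour, 1 year]
      return new Date(Date.now() + hours * HOUR_MS);
    };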

    it('should update expiredAt when saving existing temporary conversation', async () => {
      // First save a temporary conversation
      mockReq.config.interfaceConfig.temporaryChatRetention = 24;

      mockReq.body = { isTemporary: true };
      const firstSave = await saveConvo(mockReq, mockConversationData);
      const originalExpiredAt = firstSave.expiredAt;

      // Wait a bit to ensure time difference
      await new Promise((resolve) => setTimeout(resolve, 100));

      // Save again with same conversationId but different title
      const updatedData = { ...mockConversationData, title: 'Updated Title' };
      const secondSave = await saveConvo(mockReq, updatedData);

      // Should update title and create new expiredAt
      expect(secondSave.title).toBe('Updated Title');
      expect(secondSave.expiredAt).toBeDefined();
      expect(new Date(secondSave.expiredAt).getTime()).toBeGreaterThan(
        new Date(originalExpiredAt).getTime(),
      );
    });

    it('should not set expiredAt when updating non-temporary conversation', async () => {
      // First save a non-temporary conversation
      mockReq.body = { isTemporary: false };
      const firstSave = await saveConvo(mockReq, mockConversationData);
      expect(firstSave.expiredAt).toBeNull();

      // Update without isTemporary flag
      mockReq.body = {};
      const updatedData = { ...mockConversationData, title: 'Updated Title' };
      const secondSave = await saveConvo(mockReq, updatedData);

      expect(secondSave.title).toBe('Updated Title');
      expect(secondSave.expiredAt).toBeNull();
    });

    it('should filter out expired conversations in getConvosByCursor', async () => {
      // Create some test conversations
      const nonExpiredConvo = await Conversation.create({
        conversationId: uuidv4(),
        user: 'user123',
        title: 'Non-expired',
        endpoint: EModelEndpoint.openAI,
        expiredAt: null,
        updatedAt: new Date(),
      });

      await Conversation.create({
        conversationId: uuidv4(),
        user: 'user123',
        title: 'Future expired',
        endpoint: EModelEndpoint.openAI,
        expiredAt: new Date(Date.now() + 24 * 60 * 60 * 1000), // 24 hours from now
        updatedAt: new Date(),
      });

      // Mock Meili search
      Conversation.meiliSearch = jest.fn().mockResolvedValue({ hits: [] });

      const result = await getConvosByCursor('user123');

      // Should only return conversations with null or non-existent expiredAt
      expect(result.conversations).toHaveLength(1);
      expect(result.conversations[0].conversationId).toBe(nonExpiredConvo.conversationId);
    });

    it('should filter out expired conversations in getConvosQueried', async () => {
      // Create test conversations
      const nonExpiredConvo = await Conversation.create({
        conversationId: uuidv4(),
        user: 'user123',
        title: 'Non-expired',
        endpoint: EModelEndpoint.openAI,
        expiredAt: null,
      });

      const expiredConvo = await Conversation.create({
        conversationId: uuidv4(),
        user: 'user123',
        title: 'Expired',
        endpoint: EModelEndpoint.openAI,
        expiredAt: new Date(Date.now() + 24 * 60 * 60 * 1000),
      });

      const convoIds = [
        { conversationId: nonExpiredConvo.conversationId },
        { conversationId: expiredConvo.conversationId },
      ];

      const result = await getConvosQueried('user123', convoIds);

      // Should only return the non-expired conversation
      expect(result.conversations).toHaveLength(1);
      expect(result.conversations[0].conversationId).toBe(nonExpiredConvo.conversationId);
      expect(result.convoMap[nonExpiredConvo.conversationId]).toBeDefined();
      expect(result.convoMap[expiredConvo.conversationId]).toBeUndefined();
    });
  });
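
  // NOTE (editor): the two filtering tests above imply that getConvosByCursor and
  // getConvosQueried exclude any conversation whose expiredAt is set at all, even
  // when the date is still in the future. A minimal sketch of such a filter
  // builder (an assumption for illustration, not the verified implementation):
  const buildActiveConvoFilter = (user) => ({
    user,
    $or: [{ expiredAt: null }, { expiredAt: { $exists: false } }],
  });
  // e.g. Conversation.find(buildActiveConvoFilter('user123')).sort({ updatedAt: -1 })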

  describe('searchConversation', () => {
    it('should find a conversation by conversationId', async () => {
      await Conversation.create({
        conversationId: mockConversationData.conversationId,
        user: 'user123',
        title: 'Test',
        endpoint: EModelEndpoint.openAI,
      });

      const result = await searchConversation(mockConversationData.conversationId);

      expect(result).toBeTruthy();
      expect(result.conversationId).toBe(mockConversationData.conversationId);
      expect(result.user).toBe('user123');
      expect(result.title).toBeUndefined(); // Only returns conversationId and user
    });

    it('should return null if conversation not found', async () => {
      const result = await searchConversation('non-existent-id');
      expect(result).toBeNull();
    });
  });

  describe('getConvo', () => {
    it('should retrieve a conversation for a user', async () => {
      await Conversation.create({
        conversationId: mockConversationData.conversationId,
        user: 'user123',
        title: 'Test Conversation',
        endpoint: EModelEndpoint.openAI,
      });

      const result = await getConvo('user123', mockConversationData.conversationId);

      expect(result.conversationId).toBe(mockConversationData.conversationId);
      expect(result.user).toBe('user123');
      expect(result.title).toBe('Test Conversation');
    });

    it('should return null if conversation not found', async () => {
      const result = await getConvo('user123', 'non-existent-id');
      expect(result).toBeNull();
    });
  });

  describe('getConvoTitle', () => {
    it('should return the conversation title', async () => {
      await Conversation.create({
        conversationId: mockConversationData.conversationId,
        user: 'user123',
        title: 'Test Title',
        endpoint: EModelEndpoint.openAI,
      });

      const result = await getConvoTitle('user123', mockConversationData.conversationId);
      expect(result).toBe('Test Title');
    });

    it('should return null if conversation has no title', async () => {
      await Conversation.create({
        conversationId: mockConversationData.conversationId,
        user: 'user123',
        title: null,
        endpoint: EModelEndpoint.openAI,
      });

      const result = await getConvoTitle('user123', mockConversationData.conversationId);
      expect(result).toBeNull();
    });

    it('should return "New Chat" if conversation not found', async () => {
      const result = await getConvoTitle('user123', 'non-existent-id');
      expect(result).toBe('New Chat');
    });
  });

  describe('getConvoFiles', () => {
    it('should return conversation files', async () => {
      const files = ['file1', 'file2'];
      await Conversation.create({
        conversationId: mockConversationData.conversationId,
        user: 'user123',
        endpoint: EModelEndpoint.openAI,
        files,
      });

      const result = await getConvoFiles(mockConversationData.conversationId);
      expect(result).toEqual(files);
    });

    it('should return empty array if no files', async () => {
      await Conversation.create({
        conversationId: mockConversationData.conversationId,
        user: 'user123',
        endpoint: EModelEndpoint.openAI,
      });

      const result = await getConvoFiles(mockConversationData.conversationId);
      expect(result).toEqual([]);
    });

    it('should return empty array if conversation not found', async () => {
      const result = await getConvoFiles('non-existent-id');
      expect(result).toEqual([]);
    });
  });

  describe('deleteConvos', () => {
    it('should delete conversations and associated messages', async () => {
      await Conversation.create({
        conversationId: mockConversationData.conversationId,
        user: 'user123',
        title: 'To Delete',
        endpoint: EModelEndpoint.openAI,
      });

      deleteMessages.mockResolvedValue({ deletedCount: 5 });

      const result = await deleteConvos('user123', {
        conversationId: mockConversationData.conversationId,
      });

      expect(result.deletedCount).toBe(1);
      expect(result.messages.deletedCount).toBe(5);
      expect(deleteMessages).toHaveBeenCalledWith({
        conversationId: { $in: [mockConversationData.conversationId] },
      });

      // Verify conversation was deleted
      const deletedConvo = await Conversation.findOne({
        conversationId: mockConversationData.conversationId,
      });
      expect(deletedConvo).toBeNull();
    });

    it('should throw error if no conversations found', async () => {
      await expect(deleteConvos('user123', { conversationId: 'non-existent' })).rejects.toThrow(
        'Conversation not found or already deleted.',
      );
    });
  });

  describe('deleteNullOrEmptyConversations', () => {
    it('should delete conversations with null, empty, or missing conversationIds', async () => {
      // Since conversationId is required by the schema, we can't create documents with null/missing IDs
      // This test should verify the function works when such documents exist (e.g., from data corruption)

      // For this test, let's create a valid conversation and verify the function doesn't delete it
      await Conversation.create({
        conversationId: mockConversationData.conversationId,
        user: 'user4',
        endpoint: EModelEndpoint.openAI,
      });

      deleteMessages.mockResolvedValue({ deletedCount: 0 });

      const result = await deleteNullOrEmptyConversations();

      expect(result.conversations.deletedCount).toBe(0); // No invalid conversations to delete
      expect(result.messages.deletedCount).toBe(0);

      // Verify valid conversation remains
      const remainingConvos = await Conversation.find({});
      expect(remainingConvos).toHaveLength(1);
      expect(remainingConvos[0].conversationId).toBe(mockConversationData.conversationId);
    });
  });

  describe('Error Handling', () => {
    it('should handle database errors in saveConvo', async () => {
      // Force a database error by disconnecting
      await mongoose.disconnect();

      const result = await saveConvo(mockReq, mockConversationData);

      expect(result).toEqual({ message: 'Error saving conversation' });

      // Reconnect for other tests
      await mongoose.connect(mongoServer.getUri());
    });
  });
});

@@ -1,5 +1,8 @@
const mongoose = require('mongoose');
const { logger } = require('@librechat/data-schemas');
const { ConversationTag, Conversation } = require('~/db/models');

const ConversationTag = require('~/db/models').ConversationTag;
const Conversation = require('~/db/models').Conversation;

/**
 * Retrieves all conversation tags for a user.
@@ -239,46 +242,10 @@ const updateTagsForConversation = async (user, conversationId, tags) => {
  }
};

/**
 * Increments tag counts for existing tags only.
 * @param {string} user - The user ID.
 * @param {string[]} tags - Array of tag names to increment
 * @returns {Promise<void>}
 */
const bulkIncrementTagCounts = async (user, tags) => {
  if (!tags || tags.length === 0) {
    return;
  }

  try {
    const uniqueTags = [...new Set(tags.filter(Boolean))];
    if (uniqueTags.length === 0) {
      return;
    }

    const bulkOps = uniqueTags.map((tag) => ({
      updateOne: {
        filter: { user, tag },
        update: { $inc: { count: 1 } },
      },
    }));

    const result = await ConversationTag.bulkWrite(bulkOps);
    if (result && result.modifiedCount > 0) {
      logger.debug(
        `user: ${user} | Incremented tag counts - modified ${result.modifiedCount} tags`,
      );
    }
  } catch (error) {
    logger.error('[bulkIncrementTagCounts] Error incrementing tag counts', error);
  }
};

module.exports = {
  getConversationTags,
  createConversationTag,
  updateConversationTag,
  deleteConversationTag,
  bulkIncrementTagCounts,
  updateTagsForConversation,
};
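
// NOTE (editor): a usage sketch for the new bulkIncrementTagCounts helper above.
// Because each bulkWrite op filters on { user, tag } without `upsert: true`, only
// tags that already exist for the user are incremented; unknown tags are silently
// skipped rather than created:
//
//   await bulkIncrementTagCounts(userId, ['work', 'ideas', 'does-not-exist']);
//   // increments `count` on the existing 'work' and 'ideas' tags only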

@@ -1,6 +1,8 @@
const mongoose = require('mongoose');
const { logger } = require('@librechat/data-schemas');
const { EToolResources, FileContext } = require('librechat-data-provider');
const { File } = require('~/db/models');
const { EToolResources } = require('librechat-data-provider');

const File = require('~/db/models').File;

/**
 * Finds a file by its file_id with additional query options.
@@ -32,19 +34,19 @@ const getFiles = async (filter, _sortOptions, selectFields = { text: 0 }) => {
 * @returns {Promise<Array<MongoFile>>} Files that match the criteria
 */
const getToolFilesByIds = async (fileIds, toolResourceSet) => {
  if (!fileIds || !fileIds.length || !toolResourceSet?.size) {
  if (!fileIds || !fileIds.length) {
    return [];
  }

  try {
    const filter = {
      file_id: { $in: fileIds },
      $or: [],
    };

    if (toolResourceSet.has(EToolResources.context)) {
      filter.$or.push({ text: { $exists: true, $ne: null }, context: FileContext.agents });
    if (toolResourceSet.size) {
      filter.$or = [];
    }

    if (toolResourceSet.has(EToolResources.file_search)) {
      filter.$or.push({ embedded: true });
    }
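
// NOTE (editor): the hunk above interleaves the before/after versions of
// getToolFilesByIds. One plausible reading of the updated logic, assembled only
// from the lines shown (a sketch, not the verified final file):
//
//   const getToolFilesByIds = async (fileIds, toolResourceSet) => {
//     if (!fileIds || !fileIds.length || !toolResourceSet?.size) {
//       return [];
//     }
//     const filter = { file_id: { $in: fileIds }, $or: [] };
//     if (toolResourceSet.has(EToolResources.context)) {
//       filter.$or.push({ text: { $exists: true, $ne: null }, context: FileContext.agents });
//     }
//     if (toolResourceSet.has(EToolResources.file_search)) {
//       filter.$or.push({ embedded: true });
//     }
//     // ...the rest of the query is outside this hunk
//   };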

@@ -1,621 +0,0 @@
const mongoose = require('mongoose');
const { v4: uuidv4 } = require('uuid');
const { createModels } = require('@librechat/data-schemas');
const { MongoMemoryServer } = require('mongodb-memory-server');
const {
  SystemRoles,
  ResourceType,
  AccessRoleIds,
  PrincipalType,
} = require('librechat-data-provider');
const { grantPermission } = require('~/server/services/PermissionService');
const { getFiles, createFile } = require('./File');
const { seedDefaultRoles } = require('~/models');
const { createAgent } = require('./Agent');

let File;
let Agent;
let AclEntry;
let User;
let modelsToCleanup = [];

describe('File Access Control', () => {
  let mongoServer;

  beforeAll(async () => {
    mongoServer = await MongoMemoryServer.create();
    const mongoUri = mongoServer.getUri();
    await mongoose.connect(mongoUri);

    // Initialize all models
    const models = createModels(mongoose);

    // Track which models we're adding
    modelsToCleanup = Object.keys(models);

    // Register models on mongoose.models so methods can access them
    const dbModels = require('~/db/models');
    Object.assign(mongoose.models, dbModels);

    File = dbModels.File;
    Agent = dbModels.Agent;
    AclEntry = dbModels.AclEntry;
    User = dbModels.User;

    // Seed default roles
    await seedDefaultRoles();
  });

  afterAll(async () => {
    // Clean up all collections before disconnecting
    const collections = mongoose.connection.collections;
    for (const key in collections) {
      await collections[key].deleteMany({});
    }

    // Clear only the models we added
    for (const modelName of modelsToCleanup) {
      if (mongoose.models[modelName]) {
        delete mongoose.models[modelName];
      }
    }

    await mongoose.disconnect();
    await mongoServer.stop();
  });

  beforeEach(async () => {
    await File.deleteMany({});
    await Agent.deleteMany({});
    await AclEntry.deleteMany({});
    await User.deleteMany({});
    // Don't delete AccessRole as they are seeded defaults needed for tests
  });

  describe('hasAccessToFilesViaAgent', () => {
    it('should efficiently check access for multiple files at once', async () => {
      const userId = new mongoose.Types.ObjectId();
      const authorId = new mongoose.Types.ObjectId();
      const agentId = uuidv4();
      const fileIds = [uuidv4(), uuidv4(), uuidv4(), uuidv4()];

      // Create users
      await User.create({
        _id: userId,
        email: 'user@example.com',
        emailVerified: true,
        provider: 'local',
      });

      await User.create({
        _id: authorId,
        email: 'author@example.com',
        emailVerified: true,
        provider: 'local',
      });

      // Create files
      for (const fileId of fileIds) {
        await createFile({
          user: authorId,
          file_id: fileId,
          filename: `file-${fileId}.txt`,
          filepath: `/uploads/${fileId}`,
        });
      }

      // Create agent with only first two files attached
      const agent = await createAgent({
        id: agentId,
        name: 'Test Agent',
        author: authorId,
        model: 'gpt-4',
        provider: 'openai',
        tool_resources: {
          file_search: {
            file_ids: [fileIds[0], fileIds[1]],
          },
        },
      });

      // Grant EDIT permission to user on the agent
      await grantPermission({
        principalType: PrincipalType.USER,
        principalId: userId,
        resourceType: ResourceType.AGENT,
        resourceId: agent._id,
        accessRoleId: AccessRoleIds.AGENT_EDITOR,
        grantedBy: authorId,
      });

      // Check access for all files
      const { hasAccessToFilesViaAgent } = require('~/server/services/Files/permissions');
      const accessMap = await hasAccessToFilesViaAgent({
        userId: userId,
        role: SystemRoles.USER,
        fileIds,
        agentId: agent.id, // Use agent.id which is the custom UUID
      });

      // Should have access only to the first two files
      expect(accessMap.get(fileIds[0])).toBe(true);
      expect(accessMap.get(fileIds[1])).toBe(true);
      expect(accessMap.get(fileIds[2])).toBe(false);
      expect(accessMap.get(fileIds[3])).toBe(false);
    });

    it('should grant access to all files when user is the agent author', async () => {
      const authorId = new mongoose.Types.ObjectId();
      const agentId = uuidv4();
      const fileIds = [uuidv4(), uuidv4(), uuidv4()];

      // Create author user
      await User.create({
        _id: authorId,
        email: 'author@example.com',
        emailVerified: true,
        provider: 'local',
      });

      // Create agent
      await createAgent({
        id: agentId,
        name: 'Test Agent',
        author: authorId,
        model: 'gpt-4',
        provider: 'openai',
        tool_resources: {
          file_search: {
            file_ids: [fileIds[0]], // Only one file attached
          },
        },
      });

      // Check access as the author
      const { hasAccessToFilesViaAgent } = require('~/server/services/Files/permissions');
      const accessMap = await hasAccessToFilesViaAgent({
        userId: authorId,
        role: SystemRoles.USER,
        fileIds,
        agentId,
      });

      // Author should have access to all files
      expect(accessMap.get(fileIds[0])).toBe(true);
      expect(accessMap.get(fileIds[1])).toBe(true);
      expect(accessMap.get(fileIds[2])).toBe(true);
    });

    it('should handle non-existent agent gracefully', async () => {
      const userId = new mongoose.Types.ObjectId();
      const fileIds = [uuidv4(), uuidv4()];

      // Create user
      await User.create({
        _id: userId,
        email: 'user@example.com',
        emailVerified: true,
        provider: 'local',
      });

      const { hasAccessToFilesViaAgent } = require('~/server/services/Files/permissions');
      const accessMap = await hasAccessToFilesViaAgent({
        userId: userId,
        role: SystemRoles.USER,
        fileIds,
        agentId: 'non-existent-agent',
      });

      // Should have no access to any files
      expect(accessMap.get(fileIds[0])).toBe(false);
      expect(accessMap.get(fileIds[1])).toBe(false);
    });

    it('should deny access when user only has VIEW permission and needs access for deletion', async () => {
      const userId = new mongoose.Types.ObjectId();
      const authorId = new mongoose.Types.ObjectId();
      const agentId = uuidv4();
      const fileIds = [uuidv4(), uuidv4()];

      // Create users
      await User.create({
        _id: userId,
        email: 'user@example.com',
        emailVerified: true,
        provider: 'local',
      });

      await User.create({
        _id: authorId,
        email: 'author@example.com',
        emailVerified: true,
        provider: 'local',
      });

      // Create agent with files
      const agent = await createAgent({
        id: agentId,
        name: 'View-Only Agent',
        author: authorId,
        model: 'gpt-4',
        provider: 'openai',
        tool_resources: {
          file_search: {
            file_ids: fileIds,
          },
        },
      });

      // Grant only VIEW permission to user on the agent
      await grantPermission({
        principalType: PrincipalType.USER,
        principalId: userId,
        resourceType: ResourceType.AGENT,
        resourceId: agent._id,
        accessRoleId: AccessRoleIds.AGENT_VIEWER,
        grantedBy: authorId,
      });

      // Check access for files
      const { hasAccessToFilesViaAgent } = require('~/server/services/Files/permissions');
      const accessMap = await hasAccessToFilesViaAgent({
        userId: userId,
        role: SystemRoles.USER,
        fileIds,
        agentId,
        isDelete: true,
      });

      // Should have no access to any files when only VIEW permission
      expect(accessMap.get(fileIds[0])).toBe(false);
      expect(accessMap.get(fileIds[1])).toBe(false);
    });

    it('should grant access when user has VIEW permission', async () => {
      const userId = new mongoose.Types.ObjectId();
      const authorId = new mongoose.Types.ObjectId();
      const agentId = uuidv4();
      const fileIds = [uuidv4(), uuidv4()];

      // Create users
      await User.create({
        _id: userId,
        email: 'user@example.com',
        emailVerified: true,
        provider: 'local',
      });

      await User.create({
        _id: authorId,
        email: 'author@example.com',
        emailVerified: true,
        provider: 'local',
      });

      // Create agent with files
      const agent = await createAgent({
        id: agentId,
        name: 'View-Only Agent',
        author: authorId,
        model: 'gpt-4',
        provider: 'openai',
        tool_resources: {
          file_search: {
            file_ids: fileIds,
          },
        },
      });

      // Grant only VIEW permission to user on the agent
      await grantPermission({
        principalType: PrincipalType.USER,
        principalId: userId,
        resourceType: ResourceType.AGENT,
        resourceId: agent._id,
        accessRoleId: AccessRoleIds.AGENT_VIEWER,
        grantedBy: authorId,
      });

      // Check access for files
      const { hasAccessToFilesViaAgent } = require('~/server/services/Files/permissions');
      const accessMap = await hasAccessToFilesViaAgent({
        userId: userId,
        role: SystemRoles.USER,
        fileIds,
        agentId,
      });

      expect(accessMap.get(fileIds[0])).toBe(true);
      expect(accessMap.get(fileIds[1])).toBe(true);
    });
  });

  describe('getFiles with agent access control', () => {
    test('should return files owned by user and files accessible through agent', async () => {
      const authorId = new mongoose.Types.ObjectId();
      const userId = new mongoose.Types.ObjectId();
      const agentId = `agent_${uuidv4()}`;
      const ownedFileId = `file_${uuidv4()}`;
      const sharedFileId = `file_${uuidv4()}`;
      const inaccessibleFileId = `file_${uuidv4()}`;

      // Create users
      await User.create({
        _id: userId,
        email: 'user@example.com',
        emailVerified: true,
        provider: 'local',
      });

      await User.create({
        _id: authorId,
        email: 'author@example.com',
        emailVerified: true,
        provider: 'local',
      });

      // Create agent with shared file
      const agent = await createAgent({
        id: agentId,
        name: 'Shared Agent',
        provider: 'test',
        model: 'test-model',
        author: authorId,
        tool_resources: {
          file_search: {
            file_ids: [sharedFileId],
          },
        },
      });

      // Grant EDIT permission to user on the agent
      await grantPermission({
        principalType: PrincipalType.USER,
        principalId: userId,
        resourceType: ResourceType.AGENT,
        resourceId: agent._id,
        accessRoleId: AccessRoleIds.AGENT_EDITOR,
        grantedBy: authorId,
      });

      // Create files
      await createFile({
        file_id: ownedFileId,
        user: userId,
        filename: 'owned.txt',
        filepath: '/uploads/owned.txt',
        type: 'text/plain',
        bytes: 100,
      });

      await createFile({
        file_id: sharedFileId,
        user: authorId,
        filename: 'shared.txt',
        filepath: '/uploads/shared.txt',
        type: 'text/plain',
        bytes: 200,
        embedded: true,
      });

      await createFile({
        file_id: inaccessibleFileId,
        user: authorId,
        filename: 'inaccessible.txt',
        filepath: '/uploads/inaccessible.txt',
        type: 'text/plain',
        bytes: 300,
      });

      // Get all files first
      const allFiles = await getFiles(
        { file_id: { $in: [ownedFileId, sharedFileId, inaccessibleFileId] } },
        null,
        { text: 0 },
      );

      // Then filter by access control
      const { filterFilesByAgentAccess } = require('~/server/services/Files/permissions');
      const files = await filterFilesByAgentAccess({
        files: allFiles,
        userId: userId,
        role: SystemRoles.USER,
        agentId,
      });

      expect(files).toHaveLength(2);
      expect(files.map((f) => f.file_id)).toContain(ownedFileId);
      expect(files.map((f) => f.file_id)).toContain(sharedFileId);
      expect(files.map((f) => f.file_id)).not.toContain(inaccessibleFileId);
    });

    test('should return all files when no userId/agentId provided', async () => {
      const userId = new mongoose.Types.ObjectId();
      const fileId1 = `file_${uuidv4()}`;
      const fileId2 = `file_${uuidv4()}`;

      await createFile({
        file_id: fileId1,
        user: userId,
        filename: 'file1.txt',
        filepath: '/uploads/file1.txt',
        type: 'text/plain',
        bytes: 100,
      });

      await createFile({
        file_id: fileId2,
        user: new mongoose.Types.ObjectId(),
        filename: 'file2.txt',
        filepath: '/uploads/file2.txt',
        type: 'text/plain',
        bytes: 200,
      });

      const files = await getFiles({ file_id: { $in: [fileId1, fileId2] } });
      expect(files).toHaveLength(2);
    });
  });

  describe('Role-based file permissions', () => {
    it('should optimize permission checks when role is provided', async () => {
      const userId = new mongoose.Types.ObjectId();
      const authorId = new mongoose.Types.ObjectId();
      const agentId = uuidv4();
      const fileIds = [uuidv4(), uuidv4()];

      // Create users
      await User.create({
        _id: userId,
        email: 'user@example.com',
        emailVerified: true,
        provider: 'local',
        role: 'ADMIN', // User has ADMIN role
      });

      await User.create({
        _id: authorId,
        email: 'author@example.com',
        emailVerified: true,
        provider: 'local',
      });

      // Create files
      for (const fileId of fileIds) {
        await createFile({
          file_id: fileId,
          user: authorId,
          filename: `${fileId}.txt`,
          filepath: `/uploads/${fileId}.txt`,
          type: 'text/plain',
          bytes: 100,
        });
      }

      // Create agent with files
      const agent = await createAgent({
        id: agentId,
        name: 'Test Agent',
        author: authorId,
        model: 'gpt-4',
        provider: 'openai',
        tool_resources: {
          file_search: {
            file_ids: fileIds,
          },
        },
      });

      // Grant permission to ADMIN role
      await grantPermission({
        principalType: PrincipalType.ROLE,
        principalId: 'ADMIN',
        resourceType: ResourceType.AGENT,
        resourceId: agent._id,
        accessRoleId: AccessRoleIds.AGENT_EDITOR,
        grantedBy: authorId,
      });

      // Check access with role provided (should avoid DB query)
      const { hasAccessToFilesViaAgent } = require('~/server/services/Files/permissions');
      const accessMapWithRole = await hasAccessToFilesViaAgent({
        userId: userId,
        role: 'ADMIN',
        fileIds,
        agentId: agent.id,
      });

      // User should have access through their ADMIN role
      expect(accessMapWithRole.get(fileIds[0])).toBe(true);
      expect(accessMapWithRole.get(fileIds[1])).toBe(true);

      // Check access without role (will query DB to get user's role)
      const accessMapWithoutRole = await hasAccessToFilesViaAgent({
        userId: userId,
        fileIds,
        agentId: agent.id,
      });

      // Should have same result
      expect(accessMapWithoutRole.get(fileIds[0])).toBe(true);
      expect(accessMapWithoutRole.get(fileIds[1])).toBe(true);
    });

    it('should deny access when user role changes', async () => {
      const userId = new mongoose.Types.ObjectId();
      const authorId = new mongoose.Types.ObjectId();
      const agentId = uuidv4();
      const fileId = uuidv4();

      // Create users
      await User.create({
        _id: userId,
        email: 'user@example.com',
        emailVerified: true,
        provider: 'local',
        role: 'EDITOR',
      });

      await User.create({
        _id: authorId,
        email: 'author@example.com',
        emailVerified: true,
        provider: 'local',
      });

      // Create file
      await createFile({
        file_id: fileId,
        user: authorId,
        filename: 'test.txt',
        filepath: '/uploads/test.txt',
        type: 'text/plain',
        bytes: 100,
      });

      // Create agent
      const agent = await createAgent({
        id: agentId,
        name: 'Test Agent',
        author: authorId,
        model: 'gpt-4',
        provider: 'openai',
        tool_resources: {
          file_search: {
            file_ids: [fileId],
          },
        },
      });

      // Grant permission to EDITOR role only
      await grantPermission({
        principalType: PrincipalType.ROLE,
        principalId: 'EDITOR',
        resourceType: ResourceType.AGENT,
        resourceId: agent._id,
        accessRoleId: AccessRoleIds.AGENT_EDITOR,
        grantedBy: authorId,
      });

      const { hasAccessToFilesViaAgent } = require('~/server/services/Files/permissions');

      // Check with EDITOR role - should have access
      const accessAsEditor = await hasAccessToFilesViaAgent({
        userId: userId,
        role: 'EDITOR',
        fileIds: [fileId],
        agentId: agent.id,
      });
      expect(accessAsEditor.get(fileId)).toBe(true);

      // Simulate role change to USER - should lose access
      const accessAsUser = await hasAccessToFilesViaAgent({
        userId: userId,
        role: SystemRoles.USER,
        fileIds: [fileId],
        agentId: agent.id,
      });
      expect(accessAsUser.get(fileId)).toBe(false);
    });
  });
});

@@ -1,8 +1,7 @@
const { z } = require('zod');
const { logger } = require('@librechat/data-schemas');
const { createTempChatExpirationDate } = require('@librechat/api');
const { Message } = require('~/db/models');

const Message = require('~/db/models').Message;
const idSchema = z.string().uuid();

/**
@@ -10,7 +9,7 @@ const idSchema = z.string().uuid();
 *
 * @async
 * @function saveMessage
 * @param {ServerRequest} req - The request object containing user information.
 * @param {Express.Request} req - The request object containing user information.
 * @param {Object} params - The message data object.
 * @param {string} params.endpoint - The endpoint where the message originated.
 * @param {string} params.iconURL - The URL of the sender's icon.
@@ -55,14 +54,9 @@ async function saveMessage(req, params, metadata) {
  };

  if (req?.body?.isTemporary) {
    try {
      const appConfig = req.config;
      update.expiredAt = createTempChatExpirationDate(appConfig?.interfaceConfig);
    } catch (err) {
      logger.error('Error creating temporary chat expiration date:', err);
      logger.info(`---\`saveMessage\` context: ${metadata?.context}`);
      update.expiredAt = null;
    }
    const expiredAt = new Date();
    expiredAt.setDate(expiredAt.getDate() + 30);
    update.expiredAt = expiredAt;
  } else {
    update.expiredAt = null;
  }
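
// NOTE (editor): this hunk swaps the previous hard-coded 30-day expiry
// (expiredAt = now + 30 days) for the config-driven createTempChatExpirationDate
// helper, falling back to expiredAt = null when the helper throws. Roughly
// equivalent shape, sketched for comparison (assumption: the helper clamps and
// defaults exactly as the retention specs above exercise):
//
//   update.expiredAt = req?.body?.isTemporary
//     ? createTempChatExpirationDate(req.config?.interfaceConfig) // caught -> null on error
//     : null;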

@@ -259,7 +253,6 @@ async function updateMessage(req, message, metadata) {
      text: updatedMessage.text,
      isCreatedByUser: updatedMessage.isCreatedByUser,
      tokenCount: updatedMessage.tokenCount,
      feedback: updatedMessage.feedback,
    };
  } catch (err) {
    logger.error('Error updating message:', err);

@@ -1,119 +1,116 @@
const mongoose = require('mongoose');
const { v4: uuidv4 } = require('uuid');
const { messageSchema } = require('@librechat/data-schemas');
const { MongoMemoryServer } = require('mongodb-memory-server');

jest.mock('mongoose');

const mockFindQuery = {
  select: jest.fn().mockReturnThis(),
  sort: jest.fn().mockReturnThis(),
  lean: jest.fn().mockReturnThis(),
  deleteMany: jest.fn().mockResolvedValue({ deletedCount: 1 }),
};

const mockSchema = {
  findOneAndUpdate: jest.fn(),
  updateOne: jest.fn(),
  findOne: jest.fn(() => ({
    lean: jest.fn(),
  })),
  find: jest.fn(() => mockFindQuery),
  deleteMany: jest.fn(),
};

jest.mock('~/config/winston', () => ({
  error: jest.fn(),
}));

const mockModels = {
  Message: {
    findOneAndUpdate: mockSchema.findOneAndUpdate,
    updateOne: mockSchema.updateOne,
    findOne: mockSchema.findOne,
    find: mockSchema.find,
    deleteMany: mockSchema.deleteMany,
  },
};
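
// NOTE (editor): how mockModels reaches the module under test is not visible in
// this hunk; presumably it is registered with a factory mock, e.g. (an assumption
// for illustration, not shown in the diff):
//
//   jest.mock('~/db/models', () => mockModels);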

const {
  saveMessage,
  getMessages,
  updateMessage,
  deleteMessages,
  bulkSaveMessages,
  updateMessageText,
  deleteMessagesSince,
} = require('./Message');

jest.mock('~/server/services/Config/app');

/**
 * @type {import('mongoose').Model<import('@librechat/data-schemas').IMessage>}
 */
let Message;
} = require('~/models/Message');

describe('Message Operations', () => {
  let mongoServer;
  let mockReq;
  let mockMessageData;
  let mockMessage;

  beforeAll(async () => {
    mongoServer = await MongoMemoryServer.create();
    const mongoUri = mongoServer.getUri();
    Message = mongoose.models.Message || mongoose.model('Message', messageSchema);
    await mongoose.connect(mongoUri);
  });

  afterAll(async () => {
    await mongoose.disconnect();
    await mongoServer.stop();
  });

  beforeEach(async () => {
    // Clear database
    await Message.deleteMany({});
  beforeEach(() => {
    jest.clearAllMocks();

    mockReq = {
      user: { id: 'user123' },
      config: {
        interfaceConfig: {
          temporaryChatRetention: 24, // Default 24 hours
        },
      },
    };

    mockMessageData = {
    mockMessage = {
      messageId: 'msg123',
      conversationId: uuidv4(),
      text: 'Hello, world!',
      user: 'user123',
    };

    mockSchema.findOneAndUpdate.mockResolvedValue({
      toObject: () => mockMessage,
    });
  });

  describe('saveMessage', () => {
    it('should save a message for an authenticated user', async () => {
      const result = await saveMessage(mockReq, mockMessageData);

      expect(result.messageId).toBe('msg123');
      expect(result.user).toBe('user123');
      expect(result.text).toBe('Hello, world!');

      // Verify the message was actually saved to the database
      const savedMessage = await Message.findOne({ messageId: 'msg123', user: 'user123' });
      expect(savedMessage).toBeTruthy();
      expect(savedMessage.text).toBe('Hello, world!');
      const result = await saveMessage(mockReq, mockMessage);
      expect(result).toEqual(mockMessage);
      expect(mockSchema.findOneAndUpdate).toHaveBeenCalledWith(
        { messageId: 'msg123', user: 'user123' },
        expect.objectContaining({ user: 'user123' }),
        expect.any(Object),
      );
    });

    it('should throw an error for unauthenticated user', async () => {
      mockReq.user = null;
      await expect(saveMessage(mockReq, mockMessageData)).rejects.toThrow('User not authenticated');
      await expect(saveMessage(mockReq, mockMessage)).rejects.toThrow('User not authenticated');
    });

    it('should handle invalid conversation ID gracefully', async () => {
      mockMessageData.conversationId = 'invalid-id';
      const result = await saveMessage(mockReq, mockMessageData);
      expect(result).toBeUndefined();
    it('should throw an error for invalid conversation ID', async () => {
      mockMessage.conversationId = 'invalid-id';
      await expect(saveMessage(mockReq, mockMessage)).resolves.toBeUndefined();
    });
  });

  describe('updateMessageText', () => {
    it('should update message text for the authenticated user', async () => {
      // First save a message
      await saveMessage(mockReq, mockMessageData);

      // Then update it
      await updateMessageText(mockReq, { messageId: 'msg123', text: 'Updated text' });

      // Verify the update
      const updatedMessage = await Message.findOne({ messageId: 'msg123', user: 'user123' });
      expect(updatedMessage.text).toBe('Updated text');
      expect(mockSchema.updateOne).toHaveBeenCalledWith(
        { messageId: 'msg123', user: 'user123' },
        { text: 'Updated text' },
      );
    });
  });

  describe('updateMessage', () => {
    it('should update a message for the authenticated user', async () => {
      // First save a message
      await saveMessage(mockReq, mockMessageData);

      mockSchema.findOneAndUpdate.mockResolvedValue(mockMessage);
      const result = await updateMessage(mockReq, { messageId: 'msg123', text: 'Updated text' });

      expect(result.messageId).toBe('msg123');
      expect(result.text).toBe('Updated text');

      // Verify in database
      const updatedMessage = await Message.findOne({ messageId: 'msg123', user: 'user123' });
      expect(updatedMessage.text).toBe('Updated text');
      expect(result).toEqual(
        expect.objectContaining({
          messageId: 'msg123',
          text: 'Hello, world!',
        }),
      );
    });

    it('should throw an error if message is not found', async () => {
      mockSchema.findOneAndUpdate.mockResolvedValue(null);
      await expect(
        updateMessage(mockReq, { messageId: 'nonexistent', text: 'Test' }),
      ).rejects.toThrow('Message not found or user not authorized.');
@@ -122,45 +119,19 @@ describe('Message Operations', () => {

  describe('deleteMessagesSince', () => {
    it('should delete messages only for the authenticated user', async () => {
      const conversationId = uuidv4();

      // Create multiple messages in the same conversation
      await saveMessage(mockReq, {
        messageId: 'msg1',
        conversationId,
        text: 'First message',
        user: 'user123',
      mockSchema.findOne().lean.mockResolvedValueOnce({ createdAt: new Date() });
      mockFindQuery.deleteMany.mockResolvedValueOnce({ deletedCount: 1 });
      const result = await deleteMessagesSince(mockReq, {
        messageId: 'msg123',
        conversationId: 'convo123',
      });

      await saveMessage(mockReq, {
        messageId: 'msg2',
        conversationId,
        text: 'Second message',
        user: 'user123',
      });

      await saveMessage(mockReq, {
        messageId: 'msg3',
        conversationId,
        text: 'Third message',
        user: 'user123',
      });

      // Delete messages since message2 (this should only delete messages created AFTER msg2)
      await deleteMessagesSince(mockReq, {
        messageId: 'msg2',
        conversationId,
      });

      // Verify msg1 and msg2 remain, msg3 is deleted
      const remainingMessages = await Message.find({ conversationId, user: 'user123' });
      expect(remainingMessages).toHaveLength(2);
      expect(remainingMessages.map((m) => m.messageId)).toContain('msg1');
      expect(remainingMessages.map((m) => m.messageId)).toContain('msg2');
      expect(remainingMessages.map((m) => m.messageId)).not.toContain('msg3');
      expect(mockSchema.findOne).toHaveBeenCalledWith({ messageId: 'msg123', user: 'user123' });
      expect(mockSchema.find).not.toHaveBeenCalled();
      expect(result).toBeUndefined();
    });

    it('should return undefined if no message is found', async () => {
      mockSchema.findOne().lean.mockResolvedValueOnce(null);
      const result = await deleteMessagesSince(mockReq, {
        messageId: 'nonexistent',
        conversationId: 'convo123',
@@ -171,71 +142,29 @@ describe('Message Operations', () => {

  describe('getMessages', () => {
    it('should retrieve messages with the correct filter', async () => {
      const conversationId = uuidv4();

      // Save some messages
      await saveMessage(mockReq, {
        messageId: 'msg1',
        conversationId,
        text: 'First message',
        user: 'user123',
      });

      await saveMessage(mockReq, {
        messageId: 'msg2',
        conversationId,
        text: 'Second message',
        user: 'user123',
      });

      const messages = await getMessages({ conversationId });
      expect(messages).toHaveLength(2);
      expect(messages[0].text).toBe('First message');
      expect(messages[1].text).toBe('Second message');
      const filter = { conversationId: 'convo123' };
      await getMessages(filter);
      expect(mockSchema.find).toHaveBeenCalledWith(filter);
      expect(mockFindQuery.sort).toHaveBeenCalledWith({ createdAt: 1 });
      expect(mockFindQuery.lean).toHaveBeenCalled();
    });
  });

  describe('deleteMessages', () => {
    it('should delete messages with the correct filter', async () => {
      // Save some messages for different users
      await saveMessage(mockReq, mockMessageData);
      await saveMessage(
        { user: { id: 'user456' } },
        {
          messageId: 'msg456',
          conversationId: uuidv4(),
          text: 'Other user message',
          user: 'user456',
        },
      );

      await deleteMessages({ user: 'user123' });

      // Verify only user123's messages were deleted
      const user123Messages = await Message.find({ user: 'user123' });
      const user456Messages = await Message.find({ user: 'user456' });

      expect(user123Messages).toHaveLength(0);
      expect(user456Messages).toHaveLength(1);
      expect(mockSchema.deleteMany).toHaveBeenCalledWith({ user: 'user123' });
    });
  });

  describe('Conversation Hijacking Prevention', () => {
    it("should not allow editing a message in another user's conversation", async () => {
      const attackerReq = { user: { id: 'attacker123' } };
      const victimConversationId = uuidv4();
      const victimConversationId = 'victim-convo-123';
      const victimMessageId = 'victim-msg-123';

      // First, save a message as the victim (but we'll try to edit as attacker)
      const victimReq = { user: { id: 'victim123' } };
      await saveMessage(victimReq, {
        messageId: victimMessageId,
        conversationId: victimConversationId,
        text: 'Victim message',
        user: 'victim123',
      });
      mockSchema.findOneAndUpdate.mockResolvedValue(null);

      // Attacker tries to edit the victim's message
      await expect(
        updateMessage(attackerReq, {
          messageId: victimMessageId,
@@ -244,333 +173,71 @@ describe('Message Operations', () => {
        }),
      ).rejects.toThrow('Message not found or user not authorized.');

      // Verify the original message is unchanged
      const originalMessage = await Message.findOne({
        messageId: victimMessageId,
        user: 'victim123',
      });
      expect(originalMessage.text).toBe('Victim message');
      expect(mockSchema.findOneAndUpdate).toHaveBeenCalledWith(
        { messageId: victimMessageId, user: 'attacker123' },
        expect.anything(),
        expect.anything(),
      );
    });

    it("should not allow deleting messages from another user's conversation", async () => {
      const attackerReq = { user: { id: 'attacker123' } };
      const victimConversationId = uuidv4();
      const victimConversationId = 'victim-convo-123';
      const victimMessageId = 'victim-msg-123';

      // Save a message as the victim
      const victimReq = { user: { id: 'victim123' } };
      await saveMessage(victimReq, {
        messageId: victimMessageId,
        conversationId: victimConversationId,
        text: 'Victim message',
        user: 'victim123',
      });

      // Attacker tries to delete from victim's conversation
      mockSchema.findOne().lean.mockResolvedValueOnce(null); // Simulating message not found for this user
      const result = await deleteMessagesSince(attackerReq, {
        messageId: victimMessageId,
        conversationId: victimConversationId,
      });

      expect(result).toBeUndefined();

      // Verify the victim's message still exists
      const victimMessage = await Message.findOne({
      expect(mockSchema.findOne).toHaveBeenCalledWith({
        messageId: victimMessageId,
        user: 'victim123',
        user: 'attacker123',
      });
      expect(victimMessage).toBeTruthy();
      expect(victimMessage.text).toBe('Victim message');
    });

    it("should not allow inserting a new message into another user's conversation", async () => {
      const attackerReq = { user: { id: 'attacker123' } };
      const victimConversationId = uuidv4();
      const victimConversationId = uuidv4(); // Use a valid UUID

      // Attacker tries to save a message - this should succeed but with attacker's user ID
      const result = await saveMessage(attackerReq, {
        conversationId: victimConversationId,
        text: 'Inserted malicious message',
        messageId: 'new-msg-123',
        user: 'attacker123',
      });
      await expect(
        saveMessage(attackerReq, {
          conversationId: victimConversationId,
          text: 'Inserted malicious message',
          messageId: 'new-msg-123',
        }),
      ).resolves.not.toThrow(); // It should not throw an error

      expect(result).toBeTruthy();
      expect(result.user).toBe('attacker123');

      // Verify the message was saved with the attacker's user ID, not as an anonymous message
      const savedMessage = await Message.findOne({ messageId: 'new-msg-123' });
      expect(savedMessage.user).toBe('attacker123');
      expect(savedMessage.conversationId).toBe(victimConversationId);
      // Check that the message was saved with the attacker's user ID
      expect(mockSchema.findOneAndUpdate).toHaveBeenCalledWith(
        { messageId: 'new-msg-123', user: 'attacker123' },
        expect.objectContaining({
          user: 'attacker123',
          conversationId: victimConversationId,
        }),
        expect.anything(),
      );
    });

    it('should allow retrieving messages from any conversation', async () => {
      const victimConversationId = uuidv4();
      const victimConversationId = 'victim-convo-123';

      // Save a message in the victim's conversation
      const victimReq = { user: { id: 'victim123' } };
      await saveMessage(victimReq, {
        messageId: 'victim-msg',
      await getMessages({ conversationId: victimConversationId });

      expect(mockSchema.find).toHaveBeenCalledWith({
        conversationId: victimConversationId,
        text: 'Victim message',
        user: 'victim123',
      });

      // Anyone should be able to retrieve messages by conversation ID
      const messages = await getMessages({ conversationId: victimConversationId });
      expect(messages).toHaveLength(1);
      expect(messages[0].text).toBe('Victim message');
    });
  });
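
  // NOTE (editor): the hijacking-prevention tests above all pin down one
  // invariant visible in the assertions: every mutating query is scoped to the
  // requesting user, e.g. findOneAndUpdate({ messageId, user: req.user.id }, ...),
  // so a request against another user's message simply matches no document.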
|
||||
|
||||
describe('isTemporary message handling', () => {
|
||||
beforeEach(() => {
|
||||
// Reset mocks before each test
|
||||
jest.clearAllMocks();
|
||||
});
|
||||
|
||||
it('should save a message with expiredAt when isTemporary is true', async () => {
|
||||
// Mock app config with 24 hour retention
|
||||
mockReq.config.interfaceConfig.temporaryChatRetention = 24;
|
||||
|
||||
mockReq.body = { isTemporary: true };
|
||||
|
||||
const beforeSave = new Date();
|
||||
const result = await saveMessage(mockReq, mockMessageData);
|
||||
const afterSave = new Date();
|
||||
|
||||
expect(result.messageId).toBe('msg123');
|
||||
expect(result.expiredAt).toBeDefined();
|
||||
expect(result.expiredAt).toBeInstanceOf(Date);
|
||||
|
||||
// Verify expiredAt is approximately 24 hours in the future
|
||||
const expectedExpirationTime = new Date(beforeSave.getTime() + 24 * 60 * 60 * 1000);
|
||||
      const actualExpirationTime = new Date(result.expiredAt);

      expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
        expectedExpirationTime.getTime() - 1000,
      );
      expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
        new Date(afterSave.getTime() + 24 * 60 * 60 * 1000 + 1000).getTime(),
      );
    });

    it('should save a message without expiredAt when isTemporary is false', async () => {
      mockReq.body = { isTemporary: false };

      const result = await saveMessage(mockReq, mockMessageData);

      expect(result.messageId).toBe('msg123');
      expect(result.expiredAt).toBeNull();
    });

    it('should save a message without expiredAt when isTemporary is not provided', async () => {
      // No isTemporary in body
      mockReq.body = {};

      const result = await saveMessage(mockReq, mockMessageData);

      expect(result.messageId).toBe('msg123');
      expect(result.expiredAt).toBeNull();
    });

    it('should use custom retention period from config', async () => {
      // Mock app config with 48 hour retention
      mockReq.config.interfaceConfig.temporaryChatRetention = 48;

      mockReq.body = { isTemporary: true };

      const beforeSave = new Date();
      const result = await saveMessage(mockReq, mockMessageData);

      expect(result.expiredAt).toBeDefined();

      // Verify expiredAt is approximately 48 hours in the future
      const expectedExpirationTime = new Date(beforeSave.getTime() + 48 * 60 * 60 * 1000);
      const actualExpirationTime = new Date(result.expiredAt);

      expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
        expectedExpirationTime.getTime() - 1000,
      );
      expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
        expectedExpirationTime.getTime() + 1000,
      );
    });

    it('should handle minimum retention period (1 hour)', async () => {
      // Mock app config with less than minimum retention
      mockReq.config.interfaceConfig.temporaryChatRetention = 0.5; // Half hour - should be clamped to 1 hour

      mockReq.body = { isTemporary: true };

      const beforeSave = new Date();
      const result = await saveMessage(mockReq, mockMessageData);

      expect(result.expiredAt).toBeDefined();

      // Verify expiredAt is approximately 1 hour in the future (minimum)
      const expectedExpirationTime = new Date(beforeSave.getTime() + 1 * 60 * 60 * 1000);
      const actualExpirationTime = new Date(result.expiredAt);

      expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
        expectedExpirationTime.getTime() - 1000,
      );
      expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
        expectedExpirationTime.getTime() + 1000,
      );
    });

    it('should handle maximum retention period (8760 hours)', async () => {
      // Mock app config with more than maximum retention
      mockReq.config.interfaceConfig.temporaryChatRetention = 10000; // Should be clamped to 8760 hours

      mockReq.body = { isTemporary: true };

      const beforeSave = new Date();
      const result = await saveMessage(mockReq, mockMessageData);

      expect(result.expiredAt).toBeDefined();

      // Verify expiredAt is approximately 8760 hours (1 year) in the future
      const expectedExpirationTime = new Date(beforeSave.getTime() + 8760 * 60 * 60 * 1000);
      const actualExpirationTime = new Date(result.expiredAt);

      expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
        expectedExpirationTime.getTime() - 1000,
      );
      expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
        expectedExpirationTime.getTime() + 1000,
      );
    });

    it('should handle missing config gracefully', async () => {
      // Simulate missing config - should use default retention period
      delete mockReq.config;

      mockReq.body = { isTemporary: true };

      const beforeSave = new Date();
      const result = await saveMessage(mockReq, mockMessageData);
      const afterSave = new Date();

      // Should still save the message with default retention period (30 days)
      expect(result.messageId).toBe('msg123');
      expect(result.expiredAt).toBeDefined();
      expect(result.expiredAt).toBeInstanceOf(Date);

      // Verify expiredAt is approximately 30 days in the future (720 hours)
      const expectedExpirationTime = new Date(beforeSave.getTime() + 720 * 60 * 60 * 1000);
      const actualExpirationTime = new Date(result.expiredAt);

      expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
        expectedExpirationTime.getTime() - 1000,
      );
      expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
        new Date(afterSave.getTime() + 720 * 60 * 60 * 1000 + 1000).getTime(),
      );
    });

    it('should use default retention when config is not provided', async () => {
      // Mock getAppConfig to return empty config
      mockReq.config = {}; // Empty config

      mockReq.body = { isTemporary: true };

      const beforeSave = new Date();
      const result = await saveMessage(mockReq, mockMessageData);

      expect(result.expiredAt).toBeDefined();

      // Default retention is 30 days (720 hours)
      const expectedExpirationTime = new Date(beforeSave.getTime() + 30 * 24 * 60 * 60 * 1000);
      const actualExpirationTime = new Date(result.expiredAt);

      expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
        expectedExpirationTime.getTime() - 1000,
      );
      expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
        expectedExpirationTime.getTime() + 1000,
      );
    });
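
Taken together, the tests above pin down how the retention window is resolved: configured hours are clamped to the range [1, 8760], and the default of 720 hours (30 days) applies when no config is present. A minimal sketch of that resolution, assuming illustrative names rather than LibreChat's actual helpers:

```js
// Hypothetical sketch of the retention resolution implied by the tests above;
// constant and function names are illustrative, not taken from this diff.
const MIN_RETENTION_HOURS = 1;
const MAX_RETENTION_HOURS = 8760; // one year
const DEFAULT_RETENTION_HOURS = 720; // 30 days

function resolveRetentionHours(config) {
  const configured = config?.interfaceConfig?.temporaryChatRetention;
  const hours = typeof configured === 'number' ? configured : DEFAULT_RETENTION_HOURS;
  // Values below 1 hour or above 8760 hours are clamped, per the min/max tests.
  return Math.min(Math.max(hours, MIN_RETENTION_HOURS), MAX_RETENTION_HOURS);
}

// e.g. resolveRetentionHours({ interfaceConfig: { temporaryChatRetention: 0.5 } }) === 1
// e.g. resolveRetentionHours(undefined) === 720
function computeExpiredAt(config) {
  return new Date(Date.now() + resolveRetentionHours(config) * 60 * 60 * 1000);
}
```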

    it('should not update expiredAt on message update', async () => {
      // First save a temporary message
      mockReq.config.interfaceConfig.temporaryChatRetention = 24;

      mockReq.body = { isTemporary: true };
      const savedMessage = await saveMessage(mockReq, mockMessageData);
      const originalExpiredAt = savedMessage.expiredAt;

      // Now update the message without the isTemporary flag
      mockReq.body = {};
      const updatedMessage = await updateMessage(mockReq, {
        messageId: 'msg123',
        text: 'Updated text',
      });

      // expiredAt should not be in the returned updated message object
      expect(updatedMessage.expiredAt).toBeUndefined();

      // Verify in the database that expiredAt wasn't changed
      const dbMessage = await Message.findOne({ messageId: 'msg123', user: 'user123' });
      expect(dbMessage.expiredAt).toEqual(originalExpiredAt);
    });

    it('should refresh expiredAt when saving an existing temporary message', async () => {
      // First save a temporary message
      mockReq.config.interfaceConfig.temporaryChatRetention = 24;

      mockReq.body = { isTemporary: true };
      const firstSave = await saveMessage(mockReq, mockMessageData);
      const originalExpiredAt = firstSave.expiredAt;

      // Wait a bit to ensure a time difference
      await new Promise((resolve) => setTimeout(resolve, 100));

      // Save again with the same messageId but different text
      const updatedData = { ...mockMessageData, text: 'Updated text' };
      const secondSave = await saveMessage(mockReq, updatedData);

      // Should update the text but create a new expiredAt
      expect(secondSave.text).toBe('Updated text');
      expect(secondSave.expiredAt).toBeDefined();
      expect(new Date(secondSave.expiredAt).getTime()).toBeGreaterThan(
        new Date(originalExpiredAt).getTime(),
      );
    });
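
The refresh behavior asserted here implies that saveMessage recomputes `expiredAt` on every temporary save rather than only on first insert. A plausible sketch, not the actual implementation, is an upsert along these lines (reusing the hypothetical `computeExpiredAt` from the earlier sketch; `req.user.id` is an assumed shape):

```js
// Hypothetical sketch of an upsert that refreshes `expiredAt` on each
// temporary save; names here are illustrative, not LibreChat's code.
async function saveTemporaryMessage(req, messageData) {
  const update = { ...messageData, user: req.user.id };
  if (req.body?.isTemporary) {
    // Recomputed on every save, so re-saving extends the expiration window.
    update.expiredAt = computeExpiredAt(req.config);
  } else {
    update.expiredAt = null;
  }
  return Message.findOneAndUpdate(
    { messageId: messageData.messageId, user: req.user.id },
    update,
    { upsert: true, new: true },
  );
}
```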

    it('should handle bulk operations with temporary messages', async () => {
      // This test verifies bulkSaveMessages doesn't interfere with expiredAt
      const messages = [
        {
          messageId: 'bulk1',
          conversationId: uuidv4(),
          text: 'Bulk message 1',
          user: 'user123',
          expiredAt: new Date(Date.now() + 24 * 60 * 60 * 1000),
        },
        {
          messageId: 'bulk2',
          conversationId: uuidv4(),
          text: 'Bulk message 2',
          user: 'user123',
          expiredAt: null,
        },
      ];

      await bulkSaveMessages(messages);

      const savedMessages = await Message.find({
        messageId: { $in: ['bulk1', 'bulk2'] },
      }).lean();

      expect(savedMessages).toHaveLength(2);

      const bulk1 = savedMessages.find((m) => m.messageId === 'bulk1');
      const bulk2 = savedMessages.find((m) => m.messageId === 'bulk2');

      expect(bulk1.expiredAt).toBeDefined();
      expect(bulk2.expiredAt).toBeNull();
    });
  });
});
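
For context, preserving `expiredAt: null` verbatim matters because cleanup of temporary messages is typically delegated to a MongoDB TTL index on `expiredAt`. A minimal sketch, assuming TTL-based cleanup; the schema shape below is illustrative, not taken from this diff:

```js
// Minimal sketch, assuming TTL-based cleanup; field names beyond `expiredAt`
// are illustrative.
const mongoose = require('mongoose');

const messageSchema = new mongoose.Schema({
  messageId: { type: String, required: true },
  conversationId: String,
  user: String,
  text: String,
  // `null` means the message never expires; a Date hands it to MongoDB's TTL monitor.
  expiredAt: { type: Date, default: null },
});

// With expireAfterSeconds: 0, documents are deleted once `expiredAt` has passed;
// documents where `expiredAt` is null or missing are never expired.
messageSchema.index({ expiredAt: 1 }, { expireAfterSeconds: 0 });

module.exports = mongoose.model('Message', messageSchema);
```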

@@ -1,5 +1,7 @@
const mongoose = require('mongoose');
const { logger } = require('@librechat/data-schemas');
const { Preset } = require('~/db/models');

const Preset = require('~/db/models').Preset;

const getPreset = async (user, presetId) => {
  try {

@@ -1,5 +1,7 @@
const mongoose = require('mongoose');
const { GLOBAL_PROJECT_NAME } = require('librechat-data-provider').Constants;
const { Project } = require('~/db/models');

const Project = require('~/db/models').Project;

/**
 * Retrieve a project by ID and convert the found project document to a plain object.
@@ -1,21 +1,18 @@
const mongoose = require('mongoose');
const { ObjectId } = require('mongodb');
const { logger } = require('@librechat/data-schemas');
const { SystemRoles, SystemCategories, Constants } = require('librechat-data-provider');
const {
  Constants,
  SystemRoles,
  ResourceType,
  SystemCategories,
} = require('librechat-data-provider');
const {
  removeGroupFromAllProjects,
  removeGroupIdsFromProject,
  addGroupIdsToProject,
  getProjectByName,
  addGroupIdsToProject,
  removeGroupIdsFromProject,
  removeGroupFromAllProjects,
} = require('./Project');
const { removeAllPermissions } = require('~/server/services/PermissionService');
const { PromptGroup, Prompt } = require('~/db/models');
const { escapeRegExp } = require('~/server/utils');

const PromptGroup = require('~/db/models').PromptGroup;
const Prompt = require('~/db/models').Prompt;

/**
 * Create a pipeline for the aggregation to get prompt groups
 * @param {Object} query
@@ -106,6 +103,10 @@ const getAllPromptGroups = async (req, filter) => {
  try {
    const { name, ...query } = filter;

    if (!query.author) {
      throw new Error('Author is required');
    }

    let searchShared = true;
    let searchSharedOnly = false;
    if (name) {

@@ -155,6 +156,10 @@ const getPromptGroups = async (req, filter) => {
  const validatedPageNumber = Math.max(parseInt(pageNumber, 10), 1);
  const validatedPageSize = Math.max(parseInt(pageSize, 10), 1);

  if (!query.author) {
    throw new Error('Author is required');
  }

  let searchShared = true;
  let searchSharedOnly = false;
  if (name) {
@@ -219,16 +224,12 @@ const getPromptGroups = async (req, filter) => {
 * @returns {Promise<TDeletePromptGroupResponse>}
 */
const deletePromptGroup = async ({ _id, author, role }) => {
  // Build query - with ACL, author is optional
  const query = { _id };
  const groupQuery = { groupId: new ObjectId(_id) };

  // Legacy: Add author filter if provided (backward compatibility)
  if (author && role !== SystemRoles.ADMIN) {
    query.author = author;
    groupQuery.author = author;
  const query = { _id, author };
  const groupQuery = { groupId: new ObjectId(_id), author };
  if (role === SystemRoles.ADMIN) {
    delete query.author;
    delete groupQuery.author;
  }

  const response = await PromptGroup.deleteOne(query);

  if (!response || response.deletedCount === 0) {
@@ -237,140 +238,13 @@ const deletePromptGroup = async ({ _id, author, role }) => {

  await Prompt.deleteMany(groupQuery);
  await removeGroupFromAllProjects(_id);

  try {
    await removeAllPermissions({ resourceType: ResourceType.PROMPTGROUP, resourceId: _id });
  } catch (error) {
    logger.error('Error removing promptGroup permissions:', error);
  }

  return { message: 'Prompt group deleted successfully' };
};

/**
 * Get prompt groups by accessible IDs with optional cursor-based pagination.
 * @param {Object} params - The parameters for getting accessible prompt groups.
 * @param {Array} [params.accessibleIds] - Array of prompt group ObjectIds the user has ACL access to.
 * @param {Object} [params.otherParams] - Additional query parameters (including author filter).
 * @param {number} [params.limit] - Number of prompt groups to return (max 100). If not provided, returns all prompt groups.
 * @param {string} [params.after] - Cursor for pagination - get prompt groups after this cursor (a base64-encoded JSON string with updatedAt and _id).
 * @returns {Promise<Object>} A promise that resolves to an object containing the prompt groups data and pagination info.
 */
async function getListPromptGroupsByAccess({
  accessibleIds = [],
  otherParams = {},
  limit = null,
  after = null,
}) {
  const isPaginated = limit !== null && limit !== undefined;
  const normalizedLimit = isPaginated ? Math.min(Math.max(1, parseInt(limit) || 20), 100) : null;

  // Build base query combining ACL accessible prompt groups with other filters
  const baseQuery = { ...otherParams, _id: { $in: accessibleIds } };

  // Add cursor condition
  if (after && typeof after === 'string' && after !== 'undefined' && after !== 'null') {
    try {
      const cursor = JSON.parse(Buffer.from(after, 'base64').toString('utf8'));
      const { updatedAt, _id } = cursor;

      const cursorCondition = {
        $or: [
          { updatedAt: { $lt: new Date(updatedAt) } },
          { updatedAt: new Date(updatedAt), _id: { $gt: new ObjectId(_id) } },
        ],
      };

      // Merge cursor condition with base query
      if (Object.keys(baseQuery).length > 0) {
        baseQuery.$and = [{ ...baseQuery }, cursorCondition];
        // Remove the original conditions from baseQuery to avoid duplication
        Object.keys(baseQuery).forEach((key) => {
          if (key !== '$and') delete baseQuery[key];
        });
      } else {
        Object.assign(baseQuery, cursorCondition);
      }
    } catch (error) {
      logger.warn('Invalid cursor:', error.message);
    }
  }

  // Build aggregation pipeline
  const pipeline = [{ $match: baseQuery }, { $sort: { updatedAt: -1, _id: 1 } }];

  // Only apply limit if pagination is requested
  if (isPaginated) {
    pipeline.push({ $limit: normalizedLimit + 1 });
  }

  // Add lookup for production prompt
  pipeline.push(
    {
      $lookup: {
        from: 'prompts',
        localField: 'productionId',
        foreignField: '_id',
        as: 'productionPrompt',
      },
    },
    { $unwind: { path: '$productionPrompt', preserveNullAndEmptyArrays: true } },
    {
      $project: {
        name: 1,
        numberOfGenerations: 1,
        oneliner: 1,
        category: 1,
        projectIds: 1,
        productionId: 1,
        author: 1,
        authorName: 1,
        createdAt: 1,
        updatedAt: 1,
        'productionPrompt.prompt': 1,
      },
    },
  );

  const promptGroups = await PromptGroup.aggregate(pipeline).exec();

  const hasMore = isPaginated ? promptGroups.length > normalizedLimit : false;
  const data = (isPaginated ? promptGroups.slice(0, normalizedLimit) : promptGroups).map(
    (group) => {
      if (group.author) {
        group.author = group.author.toString();
      }
      return group;
    },
  );

  // Generate next cursor only if paginated
  let nextCursor = null;
  if (isPaginated && hasMore && data.length > 0) {
    const lastGroup = promptGroups[normalizedLimit - 1];
    nextCursor = Buffer.from(
      JSON.stringify({
        updatedAt: lastGroup.updatedAt.toISOString(),
        _id: lastGroup._id.toString(),
      }),
    ).toString('base64');
  }

  return {
    object: 'list',
    data,
    first_id: data.length > 0 ? data[0]._id.toString() : null,
    last_id: data.length > 0 ? data[data.length - 1]._id.toString() : null,
    has_more: hasMore,
    after: nextCursor,
  };
}
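
The `after` cursor above is just a base64-encoded `{ updatedAt, _id }` pair, and `has_more`/`after` in the response drive the next request. A hypothetical caller paging through every accessible group might look like this (the function name and limit cap come from the code above; the wrapper itself is illustrative):

```js
// Hypothetical usage sketch for the keyset pagination implemented above.
async function listAllAccessible(accessibleIds) {
  let after = null;
  const all = [];
  do {
    const page = await getListPromptGroupsByAccess({
      accessibleIds,
      limit: 20,
      after,
    });
    all.push(...page.data);
    after = page.after; // base64 of { updatedAt, _id } for the last row, or null
  } while (after);
  return all;
}
```

Sorting on `{ updatedAt: -1, _id: 1 }` combined with the `$or` cursor condition keeps pages consistent even when several groups share an `updatedAt` timestamp, and avoids the offset drift that `skip`/`limit` pagination would suffer as documents are inserted or removed.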

module.exports = {
  getPromptGroups,
  deletePromptGroup,
  getAllPromptGroups,
  getListPromptGroupsByAccess,

/**
 * Create a prompt and its respective group
 * @param {TCreatePromptRecord} saveData
@@ -559,16 +433,6 @@ module.exports = {
    .lean();

  if (remainingPrompts.length === 0) {
    // Remove all ACL entries for the promptGroup when deleting the last prompt
    try {
      await removeAllPermissions({
        resourceType: ResourceType.PROMPTGROUP,
        resourceId: groupId,
      });
    } catch (error) {
      logger.error('Error removing promptGroup permissions:', error);
    }

    await PromptGroup.deleteOne({ _id: groupId });
    await removeGroupFromAllProjects(groupId);