Compare commits
1 commit: feat/docum... → feat/searc...

| Author | SHA1 | Date |
|---|---|---|
|  | 4ed22aaa59 |  |

63  .env.example
@@ -20,8 +20,8 @@ DOMAIN_CLIENT=http://localhost:3080
DOMAIN_SERVER=http://localhost:3080

NO_INDEX=true

# Use the address that is at most n number of hops away from the Express application.
# req.socket.remoteAddress is the first hop, and the rest are looked for in the X-Forwarded-For header from right to left.
# A value of 0 means that the first untrusted address would be req.socket.remoteAddress, i.e. there is no reverse proxy.
# Defaulted to 1.
TRUST_PROXY=1

@@ -88,7 +88,7 @@ PROXY=
#============#

ANTHROPIC_API_KEY=user_provided
# ANTHROPIC_MODELS=claude-opus-4-20250514,claude-sonnet-4-20250514,claude-3-7-sonnet-20250219,claude-3-5-sonnet-20241022,claude-3-5-haiku-20241022,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307
# ANTHROPIC_MODELS=claude-3-7-sonnet-latest,claude-3-7-sonnet-20250219,claude-3-5-haiku-20241022,claude-3-5-sonnet-20241022,claude-3-5-sonnet-latest,claude-3-5-sonnet-20240620,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_REVERSE_PROXY=

#============#

@@ -142,12 +142,12 @@ GOOGLE_KEY=user_provided
# GOOGLE_AUTH_HEADER=true

# Gemini API (AI Studio)
# GOOGLE_MODELS=gemini-2.5-pro-preview-05-06,gemini-2.5-flash-preview-04-17,gemini-2.0-flash-001,gemini-2.0-flash-exp,gemini-2.0-flash-lite-001,gemini-1.5-pro-002,gemini-1.5-flash-002
# GOOGLE_MODELS=gemini-2.5-pro-exp-03-25,gemini-2.0-flash-exp,gemini-2.0-flash-thinking-exp-1219,gemini-exp-1121,gemini-exp-1114,gemini-1.5-flash-latest,gemini-1.0-pro,gemini-1.0-pro-001,gemini-1.0-pro-latest,gemini-1.0-pro-vision-latest,gemini-1.5-pro-latest,gemini-pro,gemini-pro-vision

# Vertex AI
# GOOGLE_MODELS=gemini-2.5-pro-preview-05-06,gemini-2.5-flash-preview-04-17,gemini-2.0-flash-001,gemini-2.0-flash-exp,gemini-2.0-flash-lite-001,gemini-1.5-pro-002,gemini-1.5-flash-002
# GOOGLE_MODELS=gemini-1.5-flash-preview-0514,gemini-1.5-pro-preview-0514,gemini-1.0-pro-vision-001,gemini-1.0-pro-002,gemini-1.0-pro-001,gemini-pro-vision,gemini-1.0-pro

# GOOGLE_TITLE_MODEL=gemini-2.0-flash-lite-001
# GOOGLE_TITLE_MODEL=gemini-pro

# GOOGLE_LOC=us-central1

@@ -444,21 +444,6 @@ OPENID_IMAGE_URL=
# This will bypass the login form completely for users, only use this if OpenID is your only authentication method
OPENID_AUTO_REDIRECT=false

# Set to true to use PKCE (Proof Key for Code Exchange) for OpenID authentication
OPENID_USE_PKCE=false
#Set to true to reuse openid tokens for authentication management instead of using the mongodb session and the custom refresh token.
OPENID_REUSE_TOKENS=
#By default, signing key verification results are cached in order to prevent excessive HTTP requests to the JWKS endpoint.
#If a signing key matching the kid is found, this will be cached and the next time this kid is requested the signing key will be served from the cache.
#Default is true.
OPENID_JWKS_URL_CACHE_ENABLED=
OPENID_JWKS_URL_CACHE_TIME= # 600000 ms eq to 10 minutes leave empty to disable caching
#Set to true to trigger token exchange flow to acquire access token for the userinfo endpoint.
OPENID_ON_BEHALF_FLOW_FOR_USERINFRO_REQUIRED=
OPENID_ON_BEHALF_FLOW_USERINFRO_SCOPE = "user.read" # example for Scope Needed for Microsoft Graph API
# Set to true to use the OpenID Connect end session endpoint for logout
OPENID_USE_END_SESSION_ENDPOINT=

# LDAP
LDAP_URL=
LDAP_BIND_DN=

@@ -578,9 +563,9 @@ HELP_AND_FAQ_URL=https://librechat.ai
# users always get the latest version. Customize #
# only if you understand caching implications. #

# INDEX_CACHE_CONTROL=no-cache, no-store, must-revalidate
# INDEX_PRAGMA=no-cache
# INDEX_EXPIRES=0
# INDEX_HTML_CACHE_CONTROL=no-cache, no-store, must-revalidate
# INDEX_HTML_PRAGMA=no-cache
# INDEX_HTML_EXPIRES=0

# no-cache: Forces validation with server before using cached version
# no-store: Prevents storing the response entirely

@@ -590,33 +575,3 @@ HELP_AND_FAQ_URL=https://librechat.ai
# OpenWeather #
#=====================================================#
OPENWEATHER_API_KEY=

#====================================#
# LibreChat Code Interpreter API #
#====================================#

# https://code.librechat.ai
# LIBRECHAT_CODE_API_KEY=your-key

#======================#
# Web Search #
#======================#

# Note: All of the following variable names can be customized.
# Omit values to allow user to provide them.

# For more information on configuration values, see:
# https://librechat.ai/docs/features/web_search

# Search Provider (Required)
# SERPER_API_KEY=your_serper_api_key

# Scraper (Required)
# FIRECRAWL_API_KEY=your_firecrawl_api_key
# Optional: Custom Firecrawl API URL
# FIRECRAWL_API_URL=your_firecrawl_api_url

# Reranker (Required)
# JINA_API_KEY=your_jina_api_key
# or
# COHERE_API_KEY=your_cohere_api_key
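Note on the TRUST_PROXY block above: the value maps onto Express's `trust proxy` setting, which is what the surrounding comments describe. A minimal sketch of that behavior, assuming the variable is read from `process.env` in an ordinary Express setup rather than LibreChat's exact startup code:

```js
const express = require('express');

const app = express();

// With a numeric value, Express uses the address that is at most that many hops
// away from the application: TRUST_PROXY=1 suits a single reverse proxy, and
// TRUST_PROXY=0 means req.socket.remoteAddress itself is treated as the client.
app.set('trust proxy', Number(process.env.TRUST_PROXY ?? 1));

app.get('/ip', (req, res) => res.send(req.ip));
app.listen(3080);
```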
@@ -4,7 +4,6 @@ on:
push:
tags:
- 'v*.*.*'
workflow_dispatch:

jobs:
generate-release-changelog-pr:

@@ -89,7 +88,7 @@ jobs:
base: main
branch: "changelog/${{ github.ref_name }}"
reviewers: danny-avila
title: "📜 docs: Changelog for release ${{ github.ref_name }}"
title: "chore: update CHANGELOG for release ${{ github.ref_name }}"
body: |
**Description**:
- This PR updates the CHANGELOG.md by removing the "Unreleased" section and adding new release notes for release ${{ github.ref_name }} above previous releases.

@@ -3,7 +3,6 @@ name: Generate Unreleased Changelog PR
on:
schedule:
- cron: "0 0 * * 1" # Runs every Monday at 00:00 UTC
workflow_dispatch:

jobs:
generate-unreleased-changelog-pr:

@@ -99,9 +98,9 @@ jobs:
branch: "changelog/unreleased-update"
sign-commits: true
commit-message: "action: update Unreleased changelog"
title: "📜 docs: Unreleased Changelog"
title: "action: update Unreleased changelog"
body: |
**Description**:
- This PR updates the Unreleased section in CHANGELOG.md.
- It compares the current main branch with the latest version tag (determined as ${{ steps.get_latest_tag.outputs.tag }}),
regenerates the Unreleased changelog, removes any old Unreleased block, and inserts the new content.
7  .github/workflows/helmcharts.yml  vendored
@@ -26,15 +26,8 @@ jobs:
uses: azure/setup-helm@v4
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
- name: Build Subchart Deps
run: |
cd helm/librechat-rag-api
helm dependency build

- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.6.0
with:
charts_dir: helm
skip_existing: true
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
37  .github/workflows/i18n-unused-keys.yml  vendored
@@ -22,7 +22,7 @@ jobs:

# Define paths
I18N_FILE="client/src/locales/en/translation.json"
SOURCE_DIRS=("client/src" "api" "packages/data-provider/src")
SOURCE_DIRS=("client/src" "api")

# Check if translation file exists
if [[ ! -f "$I18N_FILE" ]]; then

@@ -39,35 +39,12 @@ jobs:
# Check if each key is used in the source code
for KEY in $KEYS; do
FOUND=false

# Special case for dynamically constructed special variable keys
if [[ "$KEY" == com_ui_special_var_* ]]; then
# Check if TSpecialVarLabel is used in the codebase
for DIR in "${SOURCE_DIRS[@]}"; do
if grep -r --include=\*.{js,jsx,ts,tsx} -q "TSpecialVarLabel" "$DIR"; then
FOUND=true
break
fi
done

# Also check if the key is directly used somewhere
if [[ "$FOUND" == false ]]; then
for DIR in "${SOURCE_DIRS[@]}"; do
if grep -r --include=\*.{js,jsx,ts,tsx} -q "$KEY" "$DIR"; then
FOUND=true
break
fi
done
for DIR in "${SOURCE_DIRS[@]}"; do
if grep -r --include=\*.{js,jsx,ts,tsx} -q "$KEY" "$DIR"; then
FOUND=true
break
fi
else
# Regular check for other keys
for DIR in "${SOURCE_DIRS[@]}"; do
if grep -r --include=\*.{js,jsx,ts,tsx} -q "$KEY" "$DIR"; then
FOUND=true
break
fi
done
fi
done

if [[ "$FOUND" == false ]]; then
UNUSED_KEYS+=("$KEY")

@@ -113,4 +90,4 @@ jobs:

- name: Fail workflow if unused keys found
if: env.unused_keys != '[]'
run: exit 1
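The workflow step above does its unused-key scan with grep; the same check can be sketched in Node for local use. The directory list mirrors the shorter SOURCE_DIRS value shown in the hunk, and the helper plus the example key are illustrative, not part of the repository:

```js
const fs = require('fs');
const path = require('path');

const SOURCE_DIRS = ['client/src', 'api'];
const exts = new Set(['.js', '.jsx', '.ts', '.tsx']);

// Recursively check whether `key` appears in any matching source file under `dir`.
function keyIsUsed(key, dir) {
  for (const entry of fs.readdirSync(dir, { withFileTypes: true })) {
    const full = path.join(dir, entry.name);
    if (entry.isDirectory()) {
      if (keyIsUsed(key, full)) return true;
    } else if (exts.has(path.extname(entry.name)) && fs.readFileSync(full, 'utf8').includes(key)) {
      return true;
    }
  }
  return false;
}

// 'com_ui_example_key' is a made-up key for illustration.
const unused = ['com_ui_example_key'].filter((key) => !SOURCE_DIRS.some((dir) => keyIsUsed(key, dir)));
console.log(unused);
```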
10  .gitignore  vendored
@@ -52,9 +52,8 @@ bower_components/
*.d.ts
!vite-env.d.ts

# AI
# Cline
.clineignore
.cursor

# Floobits
.floo

@@ -114,11 +113,4 @@ uploads/

# owner
release/

# Helm
helm/librechat/Chart.lock
helm/**/charts/
helm/**/.values.yaml

!/client/src/@types/i18next.d.ts
217  CHANGELOG.md
@@ -2,226 +2,15 @@
|
||||
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
- ✨ feat: implement search parameter updates by **@mawburn** in [#7151](https://github.com/danny-avila/LibreChat/pull/7151)
|
||||
- 🎏 feat: Add MCP support for Streamable HTTP Transport by **@benverhees** in [#7353](https://github.com/danny-avila/LibreChat/pull/7353)
|
||||
- 🔒 feat: Add Content Security Policy using Helmet middleware by **@rubentalstra** in [#7377](https://github.com/danny-avila/LibreChat/pull/7377)
|
||||
- ✨ feat: Add Normalization for MCP Server Names by **@danny-avila** in [#7421](https://github.com/danny-avila/LibreChat/pull/7421)
|
||||
- 📊 feat: Improve Helm Chart by **@hofq** in [#3638](https://github.com/danny-avila/LibreChat/pull/3638)
|
||||
|
||||
### 🌍 Internationalization
|
||||
|
||||
- 🌍 i18n: Add `Danish` and `Czech` and `Catalan` localization support by **@rubentalstra** in [#7373](https://github.com/danny-avila/LibreChat/pull/7373)
|
||||
- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#7375](https://github.com/danny-avila/LibreChat/pull/7375)
|
||||
|
||||
### 🔧 Fixes
|
||||
|
||||
- 💬 fix: update aria-label for accessibility in ConvoLink component by **@berry-13** in [#7320](https://github.com/danny-avila/LibreChat/pull/7320)
|
||||
- 🔑 fix: use `apiKey` instead of `openAIApiKey` in OpenAI-like Config by **@danny-avila** in [#7337](https://github.com/danny-avila/LibreChat/pull/7337)
|
||||
- 🔄 fix: update navigation logic in `useFocusChatEffect` to ensure correct search parameters are used by **@mawburn** in [#7340](https://github.com/danny-avila/LibreChat/pull/7340)
|
||||
- 🔄 fix: Improve MCP Connection Cleanup by **@danny-avila** in [#7400](https://github.com/danny-avila/LibreChat/pull/7400)
|
||||
- 🛡️ fix: Preset and Validation Logic for URL Query Params by **@danny-avila** in [#7407](https://github.com/danny-avila/LibreChat/pull/7407)
|
||||
- 🌘 fix: artifact of preview text is illegible in dark mode by **@nhtruong** in [#7405](https://github.com/danny-avila/LibreChat/pull/7405)
|
||||
- 🛡️ fix: Temporarily Remove CSP until Configurable by **@danny-avila** in [#7419](https://github.com/danny-avila/LibreChat/pull/7419)
|
||||
- 💽 fix: Exclude index page `/` from static cache settings by **@sbruel** in [#7382](https://github.com/danny-avila/LibreChat/pull/7382)
|
||||
- 🪄 feat: Agent Artifacts by **@danny-avila** in [#5804](https://github.com/danny-avila/LibreChat/pull/5804)
|
||||
|
||||
### ⚙️ Other Changes
|
||||
|
||||
- 📜 docs: CHANGELOG for release v0.7.8 by **@github-actions[bot]** in [#7290](https://github.com/danny-avila/LibreChat/pull/7290)
|
||||
- 📦 chore: Update API Package Dependencies by **@danny-avila** in [#7359](https://github.com/danny-avila/LibreChat/pull/7359)
|
||||
- 📜 docs: Unreleased Changelog by **@github-actions[bot]** in [#7321](https://github.com/danny-avila/LibreChat/pull/7321)
|
||||
|
||||
|
||||
|
||||
---
|
||||
## [v0.7.8] -
|
||||
|
||||
Changes from v0.7.8-rc1 to v0.7.8.
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
- ✨ feat: Enhance form submission for touch screens by **@berry-13** in [#7198](https://github.com/danny-avila/LibreChat/pull/7198)
|
||||
- 🔍 feat: Additional Tavily API Tool Parameters by **@glowforge-opensource** in [#7232](https://github.com/danny-avila/LibreChat/pull/7232)
|
||||
- 🐋 feat: Add python to Dockerfile for increased MCP compatibility by **@technicalpickles** in [#7270](https://github.com/danny-avila/LibreChat/pull/7270)
|
||||
|
||||
### 🔧 Fixes
|
||||
|
||||
- 🔧 fix: Google Gemma Support & OpenAI Reasoning Instructions by **@danny-avila** in [#7196](https://github.com/danny-avila/LibreChat/pull/7196)
|
||||
- 🛠️ fix: Conversation Navigation State by **@danny-avila** in [#7210](https://github.com/danny-avila/LibreChat/pull/7210)
|
||||
- 🔄 fix: o-Series Model Regex for System Messages by **@danny-avila** in [#7245](https://github.com/danny-avila/LibreChat/pull/7245)
|
||||
- 🔖 fix: Custom Headers for Initial MCP SSE Connection by **@danny-avila** in [#7246](https://github.com/danny-avila/LibreChat/pull/7246)
|
||||
- 🛡️ fix: Deep Clone `MCPOptions` for User MCP Connections by **@danny-avila** in [#7247](https://github.com/danny-avila/LibreChat/pull/7247)
|
||||
- 🔄 fix: URL Param Race Condition and File Draft Persistence by **@danny-avila** in [#7257](https://github.com/danny-avila/LibreChat/pull/7257)
|
||||
- 🔄 fix: Assistants Endpoint & Minor Issues by **@danny-avila** in [#7274](https://github.com/danny-avila/LibreChat/pull/7274)
|
||||
- 🔄 fix: Ollama Think Tag Edge Case with Tools by **@danny-avila** in [#7275](https://github.com/danny-avila/LibreChat/pull/7275)
|
||||
|
||||
### ⚙️ Other Changes
|
||||
|
||||
- 📜 docs: CHANGELOG for release v0.7.8-rc1 by **@github-actions[bot]** in [#7153](https://github.com/danny-avila/LibreChat/pull/7153)
|
||||
- 🔄 refactor: Artifact Visibility Management by **@danny-avila** in [#7181](https://github.com/danny-avila/LibreChat/pull/7181)
|
||||
- 📦 chore: Bump Package Security by **@danny-avila** in [#7183](https://github.com/danny-avila/LibreChat/pull/7183)
|
||||
- 🌿 refactor: Unmount Fork Popover on Hide for Better Performance by **@danny-avila** in [#7189](https://github.com/danny-avila/LibreChat/pull/7189)
|
||||
- 🧰 chore: ESLint configuration to enforce Prettier formatting rules by **@mawburn** in [#7186](https://github.com/danny-avila/LibreChat/pull/7186)
|
||||
- 🎨 style: Improve KaTeX Rendering for LaTeX Equations by **@andresgit** in [#7223](https://github.com/danny-avila/LibreChat/pull/7223)
|
||||
- 📝 docs: Update `.env.example` Google models by **@marlonka** in [#7254](https://github.com/danny-avila/LibreChat/pull/7254)
|
||||
- 💬 refactor: MCP Chat Visibility Option, Google Rates, Remove OpenAPI Plugins by **@danny-avila** in [#7286](https://github.com/danny-avila/LibreChat/pull/7286)
|
||||
- 📜 docs: Unreleased Changelog by **@github-actions[bot]** in [#7214](https://github.com/danny-avila/LibreChat/pull/7214)
|
||||
|
||||
|
||||
|
||||
[See full release details][release-v0.7.8]
|
||||
|
||||
[release-v0.7.8]: https://github.com/danny-avila/LibreChat/releases/tag/v0.7.8
|
||||
|
||||
---
|
||||
## [v0.7.8-rc1] -
|
||||
|
||||
Changes from v0.7.7 to v0.7.8-rc1.
|
||||
|
||||
### ✨ New Features
|
||||
|
||||
- 🔍 feat: Mistral OCR API / Upload Files as Text by **@danny-avila** in [#6274](https://github.com/danny-avila/LibreChat/pull/6274)
|
||||
- 🤖 feat: Support OpenAI Web Search models by **@danny-avila** in [#6313](https://github.com/danny-avila/LibreChat/pull/6313)
|
||||
- 🔗 feat: Agent Chain (Mixture-of-Agents) by **@danny-avila** in [#6374](https://github.com/danny-avila/LibreChat/pull/6374)
|
||||
- ⌛ feat: `initTimeout` for Slow Starting MCP Servers by **@perweij** in [#6383](https://github.com/danny-avila/LibreChat/pull/6383)
|
||||
- 🚀 feat: `S3` Integration for File handling and Image uploads by **@rubentalstra** in [#6142](https://github.com/danny-avila/LibreChat/pull/6142)
|
||||
- 🔒feat: Enable OpenID Auto-Redirect by **@leondape** in [#6066](https://github.com/danny-avila/LibreChat/pull/6066)
|
||||
- 🚀 feat: Integrate `Azure Blob Storage` for file handling and image uploads by **@rubentalstra** in [#6153](https://github.com/danny-avila/LibreChat/pull/6153)
|
||||
- 🚀 feat: Add support for custom `AWS` endpoint in `S3` by **@rubentalstra** in [#6431](https://github.com/danny-avila/LibreChat/pull/6431)
|
||||
- 🚀 feat: Add support for LDAP STARTTLS in LDAP authentication by **@rubentalstra** in [#6438](https://github.com/danny-avila/LibreChat/pull/6438)
|
||||
- 🚀 feat: Refactor schema exports and update package version to 0.0.4 by **@rubentalstra** in [#6455](https://github.com/danny-avila/LibreChat/pull/6455)
|
||||
- 🔼 feat: Add Auto Submit For URL Query Params by **@mjaverto** in [#6440](https://github.com/danny-avila/LibreChat/pull/6440)
|
||||
- 🛠 feat: Enhance Redis Integration, Rate Limiters & Log Headers by **@danny-avila** in [#6462](https://github.com/danny-avila/LibreChat/pull/6462)
|
||||
- 💵 feat: Add Automatic Balance Refill by **@rubentalstra** in [#6452](https://github.com/danny-avila/LibreChat/pull/6452)
|
||||
- 🗣️ feat: add support for gpt-4o-transcribe models by **@berry-13** in [#6483](https://github.com/danny-avila/LibreChat/pull/6483)
|
||||
- 🎨 feat: UI Refresh for Enhanced UX by **@berry-13** in [#6346](https://github.com/danny-avila/LibreChat/pull/6346)
|
||||
- 🌍 feat: Add support for Hungarian language localization by **@rubentalstra** in [#6508](https://github.com/danny-avila/LibreChat/pull/6508)
|
||||
- 🚀 feat: Add Gemini 2.5 Token/Context Values, Increase Max Possible Output to 64k by **@danny-avila** in [#6563](https://github.com/danny-avila/LibreChat/pull/6563)
|
||||
- 🚀 feat: Enhance MCP Connections For Multi-User Support by **@danny-avila** in [#6610](https://github.com/danny-avila/LibreChat/pull/6610)
|
||||
- 🚀 feat: Enhance S3 URL Expiry with Refresh; fix: S3 File Deletion by **@danny-avila** in [#6647](https://github.com/danny-avila/LibreChat/pull/6647)
|
||||
- 🚀 feat: enhance UI components and refactor settings by **@berry-13** in [#6625](https://github.com/danny-avila/LibreChat/pull/6625)
|
||||
- 💬 feat: move TemporaryChat to the Header by **@berry-13** in [#6646](https://github.com/danny-avila/LibreChat/pull/6646)
|
||||
- 🚀 feat: Use Model Specs + Specific Endpoints, Limit Providers for Agents by **@danny-avila** in [#6650](https://github.com/danny-avila/LibreChat/pull/6650)
|
||||
- 🪙 feat: Sync Balance Config on Login by **@danny-avila** in [#6671](https://github.com/danny-avila/LibreChat/pull/6671)
|
||||
- 🔦 feat: MCP Support for Non-Agent Endpoints by **@danny-avila** in [#6775](https://github.com/danny-avila/LibreChat/pull/6775)
|
||||
- 🗃️ feat: Code Interpreter File Persistence between Sessions by **@danny-avila** in [#6790](https://github.com/danny-avila/LibreChat/pull/6790)
|
||||
- 🖥️ feat: Code Interpreter API for Non-Agent Endpoints by **@danny-avila** in [#6803](https://github.com/danny-avila/LibreChat/pull/6803)
|
||||
- ⚡ feat: Self-hosted Artifacts Static Bundler URL by **@danny-avila** in [#6827](https://github.com/danny-avila/LibreChat/pull/6827)
|
||||
- 🐳 feat: Add Jemalloc and UV to Docker Builds by **@danny-avila** in [#6836](https://github.com/danny-avila/LibreChat/pull/6836)
|
||||
- 🤖 feat: GPT-4.1 by **@danny-avila** in [#6880](https://github.com/danny-avila/LibreChat/pull/6880)
|
||||
- 👋 feat: remove Edge TTS by **@berry-13** in [#6885](https://github.com/danny-avila/LibreChat/pull/6885)
|
||||
- feat: nav optimization by **@berry-13** in [#5785](https://github.com/danny-avila/LibreChat/pull/5785)
|
||||
- 🗺️ feat: Add Parameter Location Mapping for OpenAPI actions by **@peeeteeer** in [#6858](https://github.com/danny-avila/LibreChat/pull/6858)
|
||||
- 🤖 feat: Support `o4-mini` and `o3` Models by **@danny-avila** in [#6928](https://github.com/danny-avila/LibreChat/pull/6928)
|
||||
- 🎨 feat: OpenAI Image Tools (GPT-Image-1) by **@danny-avila** in [#7079](https://github.com/danny-avila/LibreChat/pull/7079)
|
||||
- 🗓️ feat: Add Special Variables for Prompts & Agents, Prompt UI Improvements by **@danny-avila** in [#7123](https://github.com/danny-avila/LibreChat/pull/7123)
|
||||
|
||||
### 🌍 Internationalization
|
||||
|
||||
- 🌍 i18n: Add Thai Language Support and Update Translations by **@rubentalstra** in [#6219](https://github.com/danny-avila/LibreChat/pull/6219)
|
||||
- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6220](https://github.com/danny-avila/LibreChat/pull/6220)
|
||||
- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6240](https://github.com/danny-avila/LibreChat/pull/6240)
|
||||
- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6241](https://github.com/danny-avila/LibreChat/pull/6241)
|
||||
- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6277](https://github.com/danny-avila/LibreChat/pull/6277)
|
||||
- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6414](https://github.com/danny-avila/LibreChat/pull/6414)
|
||||
- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6505](https://github.com/danny-avila/LibreChat/pull/6505)
|
||||
- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6530](https://github.com/danny-avila/LibreChat/pull/6530)
|
||||
- 🌍 i18n: Add Persian Localization Support by **@rubentalstra** in [#6669](https://github.com/danny-avila/LibreChat/pull/6669)
|
||||
- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#6667](https://github.com/danny-avila/LibreChat/pull/6667)
|
||||
- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#7126](https://github.com/danny-avila/LibreChat/pull/7126)
|
||||
- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#7148](https://github.com/danny-avila/LibreChat/pull/7148)
|
||||
|
||||
### 👐 Accessibility
|
||||
|
||||
- 🎨 a11y: Update Model Spec Description Text by **@berry-13** in [#6294](https://github.com/danny-avila/LibreChat/pull/6294)
|
||||
- 🗑️ a11y: Add Accessible Name to Button for File Attachment Removal by **@kangabell** in [#6709](https://github.com/danny-avila/LibreChat/pull/6709)
|
||||
- ⌨️ a11y: enhance accessibility & visual consistency by **@berry-13** in [#6866](https://github.com/danny-avila/LibreChat/pull/6866)
|
||||
- 🙌 a11y: Searchbar/Conversations List Focus by **@danny-avila** in [#7096](https://github.com/danny-avila/LibreChat/pull/7096)
|
||||
- 👐 a11y: Improve Fork and SplitText Accessibility by **@danny-avila** in [#7147](https://github.com/danny-avila/LibreChat/pull/7147)
|
||||
|
||||
### 🔧 Fixes
|
||||
|
||||
- 🐛 fix: Avatar Type Definitions in Agent/Assistant Schemas by **@danny-avila** in [#6235](https://github.com/danny-avila/LibreChat/pull/6235)
|
||||
- 🔧 fix: MeiliSearch Field Error and Patch Incorrect Import by #6210 by **@rubentalstra** in [#6245](https://github.com/danny-avila/LibreChat/pull/6245)
|
||||
- 🔏 fix: Enhance Two-Factor Authentication by **@rubentalstra** in [#6247](https://github.com/danny-avila/LibreChat/pull/6247)
|
||||
- 🐛 fix: Await saveMessage in abortMiddleware to ensure proper execution by **@sh4shii** in [#6248](https://github.com/danny-avila/LibreChat/pull/6248)
|
||||
- 🔧 fix: Axios Proxy Usage And Bump `mongoose` by **@danny-avila** in [#6298](https://github.com/danny-avila/LibreChat/pull/6298)
|
||||
- 🔧 fix: comment out MCP servers to resolve service run issues by **@KunalScriptz** in [#6316](https://github.com/danny-avila/LibreChat/pull/6316)
|
||||
- 🔧 fix: Update Token Calculations and Mapping, MCP `env` Initialization by **@danny-avila** in [#6406](https://github.com/danny-avila/LibreChat/pull/6406)
|
||||
- 🐞 fix: Agent "Resend" Message Attachments + Source Icon Styling by **@danny-avila** in [#6408](https://github.com/danny-avila/LibreChat/pull/6408)
|
||||
- 🐛 fix: Prevent Crash on Duplicate Message ID by **@Odrec** in [#6392](https://github.com/danny-avila/LibreChat/pull/6392)
|
||||
- 🔐 fix: Invalid Key Length in 2FA Encryption by **@rubentalstra** in [#6432](https://github.com/danny-avila/LibreChat/pull/6432)
|
||||
- 🏗️ fix: Fix Agents Token Spend Race Conditions, Expand Test Coverage by **@danny-avila** in [#6480](https://github.com/danny-avila/LibreChat/pull/6480)
|
||||
- 🔃 fix: Draft Clearing, Claude Titles, Remove Default Vision Max Tokens by **@danny-avila** in [#6501](https://github.com/danny-avila/LibreChat/pull/6501)
|
||||
- 🔧 fix: Update username reference to use user.name in greeting display by **@rubentalstra** in [#6534](https://github.com/danny-avila/LibreChat/pull/6534)
|
||||
- 🔧 fix: S3 Download Stream with Key Extraction and Blob Storage Encoding for Vision by **@danny-avila** in [#6557](https://github.com/danny-avila/LibreChat/pull/6557)
|
||||
- 🔧 fix: Mistral type strictness for `usage` & update token values/windows by **@danny-avila** in [#6562](https://github.com/danny-avila/LibreChat/pull/6562)
|
||||
- 🔧 fix: Consolidate Text Parsing and TTS Edge Initialization by **@danny-avila** in [#6582](https://github.com/danny-avila/LibreChat/pull/6582)
|
||||
- 🔧 fix: Ensure continuation in image processing on base64 encoding from Blob Storage by **@danny-avila** in [#6619](https://github.com/danny-avila/LibreChat/pull/6619)
|
||||
- ✉️ fix: Fallback For User Name In Email Templates by **@danny-avila** in [#6620](https://github.com/danny-avila/LibreChat/pull/6620)
|
||||
- 🔧 fix: Azure Blob Integration and File Source References by **@rubentalstra** in [#6575](https://github.com/danny-avila/LibreChat/pull/6575)
|
||||
- 🐛 fix: Safeguard against undefined addedEndpoints by **@wipash** in [#6654](https://github.com/danny-avila/LibreChat/pull/6654)
|
||||
- 🤖 fix: Gemini 2.5 Vision Support by **@danny-avila** in [#6663](https://github.com/danny-avila/LibreChat/pull/6663)
|
||||
- 🔄 fix: Avatar & Error Handling Enhancements by **@danny-avila** in [#6687](https://github.com/danny-avila/LibreChat/pull/6687)
|
||||
- 🔧 fix: Chat Middleware, Zod Conversion, Auto-Save and S3 URL Refresh by **@danny-avila** in [#6720](https://github.com/danny-avila/LibreChat/pull/6720)
|
||||
- 🔧 fix: Agent Capability Checks & DocumentDB Compatibility for Agent Resource Removal by **@danny-avila** in [#6726](https://github.com/danny-avila/LibreChat/pull/6726)
|
||||
- 🔄 fix: Improve audio MIME type detection and handling by **@berry-13** in [#6707](https://github.com/danny-avila/LibreChat/pull/6707)
|
||||
- 🪺 fix: Update Role Handling due to New Schema Shape by **@danny-avila** in [#6774](https://github.com/danny-avila/LibreChat/pull/6774)
|
||||
- 🗨️ fix: Show ModelSpec Greeting by **@berry-13** in [#6770](https://github.com/danny-avila/LibreChat/pull/6770)
|
||||
- 🔧 fix: Keyv and Proxy Issues, and More Memory Optimizations by **@danny-avila** in [#6867](https://github.com/danny-avila/LibreChat/pull/6867)
|
||||
- ✨ fix: Implement dynamic text sizing for greeting and name display by **@berry-13** in [#6833](https://github.com/danny-avila/LibreChat/pull/6833)
|
||||
- 📝 fix: Mistral OCR Image Support and Azure Agent Titles by **@danny-avila** in [#6901](https://github.com/danny-avila/LibreChat/pull/6901)
|
||||
- 📢 fix: Invalid `engineTTS` and Conversation State on Navigation by **@berry-13** in [#6904](https://github.com/danny-avila/LibreChat/pull/6904)
|
||||
- 🛠️ fix: Improve Accessibility and Display of Conversation Menu by **@danny-avila** in [#6913](https://github.com/danny-avila/LibreChat/pull/6913)
|
||||
- 🔧 fix: Agent Resource Form, Convo Menu Style, Ensure Draft Clears on Submission by **@danny-avila** in [#6925](https://github.com/danny-avila/LibreChat/pull/6925)
|
||||
- 🔀 fix: MCP Improvements, Auto-Save Drafts, Artifact Markup by **@danny-avila** in [#7040](https://github.com/danny-avila/LibreChat/pull/7040)
|
||||
- 🐋 fix: Improve Deepseek Compatbility by **@danny-avila** in [#7132](https://github.com/danny-avila/LibreChat/pull/7132)
|
||||
- 🐙 fix: Add Redis Ping Interval to Prevent Connection Drops by **@peeeteeer** in [#7127](https://github.com/danny-avila/LibreChat/pull/7127)
|
||||
|
||||
### ⚙️ Other Changes
|
||||
|
||||
- 📦 refactor: Move DB Models to `@librechat/data-schemas` by **@rubentalstra** in [#6210](https://github.com/danny-avila/LibreChat/pull/6210)
|
||||
- 📦 chore: Patch `axios` to address CVE-2025-27152 by **@danny-avila** in [#6222](https://github.com/danny-avila/LibreChat/pull/6222)
|
||||
- ⚠️ refactor: Use Error Content Part Instead Of Throwing Error for Agents by **@danny-avila** in [#6262](https://github.com/danny-avila/LibreChat/pull/6262)
|
||||
- 🏃♂️ refactor: Improve Agent Run Context & Misc. Changes by **@danny-avila** in [#6448](https://github.com/danny-avila/LibreChat/pull/6448)
|
||||
- 📝 docs: librechat.example.yaml by **@ineiti** in [#6442](https://github.com/danny-avila/LibreChat/pull/6442)
|
||||
- 🏃♂️ refactor: More Agent Context Improvements during Run by **@danny-avila** in [#6477](https://github.com/danny-avila/LibreChat/pull/6477)
|
||||
- 🔃 refactor: Allow streaming for `o1` models by **@danny-avila** in [#6509](https://github.com/danny-avila/LibreChat/pull/6509)
|
||||
- 🔧 chore: `Vite` Plugin Upgrades & Config Optimizations by **@rubentalstra** in [#6547](https://github.com/danny-avila/LibreChat/pull/6547)
|
||||
- 🔧 refactor: Consolidate Logging, Model Selection & Actions Optimizations, Minor Fixes by **@danny-avila** in [#6553](https://github.com/danny-avila/LibreChat/pull/6553)
|
||||
- 🎨 style: Address Minor UI Refresh Issues by **@berry-13** in [#6552](https://github.com/danny-avila/LibreChat/pull/6552)
|
||||
- 🔧 refactor: Enhance Model & Endpoint Configurations with Global Indicators 🌍 by **@berry-13** in [#6578](https://github.com/danny-avila/LibreChat/pull/6578)
|
||||
- 💬 style: Chat UI, Greeting, and Message adjustments by **@berry-13** in [#6612](https://github.com/danny-avila/LibreChat/pull/6612)
|
||||
- ⚡ refactor: DocumentDB Compatibility for Balance Updates by **@danny-avila** in [#6673](https://github.com/danny-avila/LibreChat/pull/6673)
|
||||
- 🧹 chore: Update ESLint rules for React hooks by **@rubentalstra** in [#6685](https://github.com/danny-avila/LibreChat/pull/6685)
|
||||
- 🪙 chore: Update Gemini Pricing by **@RedwindA** in [#6731](https://github.com/danny-avila/LibreChat/pull/6731)
|
||||
- 🪺 refactor: Nest Permission fields for Roles by **@rubentalstra** in [#6487](https://github.com/danny-avila/LibreChat/pull/6487)
|
||||
- 📦 chore: Update `caniuse-lite` dependency to version 1.0.30001706 by **@rubentalstra** in [#6482](https://github.com/danny-avila/LibreChat/pull/6482)
|
||||
- ⚙️ refactor: OAuth Flow Signal, Type Safety, Tool Progress & Updated Packages by **@danny-avila** in [#6752](https://github.com/danny-avila/LibreChat/pull/6752)
|
||||
- 📦 chore: bump vite from 6.2.3 to 6.2.5 by **@dependabot[bot]** in [#6745](https://github.com/danny-avila/LibreChat/pull/6745)
|
||||
- 💾 chore: Enhance Local Storage Handling and Update MCP SDK by **@danny-avila** in [#6809](https://github.com/danny-avila/LibreChat/pull/6809)
|
||||
- 🤖 refactor: Improve Agents Memory Usage, Bump Keyv, Grok 3 by **@danny-avila** in [#6850](https://github.com/danny-avila/LibreChat/pull/6850)
|
||||
- 💾 refactor: Enhance Memory In Image Encodings & Client Disposal by **@danny-avila** in [#6852](https://github.com/danny-avila/LibreChat/pull/6852)
|
||||
- 🔁 refactor: Token Event Handler and Standardize `maxTokens` Key by **@danny-avila** in [#6886](https://github.com/danny-avila/LibreChat/pull/6886)
|
||||
- 🔍 refactor: Search & Message Retrieval by **@berry-13** in [#6903](https://github.com/danny-avila/LibreChat/pull/6903)
|
||||
- 🎨 style: standardize dropdown styling & fix z-Index layering by **@berry-13** in [#6939](https://github.com/danny-avila/LibreChat/pull/6939)
|
||||
- 📙 docs: CONTRIBUTING.md by **@dblock** in [#6831](https://github.com/danny-avila/LibreChat/pull/6831)
|
||||
- 🧭 refactor: Modernize Nav/Header by **@danny-avila** in [#7094](https://github.com/danny-avila/LibreChat/pull/7094)
|
||||
- 🪶 refactor: Chat Input Focus for Conversation Navigations & ChatForm Optimizations by **@danny-avila** in [#7100](https://github.com/danny-avila/LibreChat/pull/7100)
|
||||
- 🔃 refactor: Streamline Navigation, Message Loading UX by **@danny-avila** in [#7118](https://github.com/danny-avila/LibreChat/pull/7118)
|
||||
- 📜 docs: Unreleased changelog by **@github-actions[bot]** in [#6265](https://github.com/danny-avila/LibreChat/pull/6265)
|
||||
|
||||
|
||||
|
||||
[See full release details][release-v0.7.8-rc1]
|
||||
|
||||
[release-v0.7.8-rc1]: https://github.com/danny-avila/LibreChat/releases/tag/v0.7.8-rc1
|
||||
- 🔄 chore: Enforce 18next Language Keys by **@rubentalstra** in [#5803](https://github.com/danny-avila/LibreChat/pull/5803)
|
||||
- 🔃 refactor: Parent Message ID Handling on Error, Update Translations, Bump Agents by **@danny-avila** in [#5833](https://github.com/danny-avila/LibreChat/pull/5833)
|
||||
|
||||
---
|
||||
|
||||
@@ -1,11 +1,10 @@
# v0.7.8
# v0.7.7

# Base node image
FROM node:20-alpine AS node

# Install jemalloc
RUN apk add --no-cache jemalloc
RUN apk add --no-cache python3 py3-pip uv

# Set environment variable to use jemalloc
ENV LD_PRELOAD=/usr/lib/libjemalloc.so.2

@@ -1,5 +1,5 @@
# Dockerfile.multi
# v0.7.8
# v0.7.7

# Base for all builds
FROM node:20-alpine AS base-min
@@ -71,11 +71,6 @@
- [Model Context Protocol (MCP) Support](https://modelcontextprotocol.io/clients#librechat) for Tools
- Use LibreChat Agents and OpenAI Assistants with Files, Code Interpreter, Tools, and API Actions

- 🔍 **Web Search**:
  - Search the internet and retrieve relevant information to enhance your AI context
  - Combines search providers, content scrapers, and result rerankers for optimal results
  - **[Learn More →](https://www.librechat.ai/docs/features/web_search)**

- 🪄 **Generative UI with Code Artifacts**:
  - [Code Artifacts](https://youtu.be/GfTj7O4gmd0?si=WJbdnemZpJzBrJo3) allow creation of React, HTML, and Mermaid diagrams directly in chat
@@ -70,7 +70,7 @@ class AnthropicClient extends BaseClient {
this.message_delta;
/** Whether the model is part of the Claude 3 Family
* @type {boolean} */
this.isClaudeLatest;
this.isClaude3;
/** Whether to use Messages API or Completions API
* @type {boolean} */
this.useMessages;

@@ -116,8 +116,7 @@ class AnthropicClient extends BaseClient {
);

const modelMatch = matchModelName(this.modelOptions.model, EModelEndpoint.anthropic);
this.isClaudeLatest =
/claude-[3-9]/.test(modelMatch) || /claude-(?:sonnet|opus|haiku)-[4-9]/.test(modelMatch);
this.isClaude3 = modelMatch.includes('claude-3');
this.isLegacyOutput = !(
/claude-3[-.]5-sonnet/.test(modelMatch) || /claude-3[-.]7/.test(modelMatch)
);
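The hunk above shows two variants of the model check; a quick sketch of how they differ on example model names. The `isClaudeLatest` regex pair also catches the Claude 4 naming order, which a plain `includes('claude-3')` does not:

```js
const isClaudeLatest = (model) =>
  /claude-[3-9]/.test(model) || /claude-(?:sonnet|opus|haiku)-[4-9]/.test(model);
const isClaude3 = (model) => model.includes('claude-3');

console.log(isClaudeLatest('claude-sonnet-4-20250514'), isClaude3('claude-sonnet-4-20250514')); // true false
console.log(isClaudeLatest('claude-3-5-sonnet-20241022'), isClaude3('claude-3-5-sonnet-20241022')); // true true
console.log(isClaudeLatest('claude-2.1'), isClaude3('claude-2.1')); // false false
```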
@@ -131,7 +130,7 @@ class AnthropicClient extends BaseClient {
this.modelOptions.maxOutputTokens = legacy.maxOutputTokens.default;
}

this.useMessages = this.isClaudeLatest || !!this.options.attachments;
this.useMessages = this.isClaude3 || !!this.options.attachments;

this.defaultVisionModel = this.options.visionModel ?? 'claude-3-sonnet-20240229';
this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));

@@ -397,13 +396,13 @@ class AnthropicClient extends BaseClient {
const formattedMessages = orderedMessages.map((message, i) => {
const formattedMessage = this.useMessages
? formatMessage({
message,
endpoint: EModelEndpoint.anthropic,
})
: {
author: message.isCreatedByUser ? this.userLabel : this.assistantLabel,
content: message?.content ?? message.text,
};

const needsTokenCount = this.contextStrategy && !orderedMessages[i].tokenCount;
/* If tokens were never counted, or, is a Vision request and the message has files, count again */

@@ -655,10 +654,7 @@ class AnthropicClient extends BaseClient {
);
};

if (
/claude-[3-9]/.test(this.modelOptions.model) ||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(this.modelOptions.model)
) {
if (this.modelOptions.model.includes('claude-3')) {
await buildMessagesPayload();
processTokens();
return {

@@ -684,7 +680,7 @@ class AnthropicClient extends BaseClient {
}

getCompletion() {
logger.debug("AnthropicClient doesn't use getCompletion (all handled in sendCompletion)");
logger.debug('AnthropicClient doesn\'t use getCompletion (all handled in sendCompletion)');
}

/**

@@ -892,7 +888,7 @@ class AnthropicClient extends BaseClient {
}

getBuildMessagesOptions() {
logger.debug("AnthropicClient doesn't use getBuildMessagesOptions");
logger.debug('AnthropicClient doesn\'t use getBuildMessagesOptions');
}

getEncoding() {
@@ -63,15 +63,15 @@ class BaseClient {
}

setOptions() {
throw new Error("Method 'setOptions' must be implemented.");
throw new Error('Method \'setOptions\' must be implemented.');
}

async getCompletion() {
throw new Error("Method 'getCompletion' must be implemented.");
throw new Error('Method \'getCompletion\' must be implemented.');
}

async sendCompletion() {
throw new Error("Method 'sendCompletion' must be implemented.");
throw new Error('Method \'sendCompletion\' must be implemented.');
}

getSaveOptions() {

@@ -237,11 +237,11 @@ class BaseClient {
const userMessage = opts.isEdited
? this.currentMessages[this.currentMessages.length - 2]
: this.createUserMessage({
messageId: userMessageId,
parentMessageId,
conversationId,
text: message,
});

if (typeof opts?.getReqData === 'function') {
opts.getReqData({
@@ -140,7 +140,8 @@ class GoogleClient extends BaseClient {
this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));

/** @type {boolean} Whether using a "GenerativeAI" Model */
this.isGenerativeModel = /gemini|learnlm|gemma/.test(this.modelOptions.model);
this.isGenerativeModel =
this.modelOptions.model.includes('gemini') || this.modelOptions.model.includes('learnlm');

this.maxContextTokens =
this.options.maxContextTokens ??
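The two variants of `isGenerativeModel` above differ only in whether gemma models count as "GenerativeAI" models; a small comparison on example names (the model strings are illustrative):

```js
const includesCheck = (model) => model.includes('gemini') || model.includes('learnlm');
const regexCheck = (model) => /gemini|learnlm|gemma/.test(model);

console.log(includesCheck('gemma-3-27b-it'), regexCheck('gemma-3-27b-it'));             // false true
console.log(includesCheck('gemini-2.0-flash-001'), regexCheck('gemini-2.0-flash-001')); // true true
```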
@@ -475,9 +475,7 @@ class OpenAIClient extends BaseClient {
promptPrefix = this.augmentedPrompt + promptPrefix;
}

const noSystemModelRegex = /\b(o1-preview|o1-mini)\b/i.test(this.modelOptions.model);

if (promptPrefix && !noSystemModelRegex) {
if (promptPrefix && this.isOmni !== true) {
promptPrefix = `Instructions:\n${promptPrefix.trim()}`;
instructions = {
role: 'system',

@@ -505,7 +503,7 @@
};

/** EXPERIMENTAL */
if (promptPrefix && noSystemModelRegex) {
if (promptPrefix && this.isOmni === true) {
const lastUserMessageIndex = payload.findLastIndex((message) => message.role === 'user');
if (lastUserMessageIndex !== -1) {
if (Array.isArray(payload[lastUserMessageIndex].content)) {
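Both branches above gate the system-style "Instructions:" prefix, one on `this.isOmni` and one on a model-name regex. A sketch of the regex variant, with example model names (no claim about which branch ships it):

```js
const noSystemModelRegex = /\b(o1-preview|o1-mini)\b/i;

// Models matching the regex do not get a separate system "Instructions" message;
// the prefix is instead attached to the last user message, per the EXPERIMENTAL block above.
console.log(noSystemModelRegex.test('o1-mini')); // true  -> no system message
console.log(noSystemModelRegex.test('gpt-4o'));  // false -> system message allowed
```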
@@ -1229,9 +1227,9 @@ ${convo}

opts.baseURL = this.langchainProxy
? constructAzureURL({
baseURL: this.langchainProxy,
azureOptions: this.azure,
})
: this.azureEndpoint.split(/(?<!\/)\/(chat|completion)\//)[0];

opts.defaultQuery = { 'api-version': this.azure.azureOpenAIApiVersion };

@@ -1285,14 +1283,6 @@ ${convo}
modelOptions.messages[0].role = 'user';
}

if (
(this.options.endpoint === EModelEndpoint.openAI ||
this.options.endpoint === EModelEndpoint.azureOpenAI) &&
modelOptions.stream === true
) {
modelOptions.stream_options = { include_usage: true };
}

if (this.options.addParams && typeof this.options.addParams === 'object') {
const addParams = { ...this.options.addParams };
modelOptions = {

@@ -1395,6 +1385,12 @@ ${convo}
...modelOptions,
stream: true,
};
if (
this.options.endpoint === EModelEndpoint.openAI ||
this.options.endpoint === EModelEndpoint.azureOpenAI
) {
params.stream_options = { include_usage: true };
}
const stream = await openai.beta.chat.completions
.stream(params)
.on('abort', () => {
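In the two hunks above, the `stream_options` assignment sits in different places on the two sides: once on `modelOptions`, once on the `params` built right before `openai.beta.chat.completions.stream(...)`. A self-contained sketch of the flag itself, with illustrative endpoint and model values:

```js
const messages = [{ role: 'user', content: 'Hello' }];
const endpoint = 'openAI'; // illustrative

const params = { model: 'gpt-4o-mini', messages, stream: true };
if (endpoint === 'openAI' || endpoint === 'azureOpenAI') {
  // Ask the API to include token usage in the final streamed chunk.
  params.stream_options = { include_usage: true };
}
console.log(params);
```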
@@ -15,7 +15,7 @@ describe('AnthropicClient', () => {
|
||||
{
|
||||
role: 'user',
|
||||
isCreatedByUser: true,
|
||||
text: "What's up",
|
||||
text: 'What\'s up',
|
||||
messageId: '3',
|
||||
parentMessageId: '2',
|
||||
},
|
||||
@@ -170,7 +170,7 @@ describe('AnthropicClient', () => {
|
||||
client.options.modelLabel = 'Claude-2';
|
||||
const result = await client.buildMessages(messages, parentMessageId);
|
||||
const { prompt } = result;
|
||||
expect(prompt).toContain("Human's name: John");
|
||||
expect(prompt).toContain('Human\'s name: John');
|
||||
expect(prompt).toContain('You are Claude-2');
|
||||
});
|
||||
});
|
||||
@@ -244,64 +244,6 @@ describe('AnthropicClient', () => {
|
||||
);
|
||||
});
|
||||
|
||||
describe('Claude 4 model headers', () => {
|
||||
it('should add "prompt-caching" beta header for claude-sonnet-4 model', () => {
|
||||
const client = new AnthropicClient('test-api-key');
|
||||
const modelOptions = {
|
||||
model: 'claude-sonnet-4-20250514',
|
||||
};
|
||||
client.setOptions({ modelOptions, promptCache: true });
|
||||
const anthropicClient = client.getClient(modelOptions);
|
||||
expect(anthropicClient._options.defaultHeaders).toBeDefined();
|
||||
expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
|
||||
expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
|
||||
'prompt-caching-2024-07-31',
|
||||
);
|
||||
});
|
||||
|
||||
it('should add "prompt-caching" beta header for claude-opus-4 model', () => {
|
||||
const client = new AnthropicClient('test-api-key');
|
||||
const modelOptions = {
|
||||
model: 'claude-opus-4-20250514',
|
||||
};
|
||||
client.setOptions({ modelOptions, promptCache: true });
|
||||
const anthropicClient = client.getClient(modelOptions);
|
||||
expect(anthropicClient._options.defaultHeaders).toBeDefined();
|
||||
expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
|
||||
expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
|
||||
'prompt-caching-2024-07-31',
|
||||
);
|
||||
});
|
||||
|
||||
it('should add "prompt-caching" beta header for claude-4-sonnet model', () => {
|
||||
const client = new AnthropicClient('test-api-key');
|
||||
const modelOptions = {
|
||||
model: 'claude-4-sonnet-20250514',
|
||||
};
|
||||
client.setOptions({ modelOptions, promptCache: true });
|
||||
const anthropicClient = client.getClient(modelOptions);
|
||||
expect(anthropicClient._options.defaultHeaders).toBeDefined();
|
||||
expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
|
||||
expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
|
||||
'prompt-caching-2024-07-31',
|
||||
);
|
||||
});
|
||||
|
||||
it('should add "prompt-caching" beta header for claude-4-opus model', () => {
|
||||
const client = new AnthropicClient('test-api-key');
|
||||
const modelOptions = {
|
||||
model: 'claude-4-opus-20250514',
|
||||
};
|
||||
client.setOptions({ modelOptions, promptCache: true });
|
||||
const anthropicClient = client.getClient(modelOptions);
|
||||
expect(anthropicClient._options.defaultHeaders).toBeDefined();
|
||||
expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
|
||||
expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
|
||||
'prompt-caching-2024-07-31',
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
it('should not add beta header for claude-3-5-sonnet-latest model', () => {
|
||||
const client = new AnthropicClient('test-api-key');
|
||||
const modelOptions = {
|
||||
@@ -787,223 +729,4 @@ describe('AnthropicClient', () => {
|
||||
expect(capturedOptions).toHaveProperty('topK', 10);
|
||||
expect(capturedOptions).toHaveProperty('topP', 0.9);
|
||||
});
|
||||
|
||||
describe('isClaudeLatest', () => {
|
||||
it('should set isClaudeLatest to true for claude-3 models', () => {
|
||||
const client = new AnthropicClient('test-api-key');
|
||||
client.setOptions({
|
||||
modelOptions: {
|
||||
model: 'claude-3-sonnet-20240229',
|
||||
},
|
||||
});
|
||||
expect(client.isClaudeLatest).toBe(true);
|
||||
});
|
||||
|
||||
it('should set isClaudeLatest to true for claude-3.5 models', () => {
|
||||
const client = new AnthropicClient('test-api-key');
|
||||
client.setOptions({
|
||||
modelOptions: {
|
||||
model: 'claude-3.5-sonnet-20240229',
|
||||
},
|
||||
});
|
||||
expect(client.isClaudeLatest).toBe(true);
|
||||
});
|
||||
|
||||
it('should set isClaudeLatest to true for claude-sonnet-4 models', () => {
|
||||
const client = new AnthropicClient('test-api-key');
|
||||
client.setOptions({
|
||||
modelOptions: {
|
||||
model: 'claude-sonnet-4-20240229',
|
||||
},
|
||||
});
|
||||
expect(client.isClaudeLatest).toBe(true);
|
||||
});
|
||||
|
||||
it('should set isClaudeLatest to true for claude-opus-4 models', () => {
|
||||
const client = new AnthropicClient('test-api-key');
|
||||
client.setOptions({
|
||||
modelOptions: {
|
||||
model: 'claude-opus-4-20240229',
|
||||
},
|
||||
});
|
||||
expect(client.isClaudeLatest).toBe(true);
|
||||
});
|
||||
|
||||
it('should set isClaudeLatest to true for claude-3.5-haiku models', () => {
|
||||
const client = new AnthropicClient('test-api-key');
|
||||
client.setOptions({
|
||||
modelOptions: {
|
||||
model: 'claude-3.5-haiku-20240229',
|
||||
},
|
||||
});
|
||||
expect(client.isClaudeLatest).toBe(true);
|
||||
});
|
||||
|
||||
it('should set isClaudeLatest to false for claude-2 models', () => {
|
||||
const client = new AnthropicClient('test-api-key');
|
||||
client.setOptions({
|
||||
modelOptions: {
|
||||
model: 'claude-2',
|
||||
},
|
||||
});
|
||||
expect(client.isClaudeLatest).toBe(false);
|
||||
});
|
||||
|
||||
it('should set isClaudeLatest to false for claude-instant models', () => {
|
||||
const client = new AnthropicClient('test-api-key');
|
||||
client.setOptions({
|
||||
modelOptions: {
|
||||
model: 'claude-instant',
|
||||
},
|
||||
});
|
||||
expect(client.isClaudeLatest).toBe(false);
|
||||
});
|
||||
|
||||
it('should set isClaudeLatest to false for claude-sonnet-3 models', () => {
|
||||
const client = new AnthropicClient('test-api-key');
|
||||
client.setOptions({
|
||||
modelOptions: {
|
||||
model: 'claude-sonnet-3-20240229',
|
||||
},
|
||||
});
|
||||
expect(client.isClaudeLatest).toBe(false);
|
||||
});
|
||||
|
||||
it('should set isClaudeLatest to false for claude-opus-3 models', () => {
|
||||
const client = new AnthropicClient('test-api-key');
|
||||
client.setOptions({
|
||||
modelOptions: {
|
||||
model: 'claude-opus-3-20240229',
|
||||
},
|
||||
});
|
||||
expect(client.isClaudeLatest).toBe(false);
|
||||
});
|
||||
|
||||
it('should set isClaudeLatest to false for claude-haiku-3 models', () => {
|
||||
const client = new AnthropicClient('test-api-key');
|
||||
client.setOptions({
|
||||
modelOptions: {
|
||||
model: 'claude-haiku-3-20240229',
|
||||
},
|
||||
});
|
||||
expect(client.isClaudeLatest).toBe(false);
|
||||
});
|
||||
});
|
||||
|
||||
describe('configureReasoning', () => {
|
||||
it('should enable thinking for claude-opus-4 and claude-sonnet-4 models', async () => {
|
||||
const client = new AnthropicClient('test-api-key');
|
||||
// Create a mock async generator function
|
||||
async function* mockAsyncGenerator() {
|
||||
yield { type: 'message_start', message: { usage: {} } };
|
||||
yield { delta: { text: 'Test response' } };
|
||||
yield { type: 'message_delta', usage: {} };
|
||||
}
|
||||
|
||||
// Mock createResponse to return the async generator
|
||||
jest.spyOn(client, 'createResponse').mockImplementation(() => {
|
||||
return mockAsyncGenerator();
|
||||
});
|
||||
|
||||
// Test claude-opus-4
|
||||
client.setOptions({
|
||||
modelOptions: {
|
||||
model: 'claude-opus-4-20250514',
|
||||
},
|
||||
thinking: true,
|
||||
thinkingBudget: 2000,
|
||||
});
|
||||
|
||||
let capturedOptions = null;
|
||||
jest.spyOn(client, 'getClient').mockImplementation((options) => {
|
||||
capturedOptions = options;
|
||||
return {};
|
||||
});
|
||||
|
||||
const payload = [{ role: 'user', content: 'Test message' }];
|
||||
await client.sendCompletion(payload, {});
|
||||
|
||||
expect(capturedOptions).toHaveProperty('thinking');
|
||||
expect(capturedOptions.thinking).toEqual({
|
||||
type: 'enabled',
|
||||
budget_tokens: 2000,
|
||||
});
|
||||
|
||||
// Test claude-sonnet-4
|
||||
client.setOptions({
|
||||
modelOptions: {
|
||||
model: 'claude-sonnet-4-20250514',
|
||||
},
|
||||
thinking: true,
|
||||
thinkingBudget: 2000,
|
||||
});
|
||||
|
||||
await client.sendCompletion(payload, {});
|
||||
|
||||
expect(capturedOptions).toHaveProperty('thinking');
|
||||
expect(capturedOptions.thinking).toEqual({
|
||||
type: 'enabled',
|
||||
budget_tokens: 2000,
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('Claude Model Tests', () => {
|
||||
it('should handle Claude 3 and 4 series models correctly', () => {
|
||||
const client = new AnthropicClient('test-key');
|
||||
// Claude 3 series models
|
||||
const claude3Models = [
|
||||
'claude-3-opus-20240229',
|
||||
'claude-3-sonnet-20240229',
|
||||
'claude-3-haiku-20240307',
|
||||
'claude-3-5-sonnet-20240620',
|
||||
'claude-3-5-haiku-20240620',
|
||||
'claude-3.5-sonnet-20240620',
|
||||
'claude-3.5-haiku-20240620',
|
||||
'claude-3.7-sonnet-20240620',
|
||||
'claude-3.7-haiku-20240620',
|
||||
'anthropic/claude-3-opus-20240229',
|
||||
'claude-3-opus-20240229/anthropic',
|
||||
];
|
||||
|
||||
// Claude 4 series models
|
||||
const claude4Models = [
|
||||
'claude-sonnet-4-20250514',
|
||||
'claude-opus-4-20250514',
|
||||
'claude-4-sonnet-20250514',
|
||||
'claude-4-opus-20250514',
|
||||
'anthropic/claude-sonnet-4-20250514',
|
||||
'claude-sonnet-4-20250514/anthropic',
|
||||
];
|
||||
|
||||
// Test Claude 3 series
|
||||
claude3Models.forEach((model) => {
|
||||
client.setOptions({ modelOptions: { model } });
|
||||
expect(
|
||||
/claude-[3-9]/.test(client.modelOptions.model) ||
|
||||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(client.modelOptions.model),
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
// Test Claude 4 series
|
||||
claude4Models.forEach((model) => {
|
||||
client.setOptions({ modelOptions: { model } });
|
||||
expect(
|
||||
/claude-[3-9]/.test(client.modelOptions.model) ||
|
||||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(client.modelOptions.model),
|
||||
).toBe(true);
|
||||
});
|
||||
|
||||
// Test non-Claude 3/4 models
|
||||
const nonClaudeModels = ['claude-2', 'claude-instant', 'gpt-4', 'gpt-3.5-turbo'];
|
||||
|
||||
nonClaudeModels.forEach((model) => {
|
||||
client.setOptions({ modelOptions: { model } });
|
||||
expect(
|
||||
/claude-[3-9]/.test(client.modelOptions.model) ||
|
||||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(client.modelOptions.model),
|
||||
).toBe(false);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -30,7 +30,7 @@ const DEFAULT_IMAGE_EDIT_DESCRIPTION =
|
||||
|
||||
When to use \`image_edit_oai\`:
|
||||
- The user wants to modify, extend, or remix one **or more** uploaded images, either:
|
||||
- Previously generated, or in the current request (both to be included in the \`image_ids\` array).
|
||||
- Previously generated, or in the current request (both to be included in the \`image_ids\` array).
|
||||
- Always when the user refers to uploaded images for editing, enhancement, remixing, style transfer, or combining elements.
|
||||
- Any current or existing images are to be used as visual guides.
|
||||
- If there are any files in the current request, they are more likely than not expected as references for image edit requests.
|
||||
|
||||
@@ -43,39 +43,9 @@ class TavilySearchResults extends Tool {
|
||||
.boolean()
|
||||
.optional()
|
||||
.describe('Whether to include answers in the search results. Default is False.'),
|
||||
include_raw_content: z
|
||||
.boolean()
|
||||
.optional()
|
||||
.describe('Whether to include raw content in the search results. Default is False.'),
|
||||
include_domains: z
|
||||
.array(z.string())
|
||||
.optional()
|
||||
.describe('A list of domains to specifically include in the search results.'),
|
||||
exclude_domains: z
|
||||
.array(z.string())
|
||||
.optional()
|
||||
.describe('A list of domains to specifically exclude from the search results.'),
|
||||
topic: z
|
||||
.enum(['general', 'news', 'finance'])
|
||||
.optional()
|
||||
.describe(
|
||||
'The category of the search. Use news ONLY if query SPECIFCALLY mentions the word "news".',
|
||||
),
|
||||
time_range: z
|
||||
.enum(['day', 'week', 'month', 'year', 'd', 'w', 'm', 'y'])
|
||||
.optional()
|
||||
.describe('The time range back from the current date to filter results.'),
|
||||
days: z
|
||||
.number()
|
||||
.min(1)
|
||||
.optional()
|
||||
.describe('Number of days back from the current date to include. Only if topic is news.'),
|
||||
include_image_descriptions: z
|
||||
.boolean()
|
||||
.optional()
|
||||
.describe(
|
||||
'When include_images is true, also add a descriptive text for each image. Default is false.',
|
||||
),
|
||||
// include_raw_content: z.boolean().optional().describe('Whether to include raw content in the search results. Default is False.'),
|
||||
// include_domains: z.array(z.string()).optional().describe('A list of domains to specifically include in the search results.'),
|
||||
// exclude_domains: z.array(z.string()).optional().describe('A list of domains to specifically exclude from the search results.'),
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
30  api/app/clients/tools/util/addOpenAPISpecs.js  Normal file
@@ -0,0 +1,30 @@
const { loadSpecs } = require('./loadSpecs');

function transformSpec(input) {
  return {
    name: input.name_for_human,
    pluginKey: input.name_for_model,
    description: input.description_for_human,
    icon: input?.logo_url ?? 'https://placehold.co/70x70.png',
    // TODO: add support for authentication
    isAuthRequired: 'false',
    authConfig: [],
  };
}

async function addOpenAPISpecs(availableTools) {
  try {
    const specs = (await loadSpecs({})).map(transformSpec);
    if (specs.length > 0) {
      return [...specs, ...availableTools];
    }
    return availableTools;
  } catch (error) {
    return availableTools;
  }
}

module.exports = {
  transformSpec,
  addOpenAPISpecs,
};
76  api/app/clients/tools/util/addOpenAPISpecs.spec.js  Normal file
@@ -0,0 +1,76 @@
const { addOpenAPISpecs, transformSpec } = require('./addOpenAPISpecs');
const { loadSpecs } = require('./loadSpecs');
const { createOpenAPIPlugin } = require('../dynamic/OpenAPIPlugin');

jest.mock('./loadSpecs');
jest.mock('../dynamic/OpenAPIPlugin');

describe('transformSpec', () => {
  it('should transform input spec to a desired format', () => {
    const input = {
      name_for_human: 'Human Name',
      name_for_model: 'Model Name',
      description_for_human: 'Human Description',
      logo_url: 'https://example.com/logo.png',
    };

    const expectedOutput = {
      name: 'Human Name',
      pluginKey: 'Model Name',
      description: 'Human Description',
      icon: 'https://example.com/logo.png',
      isAuthRequired: 'false',
      authConfig: [],
    };

    expect(transformSpec(input)).toEqual(expectedOutput);
  });

  it('should use default icon if logo_url is not provided', () => {
    const input = {
      name_for_human: 'Human Name',
      name_for_model: 'Model Name',
      description_for_human: 'Human Description',
    };

    const expectedOutput = {
      name: 'Human Name',
      pluginKey: 'Model Name',
      description: 'Human Description',
      icon: 'https://placehold.co/70x70.png',
      isAuthRequired: 'false',
      authConfig: [],
    };

    expect(transformSpec(input)).toEqual(expectedOutput);
  });
});

describe('addOpenAPISpecs', () => {
  it('should add specs to available tools', async () => {
    const availableTools = ['Tool1', 'Tool2'];
    const specs = [
      {
        name_for_human: 'Human Name',
        name_for_model: 'Model Name',
        description_for_human: 'Human Description',
        logo_url: 'https://example.com/logo.png',
      },
    ];

    loadSpecs.mockResolvedValue(specs);
    createOpenAPIPlugin.mockReturnValue('Plugin');

    const result = await addOpenAPISpecs(availableTools);
    expect(result).toEqual([...specs.map(transformSpec), ...availableTools]);
  });

  it('should return available tools if specs loading fails', async () => {
    const availableTools = ['Tool1', 'Tool2'];

    loadSpecs.mockRejectedValue(new Error('Failed to load specs'));

    const result = await addOpenAPISpecs(availableTools);
    expect(result).toEqual(availableTools);
  });
});

@@ -1,13 +1,7 @@
const { SerpAPI } = require('@langchain/community/tools/serpapi');
const { Calculator } = require('@langchain/community/tools/calculator');
const { EnvVar, createCodeExecutionTool, createSearchTool } = require('@librechat/agents');
const {
  Tools,
  Constants,
  EToolResources,
  loadWebSearchAuth,
  replaceSpecialVars,
} = require('librechat-data-provider');
const { createCodeExecutionTool, EnvVar } = require('@librechat/agents');
const { Tools, Constants, EToolResources } = require('librechat-data-provider');
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const {
  availableTools,
@@ -30,6 +24,7 @@ const { primeFiles: primeCodeFiles } = require('~/server/services/Files/Code/pro
const { createFileSearchTool, primeFiles: primeSearchFiles } = require('./fileSearch');
const { loadAuthValues } = require('~/server/services/Tools/credentials');
const { createMCPTool } = require('~/server/services/MCP');
const { loadSpecs } = require('./loadSpecs');
const { logger } = require('~/config');

const mcpToolPattern = new RegExp(`^.+${Constants.mcp_delimiter}.+$`);
@@ -144,6 +139,7 @@ const loadTools = async ({
  agent,
  model,
  endpoint,
  useSpecs,
  tools = [],
  options = {},
  functions = true,
@@ -236,6 +232,7 @@ const loadTools = async ({

  /** @type {Record<string, string>} */
  const toolContextMap = {};
  const remainingTools = [];
  const appTools = options.req?.app?.locals?.availableTools ?? {};

  for (const tool of tools) {
@@ -268,33 +265,6 @@ const loadTools = async ({
        return createFileSearchTool({ req: options.req, files, entity_id: agent?.id });
      };
      continue;
    } else if (tool === Tools.web_search) {
      const webSearchConfig = options?.req?.app?.locals?.webSearch;
      const result = await loadWebSearchAuth({
        userId: user,
        loadAuthValues,
        webSearchConfig,
      });
      const { onSearchResults, onGetHighlights } = options?.[Tools.web_search] ?? {};
      requestedTools[tool] = async () => {
        toolContextMap[tool] = `# \`${tool}\`:
Current Date & Time: ${replaceSpecialVars({ text: '{{iso_datetime}}' })}
1. **Execute immediately without preface** when using \`${tool}\`.
2. **After the search, begin with a brief summary** that directly addresses the query without headers or explaining your process.
3. **Structure your response clearly** using Markdown formatting (Level 2 headers for sections, lists for multiple points, tables for comparisons).
4. **Cite sources properly** according to the citation anchor format, utilizing group anchors when appropriate.
5. **Tailor your approach to the query type** (academic, news, coding, etc.) while maintaining an expert, journalistic, unbiased tone.
6. **Provide comprehensive information** with specific details, examples, and as much relevant context as possible from search results.
7. **Avoid moralizing language.**
`.trim();
        return createSearchTool({
          ...result.authResult,
          onSearchResults,
          onGetHighlights,
          logger,
        });
      };
      continue;
    } else if (tool && appTools[tool] && mcpToolPattern.test(tool)) {
      requestedTools[tool] = async () =>
        createMCPTool({
@@ -322,6 +292,30 @@ Current Date & Time: ${replaceSpecialVars({ text: '{{iso_datetime}}' })}
      requestedTools[tool] = toolInstance;
      continue;
    }

    if (functions === true) {
      remainingTools.push(tool);
    }
  }

  let specs = null;
  if (useSpecs === true && functions === true && remainingTools.length > 0) {
    specs = await loadSpecs({
      llm: model,
      user,
      message: options.message,
      memory: options.memory,
      signal: options.signal,
      tools: remainingTools,
      map: true,
      verbose: false,
    });
  }

  for (const tool of remainingTools) {
    if (specs && specs[tool]) {
      requestedTools[tool] = specs[tool];
    }
  }

  if (returnMap) {

117  api/app/clients/tools/util/loadSpecs.js  Normal file
@@ -0,0 +1,117 @@
const fs = require('fs');
const path = require('path');
const { z } = require('zod');
const { logger } = require('~/config');
const { createOpenAPIPlugin } = require('~/app/clients/tools/dynamic/OpenAPIPlugin');

// The minimum Manifest definition
const ManifestDefinition = z.object({
  schema_version: z.string().optional(),
  name_for_human: z.string(),
  name_for_model: z.string(),
  description_for_human: z.string(),
  description_for_model: z.string(),
  auth: z.object({}).optional(),
  api: z.object({
    // Spec URL or can be the filename of the OpenAPI spec yaml file,
    // located in api\app\clients\tools\.well-known\openapi
    url: z.string(),
    type: z.string().optional(),
    is_user_authenticated: z.boolean().nullable().optional(),
    has_user_authentication: z.boolean().nullable().optional(),
  }),
  // use to override any params that the LLM will consistently get wrong
  params: z.object({}).optional(),
  logo_url: z.string().optional(),
  contact_email: z.string().optional(),
  legal_info_url: z.string().optional(),
});

function validateJson(json) {
  try {
    return ManifestDefinition.parse(json);
  } catch (error) {
    logger.debug('[validateJson] manifest parsing error', error);
    return false;
  }
}

// omit the LLM to return the well known jsons as objects
async function loadSpecs({ llm, user, message, tools = [], map = false, memory, signal }) {
  const directoryPath = path.join(__dirname, '..', '.well-known');
  let files = [];

  for (let i = 0; i < tools.length; i++) {
    const filePath = path.join(directoryPath, tools[i] + '.json');

    try {
      // If the access Promise is resolved, it means that the file exists
      // Then we can add it to the files array
      await fs.promises.access(filePath, fs.constants.F_OK);
      files.push(tools[i] + '.json');
    } catch (err) {
      logger.error(`[loadSpecs] File ${tools[i] + '.json'} does not exist`, err);
    }
  }

  if (files.length === 0) {
    files = (await fs.promises.readdir(directoryPath)).filter(
      (file) => path.extname(file) === '.json',
    );
  }

  const validJsons = [];
  const constructorMap = {};

  logger.debug('[validateJson] files', files);

  for (const file of files) {
    if (path.extname(file) === '.json') {
      const filePath = path.join(directoryPath, file);
      const fileContent = await fs.promises.readFile(filePath, 'utf8');
      const json = JSON.parse(fileContent);

      if (!validateJson(json)) {
        logger.debug('[validateJson] Invalid json', json);
        continue;
      }

      if (llm && map) {
        constructorMap[json.name_for_model] = async () =>
          await createOpenAPIPlugin({
            data: json,
            llm,
            message,
            memory,
            signal,
            user,
          });
        continue;
      }

      if (llm) {
        validJsons.push(createOpenAPIPlugin({ data: json, llm }));
        continue;
      }

      validJsons.push(json);
    }
  }

  if (map) {
    return constructorMap;
  }

  const plugins = (await Promise.all(validJsons)).filter((plugin) => plugin);

  // logger.debug('[validateJson] plugins', plugins);
  // logger.debug(plugins[0].name);

  return plugins;
}

module.exports = {
  loadSpecs,
  validateJson,
  ManifestDefinition,
};
101  api/app/clients/tools/util/loadSpecs.spec.js  Normal file
@@ -0,0 +1,101 @@
const fs = require('fs');
const { validateJson, loadSpecs, ManifestDefinition } = require('./loadSpecs');
const { createOpenAPIPlugin } = require('../dynamic/OpenAPIPlugin');

jest.mock('../dynamic/OpenAPIPlugin');

describe('ManifestDefinition', () => {
  it('should validate correct json', () => {
    const json = {
      name_for_human: 'Test',
      name_for_model: 'Test',
      description_for_human: 'Test',
      description_for_model: 'Test',
      api: {
        url: 'http://test.com',
      },
    };

    expect(() => ManifestDefinition.parse(json)).not.toThrow();
  });

  it('should not validate incorrect json', () => {
    const json = {
      name_for_human: 'Test',
      name_for_model: 'Test',
      description_for_human: 'Test',
      description_for_model: 'Test',
      api: {
        url: 123, // incorrect type
      },
    };

    expect(() => ManifestDefinition.parse(json)).toThrow();
  });
});

describe('validateJson', () => {
  it('should return parsed json if valid', () => {
    const json = {
      name_for_human: 'Test',
      name_for_model: 'Test',
      description_for_human: 'Test',
      description_for_model: 'Test',
      api: {
        url: 'http://test.com',
      },
    };

    expect(validateJson(json)).toEqual(json);
  });

  it('should return false if json is not valid', () => {
    const json = {
      name_for_human: 'Test',
      name_for_model: 'Test',
      description_for_human: 'Test',
      description_for_model: 'Test',
      api: {
        url: 123, // incorrect type
      },
    };

    expect(validateJson(json)).toEqual(false);
  });
});

describe('loadSpecs', () => {
  beforeEach(() => {
    jest.spyOn(fs.promises, 'readdir').mockResolvedValue(['test.json']);
    jest.spyOn(fs.promises, 'readFile').mockResolvedValue(
      JSON.stringify({
        name_for_human: 'Test',
        name_for_model: 'Test',
        description_for_human: 'Test',
        description_for_model: 'Test',
        api: {
          url: 'http://test.com',
        },
      }),
    );
    createOpenAPIPlugin.mockResolvedValue({});
  });

  afterEach(() => {
    jest.restoreAllMocks();
  });

  it('should return plugins', async () => {
    const plugins = await loadSpecs({ llm: true, verbose: false });

    expect(plugins).toHaveLength(1);
    expect(createOpenAPIPlugin).toHaveBeenCalledTimes(1);
  });

  it('should return constructorMap if map is true', async () => {
    const plugins = await loadSpecs({ llm: {}, map: true, verbose: false });

    expect(plugins).toHaveProperty('Test');
    expect(createOpenAPIPlugin).not.toHaveBeenCalled();
  });
});

5  api/cache/getLogStores.js  vendored
@@ -61,10 +61,6 @@ const abortKeys = isRedisEnabled
  ? new Keyv({ store: keyvRedis })
  : new Keyv({ namespace: CacheKeys.ABORT_KEYS, ttl: Time.TEN_MINUTES });

const openIdExchangedTokensCache = isRedisEnabled
  ? new Keyv({ store: keyvRedis, ttl: Time.TEN_MINUTES })
  : new Keyv({ namespace: CacheKeys.OPENID_EXCHANGED_TOKENS, ttl: Time.TEN_MINUTES });

const namespaces = {
  [CacheKeys.ROLES]: roles,
  [CacheKeys.CONFIG_STORE]: config,
@@ -102,7 +98,6 @@ const namespaces = {
  [CacheKeys.AUDIO_RUNS]: audioRuns,
  [CacheKeys.MESSAGES]: messages,
  [CacheKeys.FLOWS]: flows,
  [CacheKeys.OPENID_EXCHANGED_TOKENS]: openIdExchangedTokensCache,
};

/**

10  api/cache/keyvRedis.js  vendored
@@ -75,15 +75,6 @@ if (REDIS_URI && isEnabled(USE_REDIS)) {
  } else {
    keyvRedis = new KeyvRedis(REDIS_URI, keyvOpts);
  }

  const pingInterval = setInterval(
    () => {
      logger.debug('KeyvRedis ping');
      keyvRedis.client.ping().catch((err) => logger.error('Redis keep-alive ping failed:', err));
    },
    5 * 60 * 1000,
  );

  keyvRedis.on('ready', () => {
    logger.info('KeyvRedis connection ready');
  });
@@ -94,7 +85,6 @@ if (REDIS_URI && isEnabled(USE_REDIS)) {
    logger.info('KeyvRedis connection ended');
  });
  keyvRedis.on('close', () => {
    clearInterval(pingInterval);
    logger.info('KeyvRedis connection closed');
  });
  keyvRedis.on('error', (err) => logger.error('KeyvRedis connection error:', err));

@@ -11,8 +11,5 @@ module.exports = {
  moduleNameMapper: {
    '~/(.*)': '<rootDir>/$1',
    '~/data/auth.json': '<rootDir>/__mocks__/auth.mock.json',
    '^openid-client/passport$': '<rootDir>/test/__mocks__/openid-client-passport.js', // Mock for the passport strategy part
    '^openid-client$': '<rootDir>/test/__mocks__/openid-client.js',
  },
  transformIgnorePatterns: ['/node_modules/(?!(openid-client|oauth4webapi|jose)/).*/'],
};

@@ -21,19 +21,7 @@ const Agent = mongoose.model('agent', agentSchema);
|
||||
* @throws {Error} If the agent creation fails.
|
||||
*/
|
||||
const createAgent = async (agentData) => {
|
||||
const { author, ...versionData } = agentData;
|
||||
const timestamp = new Date();
|
||||
const initialAgentData = {
|
||||
...agentData,
|
||||
versions: [
|
||||
{
|
||||
...versionData,
|
||||
createdAt: timestamp,
|
||||
updatedAt: timestamp,
|
||||
},
|
||||
],
|
||||
};
|
||||
return (await Agent.create(initialAgentData)).toObject();
|
||||
return (await Agent.create(agentData)).toObject();
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -60,17 +48,12 @@ const loadEphemeralAgent = ({ req, agent_id, endpoint, model_parameters: _m }) =
|
||||
const { model, ...model_parameters } = _m;
|
||||
/** @type {Record<string, FunctionTool>} */
|
||||
const availableTools = req.app.locals.availableTools;
|
||||
/** @type {TEphemeralAgent | null} */
|
||||
const ephemeralAgent = req.body.ephemeralAgent;
|
||||
const mcpServers = new Set(ephemeralAgent?.mcp);
|
||||
const mcpServers = new Set(req.body.ephemeralAgent?.mcp);
|
||||
/** @type {string[]} */
|
||||
const tools = [];
|
||||
if (ephemeralAgent?.execute_code === true) {
|
||||
if (req.body.ephemeralAgent?.execute_code === true) {
|
||||
tools.push(Tools.execute_code);
|
||||
}
|
||||
if (ephemeralAgent?.web_search === true) {
|
||||
tools.push(Tools.web_search);
|
||||
}
|
||||
|
||||
if (mcpServers.size > 0) {
|
||||
for (const toolName of Object.keys(availableTools)) {
|
||||
@@ -120,8 +103,6 @@ const loadAgent = async ({ req, agent_id, endpoint, model_parameters }) => {
|
||||
return null;
|
||||
}
|
||||
|
||||
agent.version = agent.versions ? agent.versions.length : 0;
|
||||
|
||||
if (agent.author.toString() === req.user.id) {
|
||||
return agent;
|
||||
}
|
||||
@@ -146,155 +127,18 @@ const loadAgent = async ({ req, agent_id, endpoint, model_parameters }) => {
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Check if a version already exists in the versions array, excluding timestamp and author fields
|
||||
* @param {Object} updateData - The update data to compare
|
||||
* @param {Array} versions - The existing versions array
|
||||
* @returns {Object|null} - The matching version if found, null otherwise
|
||||
*/
|
||||
const isDuplicateVersion = (updateData, currentData, versions) => {
|
||||
if (!versions || versions.length === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const excludeFields = [
|
||||
'_id',
|
||||
'id',
|
||||
'createdAt',
|
||||
'updatedAt',
|
||||
'author',
|
||||
'updatedBy',
|
||||
'created_at',
|
||||
'updated_at',
|
||||
'__v',
|
||||
'agent_ids',
|
||||
'versions',
|
||||
];
|
||||
|
||||
const { $push, $pull, $addToSet, ...directUpdates } = updateData;
|
||||
|
||||
if (Object.keys(directUpdates).length === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const wouldBeVersion = { ...currentData, ...directUpdates };
|
||||
const lastVersion = versions[versions.length - 1];
|
||||
|
||||
const allFields = new Set([...Object.keys(wouldBeVersion), ...Object.keys(lastVersion)]);
|
||||
|
||||
const importantFields = Array.from(allFields).filter((field) => !excludeFields.includes(field));
|
||||
|
||||
let isMatch = true;
|
||||
for (const field of importantFields) {
|
||||
if (!wouldBeVersion[field] && !lastVersion[field]) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (Array.isArray(wouldBeVersion[field]) && Array.isArray(lastVersion[field])) {
|
||||
if (wouldBeVersion[field].length !== lastVersion[field].length) {
|
||||
isMatch = false;
|
||||
break;
|
||||
}
|
||||
|
||||
// Special handling for projectIds (MongoDB ObjectIds)
|
||||
if (field === 'projectIds') {
|
||||
const wouldBeIds = wouldBeVersion[field].map((id) => id.toString()).sort();
|
||||
const versionIds = lastVersion[field].map((id) => id.toString()).sort();
|
||||
|
||||
if (!wouldBeIds.every((id, i) => id === versionIds[i])) {
|
||||
isMatch = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
// Handle arrays of objects like tool_kwargs
|
||||
else if (typeof wouldBeVersion[field][0] === 'object' && wouldBeVersion[field][0] !== null) {
|
||||
const sortedWouldBe = [...wouldBeVersion[field]].map((item) => JSON.stringify(item)).sort();
|
||||
const sortedVersion = [...lastVersion[field]].map((item) => JSON.stringify(item)).sort();
|
||||
|
||||
if (!sortedWouldBe.every((item, i) => item === sortedVersion[i])) {
|
||||
isMatch = false;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
const sortedWouldBe = [...wouldBeVersion[field]].sort();
|
||||
const sortedVersion = [...lastVersion[field]].sort();
|
||||
|
||||
if (!sortedWouldBe.every((item, i) => item === sortedVersion[i])) {
|
||||
isMatch = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else if (field === 'model_parameters') {
|
||||
const wouldBeParams = wouldBeVersion[field] || {};
|
||||
const lastVersionParams = lastVersion[field] || {};
|
||||
if (JSON.stringify(wouldBeParams) !== JSON.stringify(lastVersionParams)) {
|
||||
isMatch = false;
|
||||
break;
|
||||
}
|
||||
} else if (wouldBeVersion[field] !== lastVersion[field]) {
|
||||
isMatch = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return isMatch ? lastVersion : null;
|
||||
};
|
||||
|
||||
/**
|
||||
* Update an agent with new data without overwriting existing
|
||||
* properties, or create a new agent if it doesn't exist.
|
||||
* When an agent is updated, a copy of the current state will be saved to the versions array.
|
||||
*
|
||||
* @param {Object} searchParameter - The search parameters to find the agent to update.
|
||||
* @param {string} searchParameter.id - The ID of the agent to update.
|
||||
* @param {string} [searchParameter.author] - The user ID of the agent's author.
|
||||
* @param {Object} updateData - An object containing the properties to update.
|
||||
* @param {string} [updatingUserId] - The ID of the user performing the update (used for tracking non-author updates).
|
||||
* @returns {Promise<Agent>} The updated or newly created agent document as a plain object.
|
||||
* @throws {Error} If the update would create a duplicate version
|
||||
*/
|
||||
const updateAgent = async (searchParameter, updateData, updatingUserId = null) => {
|
||||
const updateAgent = async (searchParameter, updateData) => {
|
||||
const options = { new: true, upsert: false };
|
||||
|
||||
const currentAgent = await Agent.findOne(searchParameter);
|
||||
if (currentAgent) {
|
||||
const { __v, _id, id, versions, author, ...versionData } = currentAgent.toObject();
|
||||
const { $push, $pull, $addToSet, ...directUpdates } = updateData;
|
||||
|
||||
if (Object.keys(directUpdates).length > 0 && versions && versions.length > 0) {
|
||||
const duplicateVersion = isDuplicateVersion(updateData, versionData, versions);
|
||||
if (duplicateVersion) {
|
||||
const error = new Error(
|
||||
'Duplicate version: This would create a version identical to an existing one',
|
||||
);
|
||||
error.statusCode = 409;
|
||||
error.details = {
|
||||
duplicateVersion,
|
||||
versionIndex: versions.findIndex(
|
||||
(v) => JSON.stringify(duplicateVersion) === JSON.stringify(v),
|
||||
),
|
||||
};
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
const versionEntry = {
|
||||
...versionData,
|
||||
...directUpdates,
|
||||
updatedAt: new Date(),
|
||||
};
|
||||
|
||||
// Always store updatedBy field to track who made the change
|
||||
if (updatingUserId) {
|
||||
versionEntry.updatedBy = new mongoose.Types.ObjectId(updatingUserId);
|
||||
}
|
||||
|
||||
updateData.$push = {
|
||||
...($push || {}),
|
||||
versions: versionEntry,
|
||||
};
|
||||
}
|
||||
|
||||
return Agent.findOneAndUpdate(searchParameter, updateData, options).lean();
|
||||
};
|
||||
|
||||
@@ -307,7 +151,7 @@ const updateAgent = async (searchParameter, updateData, updatingUserId = null) =
|
||||
* @param {string} params.file_id
|
||||
* @returns {Promise<Agent>} The updated agent.
|
||||
*/
|
||||
const addAgentResourceFile = async ({ req, agent_id, tool_resource, file_id }) => {
|
||||
const addAgentResourceFile = async ({ agent_id, tool_resource, file_id }) => {
|
||||
const searchParameter = { id: agent_id };
|
||||
let agent = await getAgent(searchParameter);
|
||||
if (!agent) {
|
||||
@@ -333,7 +177,7 @@ const addAgentResourceFile = async ({ req, agent_id, tool_resource, file_id }) =
|
||||
},
|
||||
};
|
||||
|
||||
const updatedAgent = await updateAgent(searchParameter, updateData, req?.user?.id);
|
||||
const updatedAgent = await updateAgent(searchParameter, updateData);
|
||||
if (updatedAgent) {
|
||||
return updatedAgent;
|
||||
} else {
|
||||
@@ -464,7 +308,7 @@ const getListAgents = async (searchParameter) => {
|
||||
* This function also updates the corresponding projects to include or exclude the agent ID.
|
||||
*
|
||||
* @param {Object} params - Parameters for updating the agent's projects.
|
||||
* @param {MongoUser} params.user - Parameters for updating the agent's projects.
|
||||
* @param {import('librechat-data-provider').TUser} params.user - Parameters for updating the agent's projects.
|
||||
* @param {string} params.agentId - The ID of the agent to update.
|
||||
* @param {string[]} [params.projectIds] - Array of project IDs to add to the agent.
|
||||
* @param {string[]} [params.removeProjectIds] - Array of project IDs to remove from the agent.
|
||||
@@ -497,7 +341,7 @@ const updateAgentProjects = async ({ user, agentId, projectIds, removeProjectIds
|
||||
delete updateQuery.author;
|
||||
}
|
||||
|
||||
const updatedAgent = await updateAgent(updateQuery, updateOps, user.id);
|
||||
const updatedAgent = await updateAgent(updateQuery, updateOps);
|
||||
if (updatedAgent) {
|
||||
return updatedAgent;
|
||||
}
|
||||
@@ -514,40 +358,6 @@ const updateAgentProjects = async ({ user, agentId, projectIds, removeProjectIds
|
||||
return await getAgent({ id: agentId });
|
||||
};
|
||||
|
||||
/**
|
||||
* Reverts an agent to a specific version in its version history.
|
||||
* @param {Object} searchParameter - The search parameters to find the agent to revert.
|
||||
* @param {string} searchParameter.id - The ID of the agent to revert.
|
||||
* @param {string} [searchParameter.author] - The user ID of the agent's author.
|
||||
* @param {number} versionIndex - The index of the version to revert to in the versions array.
|
||||
* @returns {Promise<MongoAgent>} The updated agent document after reverting.
|
||||
* @throws {Error} If the agent is not found or the specified version does not exist.
|
||||
*/
|
||||
const revertAgentVersion = async (searchParameter, versionIndex) => {
|
||||
const agent = await Agent.findOne(searchParameter);
|
||||
if (!agent) {
|
||||
throw new Error('Agent not found');
|
||||
}
|
||||
|
||||
if (!agent.versions || !agent.versions[versionIndex]) {
|
||||
throw new Error(`Version ${versionIndex} not found`);
|
||||
}
|
||||
|
||||
const revertToVersion = agent.versions[versionIndex];
|
||||
|
||||
const updateData = {
|
||||
...revertToVersion,
|
||||
};
|
||||
|
||||
delete updateData._id;
|
||||
delete updateData.id;
|
||||
delete updateData.versions;
|
||||
delete updateData.author;
|
||||
delete updateData.updatedBy;
|
||||
|
||||
return Agent.findOneAndUpdate(searchParameter, updateData, { new: true }).lean();
|
||||
};
|
||||
|
||||
module.exports = {
|
||||
Agent,
|
||||
getAgent,
|
||||
@@ -559,5 +369,4 @@ module.exports = {
|
||||
updateAgentProjects,
|
||||
addAgentResourceFile,
|
||||
removeAgentResourceFiles,
|
||||
revertAgentVersion,
|
||||
};
|
||||
|
||||
@@ -1,25 +1,7 @@
|
||||
const originalEnv = {
|
||||
CREDS_KEY: process.env.CREDS_KEY,
|
||||
CREDS_IV: process.env.CREDS_IV,
|
||||
};
|
||||
|
||||
process.env.CREDS_KEY = '0123456789abcdef0123456789abcdef';
|
||||
process.env.CREDS_IV = '0123456789abcdef';
|
||||
|
||||
const mongoose = require('mongoose');
|
||||
const { v4: uuidv4 } = require('uuid');
|
||||
const { MongoMemoryServer } = require('mongodb-memory-server');
|
||||
const {
|
||||
Agent,
|
||||
addAgentResourceFile,
|
||||
removeAgentResourceFiles,
|
||||
createAgent,
|
||||
updateAgent,
|
||||
getAgent,
|
||||
deleteAgent,
|
||||
getListAgents,
|
||||
updateAgentProjects,
|
||||
} = require('./Agent');
|
||||
const { Agent, addAgentResourceFile, removeAgentResourceFiles } = require('./Agent');
|
||||
|
||||
describe('Agent Resource File Operations', () => {
|
||||
let mongoServer;
|
||||
@@ -33,8 +15,6 @@ describe('Agent Resource File Operations', () => {
|
||||
afterAll(async () => {
|
||||
await mongoose.disconnect();
|
||||
await mongoServer.stop();
|
||||
process.env.CREDS_KEY = originalEnv.CREDS_KEY;
|
||||
process.env.CREDS_IV = originalEnv.CREDS_IV;
|
||||
});
|
||||
|
||||
beforeEach(async () => {
|
||||
@@ -352,674 +332,3 @@ describe('Agent Resource File Operations', () => {
|
||||
expect(finalFileIds).toHaveLength(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Agent CRUD Operations', () => {
|
||||
let mongoServer;
|
||||
|
||||
beforeAll(async () => {
|
||||
mongoServer = await MongoMemoryServer.create();
|
||||
const mongoUri = mongoServer.getUri();
|
||||
await mongoose.connect(mongoUri);
|
||||
});
|
||||
|
||||
afterAll(async () => {
|
||||
await mongoose.disconnect();
|
||||
await mongoServer.stop();
|
||||
});
|
||||
|
||||
beforeEach(async () => {
|
||||
await Agent.deleteMany({});
|
||||
});
|
||||
|
||||
test('should create and get an agent', async () => {
|
||||
const agentId = `agent_${uuidv4()}`;
|
||||
const authorId = new mongoose.Types.ObjectId();
|
||||
|
||||
const newAgent = await createAgent({
|
||||
id: agentId,
|
||||
name: 'Test Agent',
|
||||
provider: 'test',
|
||||
model: 'test-model',
|
||||
author: authorId,
|
||||
description: 'Test description',
|
||||
});
|
||||
|
||||
expect(newAgent).toBeDefined();
|
||||
expect(newAgent.id).toBe(agentId);
|
||||
expect(newAgent.name).toBe('Test Agent');
|
||||
|
||||
const retrievedAgent = await getAgent({ id: agentId });
|
||||
expect(retrievedAgent).toBeDefined();
|
||||
expect(retrievedAgent.id).toBe(agentId);
|
||||
expect(retrievedAgent.name).toBe('Test Agent');
|
||||
expect(retrievedAgent.description).toBe('Test description');
|
||||
});
|
||||
|
||||
test('should delete an agent', async () => {
|
||||
const agentId = `agent_${uuidv4()}`;
|
||||
const authorId = new mongoose.Types.ObjectId();
|
||||
|
||||
await createAgent({
|
||||
id: agentId,
|
||||
name: 'Agent To Delete',
|
||||
provider: 'test',
|
||||
model: 'test-model',
|
||||
author: authorId,
|
||||
});
|
||||
|
||||
const agentBeforeDelete = await getAgent({ id: agentId });
|
||||
expect(agentBeforeDelete).toBeDefined();
|
||||
|
||||
await deleteAgent({ id: agentId });
|
||||
|
||||
const agentAfterDelete = await getAgent({ id: agentId });
|
||||
expect(agentAfterDelete).toBeNull();
|
||||
});
|
||||
|
||||
test('should list agents by author', async () => {
|
||||
const authorId = new mongoose.Types.ObjectId();
|
||||
const otherAuthorId = new mongoose.Types.ObjectId();
|
||||
|
||||
const agentIds = [];
|
||||
for (let i = 0; i < 5; i++) {
|
||||
const id = `agent_${uuidv4()}`;
|
||||
agentIds.push(id);
|
||||
await createAgent({
|
||||
id,
|
||||
name: `Agent ${i}`,
|
||||
provider: 'test',
|
||||
model: 'test-model',
|
||||
author: authorId,
|
||||
});
|
||||
}
|
||||
|
||||
for (let i = 0; i < 3; i++) {
|
||||
await createAgent({
|
||||
id: `other_agent_${uuidv4()}`,
|
||||
name: `Other Agent ${i}`,
|
||||
provider: 'test',
|
||||
model: 'test-model',
|
||||
author: otherAuthorId,
|
||||
});
|
||||
}
|
||||
|
||||
const result = await getListAgents({ author: authorId.toString() });
|
||||
|
||||
expect(result).toBeDefined();
|
||||
expect(result.data).toBeDefined();
|
||||
expect(result.data).toHaveLength(5);
|
||||
expect(result.has_more).toBe(true);
|
||||
|
||||
for (const agent of result.data) {
|
||||
expect(agent.author).toBe(authorId.toString());
|
||||
}
|
||||
});
|
||||
|
||||
test('should update agent projects', async () => {
|
||||
const agentId = `agent_${uuidv4()}`;
|
||||
const authorId = new mongoose.Types.ObjectId();
|
||||
const projectId1 = new mongoose.Types.ObjectId();
|
||||
const projectId2 = new mongoose.Types.ObjectId();
|
||||
const projectId3 = new mongoose.Types.ObjectId();
|
||||
|
||||
await createAgent({
|
||||
id: agentId,
|
||||
name: 'Project Test Agent',
|
||||
provider: 'test',
|
||||
model: 'test-model',
|
||||
author: authorId,
|
||||
projectIds: [projectId1],
|
||||
});
|
||||
|
||||
await updateAgent(
|
||||
{ id: agentId },
|
||||
{ $addToSet: { projectIds: { $each: [projectId2, projectId3] } } },
|
||||
);
|
||||
|
||||
await updateAgent({ id: agentId }, { $pull: { projectIds: projectId1 } });
|
||||
|
||||
await updateAgent({ id: agentId }, { projectIds: [projectId2, projectId3] });
|
||||
|
||||
const updatedAgent = await getAgent({ id: agentId });
|
||||
expect(updatedAgent.projectIds).toHaveLength(2);
|
||||
expect(updatedAgent.projectIds.map((id) => id.toString())).toContain(projectId2.toString());
|
||||
expect(updatedAgent.projectIds.map((id) => id.toString())).toContain(projectId3.toString());
|
||||
expect(updatedAgent.projectIds.map((id) => id.toString())).not.toContain(projectId1.toString());
|
||||
|
||||
await updateAgent({ id: agentId }, { projectIds: [] });
|
||||
|
||||
const emptyProjectsAgent = await getAgent({ id: agentId });
|
||||
expect(emptyProjectsAgent.projectIds).toHaveLength(0);
|
||||
|
||||
const nonExistentId = `agent_${uuidv4()}`;
|
||||
await expect(
|
||||
updateAgentProjects({
|
||||
id: nonExistentId,
|
||||
projectIds: [projectId1],
|
||||
}),
|
||||
).rejects.toThrow();
|
||||
});
|
||||
|
||||
test('should handle ephemeral agent loading', async () => {
|
||||
const agentId = 'ephemeral_test';
|
||||
const endpoint = 'openai';
|
||||
|
||||
const originalModule = jest.requireActual('librechat-data-provider');
|
||||
|
||||
const mockDataProvider = {
|
||||
...originalModule,
|
||||
Constants: {
|
||||
...originalModule.Constants,
|
||||
EPHEMERAL_AGENT_ID: 'ephemeral_test',
|
||||
},
|
||||
};
|
||||
|
||||
jest.doMock('librechat-data-provider', () => mockDataProvider);
|
||||
|
||||
const mockReq = {
|
||||
user: { id: 'user123' },
|
||||
body: {
|
||||
promptPrefix: 'This is a test instruction',
|
||||
ephemeralAgent: {
|
||||
execute_code: true,
|
||||
mcp: ['server1', 'server2'],
|
||||
},
|
||||
},
|
||||
app: {
|
||||
locals: {
|
||||
availableTools: {
|
||||
tool__server1: {},
|
||||
tool__server2: {},
|
||||
another_tool: {},
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
const params = {
|
||||
req: mockReq,
|
||||
agent_id: agentId,
|
||||
endpoint,
|
||||
model_parameters: {
|
||||
model: 'gpt-4',
|
||||
temperature: 0.7,
|
||||
},
|
||||
};
|
||||
|
||||
expect(agentId).toBeDefined();
|
||||
expect(endpoint).toBeDefined();
|
||||
|
||||
jest.dontMock('librechat-data-provider');
|
||||
});
|
||||
|
||||
test('should handle loadAgent functionality and errors', async () => {
|
||||
const agentId = `agent_${uuidv4()}`;
|
||||
const authorId = new mongoose.Types.ObjectId();
|
||||
|
||||
await createAgent({
|
||||
id: agentId,
|
||||
name: 'Test Load Agent',
|
||||
provider: 'test',
|
||||
model: 'test-model',
|
||||
author: authorId,
|
||||
tools: ['tool1', 'tool2'],
|
||||
});
|
||||
|
||||
const agent = await getAgent({ id: agentId });
|
||||
|
||||
expect(agent).toBeDefined();
|
||||
expect(agent.id).toBe(agentId);
|
||||
expect(agent.name).toBe('Test Load Agent');
|
||||
expect(agent.tools).toEqual(expect.arrayContaining(['tool1', 'tool2']));
|
||||
|
||||
const mockLoadAgent = jest.fn().mockResolvedValue(agent);
|
||||
const loadedAgent = await mockLoadAgent();
|
||||
expect(loadedAgent).toBeDefined();
|
||||
expect(loadedAgent.id).toBe(agentId);
|
||||
|
||||
const nonExistentId = `agent_${uuidv4()}`;
|
||||
const nonExistentAgent = await getAgent({ id: nonExistentId });
|
||||
expect(nonExistentAgent).toBeNull();
|
||||
|
||||
const mockLoadAgentError = jest.fn().mockRejectedValue(new Error('No agent found with ID'));
|
||||
await expect(mockLoadAgentError()).rejects.toThrow('No agent found with ID');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Agent Version History', () => {
|
||||
let mongoServer;
|
||||
|
||||
beforeAll(async () => {
|
||||
mongoServer = await MongoMemoryServer.create();
|
||||
const mongoUri = mongoServer.getUri();
|
||||
await mongoose.connect(mongoUri);
|
||||
});
|
||||
|
||||
afterAll(async () => {
|
||||
await mongoose.disconnect();
|
||||
await mongoServer.stop();
|
||||
});
|
||||
|
||||
beforeEach(async () => {
|
||||
await Agent.deleteMany({});
|
||||
});
|
||||
|
||||
test('should create an agent with a single entry in versions array', async () => {
|
||||
const agentId = `agent_${uuidv4()}`;
|
||||
const agent = await createAgent({
|
||||
id: agentId,
|
||||
name: 'Test Agent',
|
||||
provider: 'test',
|
||||
model: 'test-model',
|
||||
author: new mongoose.Types.ObjectId(),
|
||||
});
|
||||
|
||||
expect(agent.versions).toBeDefined();
|
||||
expect(Array.isArray(agent.versions)).toBe(true);
|
||||
expect(agent.versions).toHaveLength(1);
|
||||
expect(agent.versions[0].name).toBe('Test Agent');
|
||||
expect(agent.versions[0].provider).toBe('test');
|
||||
expect(agent.versions[0].model).toBe('test-model');
|
||||
});
|
||||
|
||||
test('should accumulate version history across multiple updates', async () => {
|
||||
const agentId = `agent_${uuidv4()}`;
|
||||
const author = new mongoose.Types.ObjectId();
|
||||
await createAgent({
|
||||
id: agentId,
|
||||
name: 'First Name',
|
||||
provider: 'test',
|
||||
model: 'test-model',
|
||||
author,
|
||||
description: 'First description',
|
||||
});
|
||||
|
||||
await updateAgent({ id: agentId }, { name: 'Second Name', description: 'Second description' });
|
||||
await updateAgent({ id: agentId }, { name: 'Third Name', model: 'new-model' });
|
||||
const finalAgent = await updateAgent({ id: agentId }, { description: 'Final description' });
|
||||
|
||||
expect(finalAgent.versions).toBeDefined();
|
||||
expect(Array.isArray(finalAgent.versions)).toBe(true);
|
||||
expect(finalAgent.versions).toHaveLength(4);
|
||||
|
||||
expect(finalAgent.versions[0].name).toBe('First Name');
|
||||
expect(finalAgent.versions[0].description).toBe('First description');
|
||||
expect(finalAgent.versions[0].model).toBe('test-model');
|
||||
|
||||
expect(finalAgent.versions[1].name).toBe('Second Name');
|
||||
expect(finalAgent.versions[1].description).toBe('Second description');
|
||||
expect(finalAgent.versions[1].model).toBe('test-model');
|
||||
|
||||
expect(finalAgent.versions[2].name).toBe('Third Name');
|
||||
expect(finalAgent.versions[2].description).toBe('Second description');
|
||||
expect(finalAgent.versions[2].model).toBe('new-model');
|
||||
|
||||
expect(finalAgent.versions[3].name).toBe('Third Name');
|
||||
expect(finalAgent.versions[3].description).toBe('Final description');
|
||||
expect(finalAgent.versions[3].model).toBe('new-model');
|
||||
|
||||
expect(finalAgent.name).toBe('Third Name');
|
||||
expect(finalAgent.description).toBe('Final description');
|
||||
expect(finalAgent.model).toBe('new-model');
|
||||
});
|
||||
|
||||
test('should not include metadata fields in version history', async () => {
|
||||
const agentId = `agent_${uuidv4()}`;
|
||||
await createAgent({
|
||||
id: agentId,
|
||||
name: 'Test Agent',
|
||||
provider: 'test',
|
||||
model: 'test-model',
|
||||
author: new mongoose.Types.ObjectId(),
|
||||
});
|
||||
|
||||
const updatedAgent = await updateAgent({ id: agentId }, { description: 'New description' });
|
||||
|
||||
expect(updatedAgent.versions).toHaveLength(2);
|
||||
expect(updatedAgent.versions[0]._id).toBeUndefined();
|
||||
expect(updatedAgent.versions[0].__v).toBeUndefined();
|
||||
expect(updatedAgent.versions[0].name).toBe('Test Agent');
|
||||
expect(updatedAgent.versions[0].author).toBeUndefined();
|
||||
|
||||
expect(updatedAgent.versions[1]._id).toBeUndefined();
|
||||
expect(updatedAgent.versions[1].__v).toBeUndefined();
|
||||
});
|
||||
|
||||
test('should not recursively include previous versions', async () => {
|
||||
const agentId = `agent_${uuidv4()}`;
|
||||
await createAgent({
|
||||
id: agentId,
|
||||
name: 'Test Agent',
|
||||
provider: 'test',
|
||||
model: 'test-model',
|
||||
author: new mongoose.Types.ObjectId(),
|
||||
});
|
||||
|
||||
await updateAgent({ id: agentId }, { name: 'Updated Name 1' });
|
||||
await updateAgent({ id: agentId }, { name: 'Updated Name 2' });
|
||||
const finalAgent = await updateAgent({ id: agentId }, { name: 'Updated Name 3' });
|
||||
|
||||
expect(finalAgent.versions).toHaveLength(4);
|
||||
|
||||
finalAgent.versions.forEach((version) => {
|
||||
expect(version.versions).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
test('should handle MongoDB operators and field updates correctly', async () => {
|
||||
const agentId = `agent_${uuidv4()}`;
|
||||
const authorId = new mongoose.Types.ObjectId();
|
||||
const projectId = new mongoose.Types.ObjectId();
|
||||
|
||||
await createAgent({
|
||||
id: agentId,
|
||||
name: 'MongoDB Operator Test',
|
||||
provider: 'test',
|
||||
model: 'test-model',
|
||||
author: authorId,
|
||||
tools: ['tool1'],
|
||||
});
|
||||
|
||||
await updateAgent(
|
||||
{ id: agentId },
|
||||
{
|
||||
description: 'Updated description',
|
||||
$push: { tools: 'tool2' },
|
||||
$addToSet: { projectIds: projectId },
|
||||
},
|
||||
);
|
||||
|
||||
const firstUpdate = await getAgent({ id: agentId });
|
||||
expect(firstUpdate.description).toBe('Updated description');
|
||||
expect(firstUpdate.tools).toContain('tool1');
|
||||
expect(firstUpdate.tools).toContain('tool2');
|
||||
expect(firstUpdate.projectIds.map((id) => id.toString())).toContain(projectId.toString());
|
||||
expect(firstUpdate.versions).toHaveLength(2);
|
||||
|
||||
await updateAgent(
|
||||
{ id: agentId },
|
||||
{
|
||||
tools: ['tool2', 'tool3'],
|
||||
},
|
||||
);
|
||||
|
||||
const secondUpdate = await getAgent({ id: agentId });
|
||||
expect(secondUpdate.tools).toHaveLength(2);
|
||||
expect(secondUpdate.tools).toContain('tool2');
|
||||
expect(secondUpdate.tools).toContain('tool3');
|
||||
expect(secondUpdate.tools).not.toContain('tool1');
|
||||
expect(secondUpdate.versions).toHaveLength(3);
|
||||
|
||||
await updateAgent(
|
||||
{ id: agentId },
|
||||
{
|
||||
$push: { tools: 'tool3' },
|
||||
},
|
||||
);
|
||||
|
||||
const thirdUpdate = await getAgent({ id: agentId });
|
||||
const toolCount = thirdUpdate.tools.filter((t) => t === 'tool3').length;
|
||||
expect(toolCount).toBe(2);
|
||||
expect(thirdUpdate.versions).toHaveLength(4);
|
||||
});
|
||||
|
||||
test('should handle parameter objects correctly', async () => {
|
||||
const agentId = `agent_${uuidv4()}`;
|
||||
const authorId = new mongoose.Types.ObjectId();
|
||||
|
||||
await createAgent({
|
||||
id: agentId,
|
||||
name: 'Parameters Test',
|
||||
provider: 'test',
|
||||
model: 'test-model',
|
||||
author: authorId,
|
||||
model_parameters: { temperature: 0.7 },
|
||||
});
|
||||
|
||||
const updatedAgent = await updateAgent(
|
||||
{ id: agentId },
|
||||
{ model_parameters: { temperature: 0.8 } },
|
||||
);
|
||||
|
||||
expect(updatedAgent.versions).toHaveLength(2);
|
||||
expect(updatedAgent.model_parameters.temperature).toBe(0.8);
|
||||
|
||||
await updateAgent(
|
||||
{ id: agentId },
|
||||
{
|
||||
model_parameters: {
|
||||
temperature: 0.8,
|
||||
max_tokens: 1000,
|
||||
},
|
||||
},
|
||||
);
|
||||
|
||||
const complexAgent = await getAgent({ id: agentId });
|
||||
expect(complexAgent.versions).toHaveLength(3);
|
||||
expect(complexAgent.model_parameters.temperature).toBe(0.8);
|
||||
expect(complexAgent.model_parameters.max_tokens).toBe(1000);
|
||||
|
||||
await updateAgent({ id: agentId }, { model_parameters: {} });
|
||||
|
||||
const emptyParamsAgent = await getAgent({ id: agentId });
|
||||
expect(emptyParamsAgent.versions).toHaveLength(4);
|
||||
expect(emptyParamsAgent.model_parameters).toEqual({});
|
||||
});
|
||||
|
||||
test('should detect duplicate versions and reject updates', async () => {
|
||||
const originalConsoleError = console.error;
|
||||
console.error = jest.fn();
|
||||
|
||||
try {
|
||||
const agentId = `agent_${uuidv4()}`;
|
||||
const authorId = new mongoose.Types.ObjectId();
|
||||
const projectId1 = new mongoose.Types.ObjectId();
|
||||
const projectId2 = new mongoose.Types.ObjectId();
|
||||
|
||||
const testCases = [
|
||||
{
|
||||
name: 'simple field update',
|
||||
initial: {
|
||||
name: 'Test Agent',
|
||||
description: 'Initial description',
|
||||
},
|
||||
update: { name: 'Updated Name' },
|
||||
duplicate: { name: 'Updated Name' },
|
||||
},
|
||||
{
|
||||
name: 'object field update',
|
||||
initial: {
|
||||
model_parameters: { temperature: 0.7 },
|
||||
},
|
||||
update: { model_parameters: { temperature: 0.8 } },
|
||||
duplicate: { model_parameters: { temperature: 0.8 } },
|
||||
},
|
||||
{
|
||||
name: 'array field update',
|
||||
initial: {
|
||||
tools: ['tool1', 'tool2'],
|
||||
},
|
||||
update: { tools: ['tool2', 'tool3'] },
|
||||
duplicate: { tools: ['tool2', 'tool3'] },
|
||||
},
|
||||
{
|
||||
name: 'projectIds update',
|
||||
initial: {
|
||||
projectIds: [projectId1],
|
||||
},
|
||||
update: { projectIds: [projectId1, projectId2] },
|
||||
duplicate: { projectIds: [projectId2, projectId1] },
|
||||
},
|
||||
];
|
||||
|
||||
for (const testCase of testCases) {
|
||||
const testAgentId = `agent_${uuidv4()}`;
|
||||
|
||||
await createAgent({
|
||||
id: testAgentId,
|
||||
provider: 'test',
|
||||
model: 'test-model',
|
||||
author: authorId,
|
||||
...testCase.initial,
|
||||
});
|
||||
|
||||
await updateAgent({ id: testAgentId }, testCase.update);
|
||||
|
||||
let error;
|
||||
try {
|
||||
await updateAgent({ id: testAgentId }, testCase.duplicate);
|
||||
} catch (e) {
|
||||
error = e;
|
||||
}
|
||||
|
||||
expect(error).toBeDefined();
|
||||
expect(error.message).toContain('Duplicate version');
|
||||
expect(error.statusCode).toBe(409);
|
||||
expect(error.details).toBeDefined();
|
||||
expect(error.details.duplicateVersion).toBeDefined();
|
||||
|
||||
const agent = await getAgent({ id: testAgentId });
|
||||
expect(agent.versions).toHaveLength(2);
|
||||
}
|
||||
} finally {
|
||||
console.error = originalConsoleError;
|
||||
}
|
||||
});
|
||||
|
||||
test('should track updatedBy when a different user updates an agent', async () => {
|
||||
const agentId = `agent_${uuidv4()}`;
|
||||
const originalAuthor = new mongoose.Types.ObjectId();
|
||||
const updatingUser = new mongoose.Types.ObjectId();
|
||||
|
||||
await createAgent({
|
||||
id: agentId,
|
||||
name: 'Original Agent',
|
||||
provider: 'test',
|
||||
model: 'test-model',
|
||||
author: originalAuthor,
|
||||
description: 'Original description',
|
||||
});
|
||||
|
||||
const updatedAgent = await updateAgent(
|
||||
{ id: agentId },
|
||||
{ name: 'Updated Agent', description: 'Updated description' },
|
||||
updatingUser.toString(),
|
||||
);
|
||||
|
||||
expect(updatedAgent.versions).toHaveLength(2);
|
||||
expect(updatedAgent.versions[1].updatedBy.toString()).toBe(updatingUser.toString());
|
||||
expect(updatedAgent.author.toString()).toBe(originalAuthor.toString());
|
||||
});
|
||||
|
||||
test('should include updatedBy even when the original author updates the agent', async () => {
|
||||
const agentId = `agent_${uuidv4()}`;
|
||||
const originalAuthor = new mongoose.Types.ObjectId();
|
||||
|
||||
await createAgent({
|
||||
id: agentId,
|
||||
name: 'Original Agent',
|
||||
provider: 'test',
|
||||
model: 'test-model',
|
||||
author: originalAuthor,
|
||||
description: 'Original description',
|
||||
});
|
||||
|
||||
const updatedAgent = await updateAgent(
|
||||
{ id: agentId },
|
||||
{ name: 'Updated Agent', description: 'Updated description' },
|
||||
originalAuthor.toString(),
|
||||
);
|
||||
|
||||
expect(updatedAgent.versions).toHaveLength(2);
|
||||
expect(updatedAgent.versions[1].updatedBy.toString()).toBe(originalAuthor.toString());
|
||||
expect(updatedAgent.author.toString()).toBe(originalAuthor.toString());
|
||||
});
|
||||
|
||||
test('should track multiple different users updating the same agent', async () => {
|
||||
const agentId = `agent_${uuidv4()}`;
|
||||
const originalAuthor = new mongoose.Types.ObjectId();
|
||||
const user1 = new mongoose.Types.ObjectId();
|
||||
const user2 = new mongoose.Types.ObjectId();
|
||||
const user3 = new mongoose.Types.ObjectId();
|
||||
|
||||
await createAgent({
|
||||
id: agentId,
|
||||
name: 'Original Agent',
|
||||
provider: 'test',
|
||||
model: 'test-model',
|
||||
author: originalAuthor,
|
||||
description: 'Original description',
|
||||
});
|
||||
|
||||
// User 1 makes an update
|
||||
await updateAgent(
|
||||
{ id: agentId },
|
||||
{ name: 'Updated by User 1', description: 'First update' },
|
||||
user1.toString(),
|
||||
);
|
||||
|
||||
// Original author makes an update
|
||||
await updateAgent(
|
||||
{ id: agentId },
|
||||
{ description: 'Updated by original author' },
|
||||
originalAuthor.toString(),
|
||||
);
|
||||
|
||||
// User 2 makes an update
|
||||
await updateAgent(
|
||||
{ id: agentId },
|
||||
{ name: 'Updated by User 2', model: 'new-model' },
|
||||
user2.toString(),
|
||||
);
|
||||
|
||||
// User 3 makes an update
|
||||
const finalAgent = await updateAgent(
|
||||
{ id: agentId },
|
||||
{ description: 'Final update by User 3' },
|
||||
user3.toString(),
|
||||
);
|
||||
|
||||
expect(finalAgent.versions).toHaveLength(5);
|
||||
expect(finalAgent.author.toString()).toBe(originalAuthor.toString());
|
||||
|
||||
// Check that each version has the correct updatedBy
|
||||
expect(finalAgent.versions[0].updatedBy).toBeUndefined(); // Initial creation has no updatedBy
|
||||
expect(finalAgent.versions[1].updatedBy.toString()).toBe(user1.toString());
|
||||
expect(finalAgent.versions[2].updatedBy.toString()).toBe(originalAuthor.toString());
|
||||
expect(finalAgent.versions[3].updatedBy.toString()).toBe(user2.toString());
|
||||
expect(finalAgent.versions[4].updatedBy.toString()).toBe(user3.toString());
|
||||
|
||||
// Verify the final state
|
||||
expect(finalAgent.name).toBe('Updated by User 2');
|
||||
expect(finalAgent.description).toBe('Final update by User 3');
|
||||
expect(finalAgent.model).toBe('new-model');
|
||||
});
|
||||
|
||||
test('should preserve original author during agent restoration', async () => {
|
||||
const agentId = `agent_${uuidv4()}`;
|
||||
const originalAuthor = new mongoose.Types.ObjectId();
|
||||
const updatingUser = new mongoose.Types.ObjectId();
|
||||
|
||||
await createAgent({
|
||||
id: agentId,
|
||||
name: 'Original Agent',
|
||||
provider: 'test',
|
||||
model: 'test-model',
|
||||
author: originalAuthor,
|
||||
description: 'Original description',
|
||||
});
|
||||
|
||||
await updateAgent(
|
||||
{ id: agentId },
|
||||
{ name: 'Updated Agent', description: 'Updated description' },
|
||||
updatingUser.toString(),
|
||||
);
|
||||
|
||||
const { revertAgentVersion } = require('./Agent');
|
||||
const revertedAgent = await revertAgentVersion({ id: agentId }, 0);
|
||||
|
||||
expect(revertedAgent.author.toString()).toBe(originalAuthor.toString());
|
||||
expect(revertedAgent.name).toBe('Original Agent');
|
||||
expect(revertedAgent.description).toBe('Original description');
|
||||
});
|
||||
});
|
||||
|
||||
@@ -100,8 +100,6 @@ const tokenValues = Object.assign(
|
||||
'claude-3-5-haiku': { prompt: 0.8, completion: 4 },
|
||||
'claude-3.5-haiku': { prompt: 0.8, completion: 4 },
|
||||
'claude-3-haiku': { prompt: 0.25, completion: 1.25 },
|
||||
'claude-sonnet-4': { prompt: 3, completion: 15 },
|
||||
'claude-opus-4': { prompt: 15, completion: 75 },
|
||||
'claude-2.1': { prompt: 8, completion: 24 },
|
||||
'claude-2': { prompt: 8, completion: 24 },
|
||||
'claude-instant': { prompt: 0.8, completion: 2.4 },
|
||||
@@ -113,15 +111,10 @@ const tokenValues = Object.assign(
|
||||
/* cohere doesn't have rates for the older command models,
|
||||
so this was from https://artificialanalysis.ai/models/command-light/providers */
|
||||
command: { prompt: 0.38, completion: 0.38 },
|
||||
gemma: { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
|
||||
'gemma-2': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
|
||||
'gemma-3': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
|
||||
'gemma-3-27b': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
|
||||
'gemini-2.0-flash-lite': { prompt: 0.075, completion: 0.3 },
|
||||
'gemini-2.0-flash': { prompt: 0.1, completion: 0.4 },
|
||||
'gemini-2.0': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
|
||||
'gemini-2.5-pro': { prompt: 1.25, completion: 10 },
|
||||
'gemini-2.5-flash': { prompt: 0.15, completion: 3.5 },
|
||||
'gemini-2.5-pro-preview-03-25': { prompt: 1.25, completion: 10 },
|
||||
'gemini-2.5': { prompt: 0, completion: 0 }, // Free for a period of time
|
||||
'gemini-1.5-flash-8b': { prompt: 0.075, completion: 0.3 },
|
||||
'gemini-1.5-flash': { prompt: 0.15, completion: 0.6 },
|
||||
@@ -164,8 +157,6 @@ const cacheTokenValues = {
|
||||
'claude-3.5-haiku': { write: 1, read: 0.08 },
|
||||
'claude-3-5-haiku': { write: 1, read: 0.08 },
|
||||
'claude-3-haiku': { write: 0.3, read: 0.03 },
|
||||
'claude-sonnet-4': { write: 3.75, read: 0.3 },
|
||||
'claude-opus-4': { write: 18.75, read: 1.5 },
|
||||
};
|
||||
|
||||
/**
|
||||
|
||||
@@ -488,9 +488,6 @@ describe('getCacheMultiplier', () => {
|
||||
|
||||
describe('Google Model Tests', () => {
|
||||
const googleModels = [
|
||||
'gemini-2.5-pro-preview-05-06',
|
||||
'gemini-2.5-flash-preview-04-17',
|
||||
'gemini-2.5-exp',
|
||||
'gemini-2.0-flash-lite-preview-02-05',
|
||||
'gemini-2.0-flash-001',
|
||||
'gemini-2.0-flash-exp',
|
||||
@@ -528,9 +525,6 @@ describe('Google Model Tests', () => {
|
||||
|
||||
it('should map to the correct model keys', () => {
|
||||
const expected = {
|
||||
'gemini-2.5-pro-preview-05-06': 'gemini-2.5-pro',
|
||||
'gemini-2.5-flash-preview-04-17': 'gemini-2.5-flash',
|
||||
'gemini-2.5-exp': 'gemini-2.5',
|
||||
'gemini-2.0-flash-lite-preview-02-05': 'gemini-2.0-flash-lite',
|
||||
'gemini-2.0-flash-001': 'gemini-2.0-flash',
|
||||
'gemini-2.0-flash-exp': 'gemini-2.0-flash',
|
||||
@@ -664,97 +658,3 @@ describe('Grok Model Tests - Pricing', () => {
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('Claude Model Tests', () => {
|
||||
it('should return correct prompt and completion rates for Claude 4 models', () => {
|
||||
expect(getMultiplier({ model: 'claude-sonnet-4', tokenType: 'prompt' })).toBe(
|
||||
tokenValues['claude-sonnet-4'].prompt,
|
||||
);
|
||||
expect(getMultiplier({ model: 'claude-sonnet-4', tokenType: 'completion' })).toBe(
|
||||
tokenValues['claude-sonnet-4'].completion,
|
||||
);
|
||||
expect(getMultiplier({ model: 'claude-opus-4', tokenType: 'prompt' })).toBe(
|
||||
tokenValues['claude-opus-4'].prompt,
|
||||
);
|
||||
expect(getMultiplier({ model: 'claude-opus-4', tokenType: 'completion' })).toBe(
|
||||
tokenValues['claude-opus-4'].completion,
|
||||
);
|
||||
});
|
||||
|
||||
it('should handle Claude 4 model name variations with different prefixes and suffixes', () => {
|
||||
const modelVariations = [
|
||||
'claude-sonnet-4',
|
||||
'claude-sonnet-4-20240229',
|
||||
'claude-sonnet-4-latest',
|
||||
'anthropic/claude-sonnet-4',
|
||||
'claude-sonnet-4/anthropic',
|
||||
'claude-sonnet-4-preview',
|
||||
'claude-sonnet-4-20240229-preview',
|
||||
'claude-opus-4',
|
||||
'claude-opus-4-20240229',
|
||||
'claude-opus-4-latest',
|
||||
'anthropic/claude-opus-4',
|
||||
'claude-opus-4/anthropic',
|
||||
'claude-opus-4-preview',
|
||||
'claude-opus-4-20240229-preview',
|
||||
];
|
||||
|
||||
modelVariations.forEach((model) => {
|
||||
const valueKey = getValueKey(model);
|
||||
const isSonnet = model.includes('sonnet');
|
||||
const expectedKey = isSonnet ? 'claude-sonnet-4' : 'claude-opus-4';
|
||||
|
||||
expect(valueKey).toBe(expectedKey);
|
||||
expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(tokenValues[expectedKey].prompt);
|
||||
expect(getMultiplier({ model, tokenType: 'completion' })).toBe(
|
||||
tokenValues[expectedKey].completion,
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
it('should return correct cache rates for Claude 4 models', () => {
|
||||
expect(getCacheMultiplier({ model: 'claude-sonnet-4', cacheType: 'write' })).toBe(
|
||||
cacheTokenValues['claude-sonnet-4'].write,
|
||||
);
|
||||
expect(getCacheMultiplier({ model: 'claude-sonnet-4', cacheType: 'read' })).toBe(
|
||||
cacheTokenValues['claude-sonnet-4'].read,
|
||||
);
|
||||
expect(getCacheMultiplier({ model: 'claude-opus-4', cacheType: 'write' })).toBe(
|
||||
cacheTokenValues['claude-opus-4'].write,
|
||||
);
|
||||
expect(getCacheMultiplier({ model: 'claude-opus-4', cacheType: 'read' })).toBe(
|
||||
cacheTokenValues['claude-opus-4'].read,
|
||||
);
|
||||
});
|
||||
|
||||
it('should handle Claude 4 model cache rates with different prefixes and suffixes', () => {
|
||||
const modelVariations = [
|
||||
'claude-sonnet-4',
|
||||
'claude-sonnet-4-20240229',
|
||||
'claude-sonnet-4-latest',
|
||||
'anthropic/claude-sonnet-4',
|
||||
'claude-sonnet-4/anthropic',
|
||||
'claude-sonnet-4-preview',
|
||||
'claude-sonnet-4-20240229-preview',
|
||||
'claude-opus-4',
|
||||
'claude-opus-4-20240229',
|
||||
'claude-opus-4-latest',
|
||||
'anthropic/claude-opus-4',
|
||||
'claude-opus-4/anthropic',
|
||||
'claude-opus-4-preview',
|
||||
'claude-opus-4-20240229-preview',
|
||||
];
|
||||
|
||||
modelVariations.forEach((model) => {
|
||||
const isSonnet = model.includes('sonnet');
|
||||
const expectedKey = isSonnet ? 'claude-sonnet-4' : 'claude-opus-4';
|
||||
|
||||
expect(getCacheMultiplier({ model, cacheType: 'write' })).toBe(
|
||||
cacheTokenValues[expectedKey].write,
|
||||
);
|
||||
expect(getCacheMultiplier({ model, cacheType: 'read' })).toBe(
|
||||
cacheTokenValues[expectedKey].read,
|
||||
);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@librechat/backend",
|
||||
"version": "v0.7.8",
|
||||
"version": "v0.7.7",
|
||||
"description": "",
|
||||
"scripts": {
|
||||
"start": "echo 'please run this from the root directory'",
|
||||
@@ -43,12 +43,12 @@
|
||||
"@google/generative-ai": "^0.23.0",
|
||||
"@googleapis/youtube": "^20.0.0",
|
||||
"@keyv/redis": "^4.3.3",
|
||||
"@langchain/community": "^0.3.44",
|
||||
"@langchain/core": "^0.3.57",
|
||||
"@langchain/google-genai": "^0.2.9",
|
||||
"@langchain/google-vertexai": "^0.2.9",
|
||||
"@langchain/community": "^0.3.39",
|
||||
"@langchain/core": "^0.3.43",
|
||||
"@langchain/google-genai": "^0.2.2",
|
||||
"@langchain/google-vertexai": "^0.2.3",
|
||||
"@langchain/textsplitters": "^0.1.0",
|
||||
"@librechat/agents": "^2.4.37",
|
||||
"@librechat/agents": "^2.4.22",
|
||||
"@librechat/data-schemas": "*",
|
||||
"@waylaidwanderer/fetch-event-source": "^3.0.1",
|
||||
"axios": "^1.8.2",
|
||||
@@ -75,7 +75,6 @@
|
||||
"ioredis": "^5.3.2",
|
||||
"js-yaml": "^4.1.0",
|
||||
"jsonwebtoken": "^9.0.0",
|
||||
"jwks-rsa": "^3.2.0",
|
||||
"keyv": "^5.3.2",
|
||||
"keyv-file": "^5.1.2",
|
||||
"klona": "^2.0.6",
|
||||
@@ -87,13 +86,13 @@
|
||||
"mime": "^3.0.0",
|
||||
"module-alias": "^2.2.3",
|
||||
"mongoose": "^8.12.1",
|
||||
"multer": "^2.0.0",
|
||||
"multer": "^1.4.5-lts.1",
|
||||
"nanoid": "^3.3.7",
|
||||
"nodemailer": "^6.9.15",
|
||||
"ollama": "^0.5.0",
|
||||
"openai": "^4.96.2",
|
||||
"openai": "^4.47.1",
|
||||
"openai-chat-tokens": "^0.2.8",
|
||||
"openid-client": "^6.5.0",
|
||||
"openid-client": "^5.4.2",
|
||||
"passport": "^0.6.0",
|
||||
"passport-apple": "^2.0.2",
|
||||
"passport-discord": "^0.1.4",
|
||||
@@ -117,6 +116,6 @@
|
||||
"jest": "^29.7.0",
|
||||
"mongodb-memory-server": "^10.1.3",
|
||||
"nodemon": "^3.0.3",
|
||||
"supertest": "^7.1.0"
|
||||
"supertest": "^7.0.0"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,17 +16,17 @@ const FinalizationRegistry = global.FinalizationRegistry || null;
|
||||
*/
|
||||
const clientRegistry = FinalizationRegistry
|
||||
? new FinalizationRegistry((heldValue) => {
|
||||
try {
|
||||
// This will run when the client is garbage collected
|
||||
if (heldValue && heldValue.userId) {
|
||||
logger.debug(`[FinalizationRegistry] Cleaning up client for user ${heldValue.userId}`);
|
||||
} else {
|
||||
logger.debug('[FinalizationRegistry] Cleaning up client');
|
||||
}
|
||||
} catch (e) {
|
||||
// Ignore errors
|
||||
try {
|
||||
// This will run when the client is garbage collected
|
||||
if (heldValue && heldValue.userId) {
|
||||
logger.debug(`[FinalizationRegistry] Cleaning up client for user ${heldValue.userId}`);
|
||||
} else {
|
||||
logger.debug('[FinalizationRegistry] Cleaning up client');
|
||||
}
|
||||
})
|
||||
} catch (e) {
|
||||
// Ignore errors
|
||||
}
|
||||
})
|
||||
: null;
|
||||
|
||||
/**
|
||||
@@ -134,8 +134,8 @@ function disposeClient(client) {
|
||||
if (client.message_delta) {
|
||||
client.message_delta = null;
|
||||
}
|
||||
if (client.isClaudeLatest !== undefined) {
|
||||
client.isClaudeLatest = null;
|
||||
if (client.isClaude3 !== undefined) {
|
||||
client.isClaude3 = null;
|
||||
}
|
||||
if (client.useMessages !== undefined) {
|
||||
client.useMessages = null;
|
||||
|
||||
@@ -128,7 +128,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
|
||||
clientRef = new WeakRef(client);
|
||||
|
||||
getAbortData = () => {
|
||||
const currentClient = clientRef?.deref();
|
||||
const currentClient = clientRef.deref();
|
||||
const currentText =
|
||||
currentClient?.getStreamText != null ? currentClient.getStreamText() : getPartialText();
|
||||
|
||||
@@ -228,7 +228,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
|
||||
|
||||
if (!client?.skipSaveUserMessage && latestUserMessage) {
|
||||
await saveMessage(req, latestUserMessage, {
|
||||
context: "api/server/controllers/AskController.js - don't skip saving user message",
|
||||
context: 'api/server/controllers/AskController.js - don\'t skip saving user message',
|
||||
});
|
||||
}
|
||||
|
||||
@@ -255,7 +255,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
|
||||
logger.error('[AskController] Error handling request', error);
|
||||
let partialText = '';
|
||||
try {
|
||||
const currentClient = clientRef?.deref();
|
||||
const currentClient = clientRef.deref();
|
||||
partialText =
|
||||
currentClient?.getStreamText != null ? currentClient.getStreamText() : getPartialText();
|
||||
} catch (getTextError) {
|
||||
@@ -268,7 +268,6 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
|
||||
conversationId: reqDataContext.conversationId,
|
||||
messageId: reqDataContext.responseMessageId,
|
||||
parentMessageId: overrideParentMessageId ?? reqDataContext.userMessageId ?? parentMessageId,
|
||||
userMessageId: reqDataContext.userMessageId,
|
||||
})
|
||||
.catch((err) => {
|
||||
logger.error('[AskController] Error in `handleAbortError` during catch block', err);
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
const openIdClient = require('openid-client');
|
||||
const cookies = require('cookie');
|
||||
const jwt = require('jsonwebtoken');
|
||||
const {
|
||||
@@ -6,12 +5,9 @@ const {
|
||||
resetPassword,
|
||||
setAuthTokens,
|
||||
requestPasswordReset,
|
||||
setOpenIDAuthTokens,
|
||||
} = require('~/server/services/AuthService');
|
||||
const { findSession, getUserById, deleteAllUserSessions, findUser } = require('~/models');
|
||||
const { getOpenIdConfig } = require('~/strategies');
|
||||
const { findSession, getUserById, deleteAllUserSessions } = require('~/models');
|
||||
const { logger } = require('~/config');
|
||||
const { isEnabled } = require('~/server/utils');
|
||||
|
||||
const registrationController = async (req, res) => {
|
||||
try {
|
||||
@@ -59,28 +55,10 @@ const resetPasswordController = async (req, res) => {
|
||||
|
||||
const refreshController = async (req, res) => {
|
||||
const refreshToken = req.headers.cookie ? cookies.parse(req.headers.cookie).refreshToken : null;
|
||||
const token_provider = req.headers.cookie
|
||||
? cookies.parse(req.headers.cookie).token_provider
|
||||
: null;
|
||||
if (!refreshToken) {
|
||||
return res.status(200).send('Refresh token not provided');
|
||||
}
|
||||
if (token_provider === 'openid' && isEnabled(process.env.OPENID_REUSE_TOKENS) === true) {
|
||||
try {
|
||||
const openIdConfig = getOpenIdConfig();
|
||||
const tokenset = await openIdClient.refreshTokenGrant(openIdConfig, refreshToken);
|
||||
const claims = tokenset.claims();
|
||||
const user = await findUser({ email: claims.email });
|
||||
if (!user) {
|
||||
return res.status(401).redirect('/login');
|
||||
}
|
||||
const token = setOpenIDAuthTokens(tokenset, res);
|
||||
return res.status(200).send({ token, user });
|
||||
} catch (error) {
|
||||
logger.error('[refreshController] OpenID token refresh error', error);
|
||||
return res.status(403).send('Invalid OpenID refresh token');
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const payload = jwt.verify(refreshToken, process.env.JWT_REFRESH_SECRET);
|
||||
const user = await getUserById(payload.id, '-password -__v -totpSecret');
|
||||
|
||||
@@ -123,7 +123,7 @@ const EditController = async (req, res, next, initializeClient) => {
|
||||
clientRef = new WeakRef(client);
|
||||
|
||||
getAbortData = () => {
|
||||
const currentClient = clientRef?.deref();
|
||||
const currentClient = clientRef.deref();
|
||||
const currentText =
|
||||
currentClient?.getStreamText != null ? currentClient.getStreamText() : getPartialText();
|
||||
|
||||
@@ -219,7 +219,7 @@ const EditController = async (req, res, next, initializeClient) => {
|
||||
logger.error('[EditController] Error handling request', error);
|
||||
let partialText = '';
|
||||
try {
|
||||
const currentClient = clientRef?.deref();
|
||||
const currentClient = clientRef.deref();
|
||||
partialText =
|
||||
currentClient?.getStreamText != null ? currentClient.getStreamText() : getPartialText();
|
||||
} catch (getTextError) {
|
||||
@@ -232,7 +232,6 @@ const EditController = async (req, res, next, initializeClient) => {
|
||||
conversationId,
|
||||
messageId: reqDataContext.responseMessageId,
|
||||
parentMessageId: overrideParentMessageId ?? userMessageId ?? parentMessageId,
|
||||
userMessageId,
|
||||
})
|
||||
.catch((err) => {
|
||||
logger.error('[EditController] Error in `handleAbortError` during catch block', err);
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
const { CacheKeys, AuthType } = require('librechat-data-provider');
|
||||
const { getToolkitKey } = require('~/server/services/ToolService');
|
||||
const { addOpenAPISpecs } = require('~/app/clients/tools/util/addOpenAPISpecs');
|
||||
const { getCustomConfig } = require('~/server/services/Config');
|
||||
const { availableTools } = require('~/app/clients/tools');
|
||||
const { getMCPManager } = require('~/config');
|
||||
@@ -69,7 +69,7 @@ const getAvailablePluginsController = async (req, res) => {
|
||||
);
|
||||
}
|
||||
|
||||
let plugins = authenticatedPlugins;
|
||||
let plugins = await addOpenAPISpecs(authenticatedPlugins);
|
||||
|
||||
if (includedTools.length > 0) {
|
||||
plugins = plugins.filter((plugin) => includedTools.includes(plugin.pluginKey));
|
||||
@@ -105,11 +105,11 @@ const getAvailableTools = async (req, res) => {
|
||||
return;
|
||||
}
|
||||
|
||||
let pluginManifest = availableTools;
|
||||
const pluginManifest = availableTools;
|
||||
const customConfig = await getCustomConfig();
|
||||
if (customConfig?.mcpServers != null) {
|
||||
const mcpManager = getMCPManager();
|
||||
pluginManifest = await mcpManager.loadManifestTools(pluginManifest);
|
||||
await mcpManager.loadManifestTools(pluginManifest);
|
||||
}
|
||||
|
||||
/** @type {TPlugin[]} */
|
||||
@@ -128,7 +128,7 @@ const getAvailableTools = async (req, res) => {
|
||||
(plugin) =>
|
||||
toolDefinitions[plugin.pluginKey] !== undefined ||
|
||||
(plugin.toolkit === true &&
|
||||
Object.keys(toolDefinitions).some((key) => getToolkitKey(key) === plugin.pluginKey)),
|
||||
Object.keys(toolDefinitions).some((key) => key.startsWith(`${plugin.pluginKey}_`))),
|
||||
);
|
||||
|
||||
await cache.set(CacheKeys.TOOLS, tools);
|
||||
|
||||
@@ -1,10 +1,4 @@
|
||||
const {
|
||||
Tools,
|
||||
Constants,
|
||||
FileSources,
|
||||
webSearchKeys,
|
||||
extractWebSearchEnvVars,
|
||||
} = require('librechat-data-provider');
|
||||
const { FileSources } = require('librechat-data-provider');
|
||||
const {
|
||||
Balance,
|
||||
getFiles,
|
||||
@@ -89,6 +83,7 @@ const deleteUserFiles = async (req) => {
|
||||
const updateUserPluginsController = async (req, res) => {
|
||||
const { user } = req;
|
||||
const { pluginKey, action, auth, isEntityTool } = req.body;
|
||||
let authService;
|
||||
try {
|
||||
if (!isEntityTool) {
|
||||
const userPluginsService = await updateUserPluginsService(user, pluginKey, action);
|
||||
@@ -100,55 +95,32 @@ const updateUserPluginsController = async (req, res) => {
|
||||
}
|
||||
}
|
||||
|
||||
if (auth == null) {
|
||||
return res.status(200).send();
|
||||
}
|
||||
|
||||
let keys = Object.keys(auth);
|
||||
if (keys.length === 0 && pluginKey !== Tools.web_search) {
|
||||
return res.status(200).send();
|
||||
}
|
||||
const values = Object.values(auth);
|
||||
|
||||
/** @type {number} */
|
||||
let status = 200;
|
||||
/** @type {string} */
|
||||
let message;
|
||||
/** @type {IPluginAuth | Error} */
|
||||
let authService;
|
||||
|
||||
if (pluginKey === Tools.web_search) {
|
||||
/** @type {TCustomConfig['webSearch']} */
|
||||
const webSearchConfig = req.app.locals?.webSearch;
|
||||
keys = extractWebSearchEnvVars({
|
||||
keys: action === 'install' ? keys : webSearchKeys,
|
||||
config: webSearchConfig,
|
||||
});
|
||||
}
|
||||
|
||||
if (action === 'install') {
|
||||
for (let i = 0; i < keys.length; i++) {
|
||||
authService = await updateUserPluginAuth(user.id, keys[i], pluginKey, values[i]);
|
||||
if (authService instanceof Error) {
|
||||
logger.error('[authService]', authService);
|
||||
({ status, message } = authService);
|
||||
if (auth) {
|
||||
const keys = Object.keys(auth);
|
||||
const values = Object.values(auth);
|
||||
if (action === 'install' && keys.length > 0) {
|
||||
for (let i = 0; i < keys.length; i++) {
|
||||
authService = await updateUserPluginAuth(user.id, keys[i], pluginKey, values[i]);
|
||||
if (authService instanceof Error) {
|
||||
logger.error('[authService]', authService);
|
||||
const { status, message } = authService;
|
||||
res.status(status).send({ message });
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if (action === 'uninstall') {
|
||||
for (let i = 0; i < keys.length; i++) {
|
||||
authService = await deleteUserPluginAuth(user.id, keys[i]);
|
||||
if (authService instanceof Error) {
|
||||
logger.error('[authService]', authService);
|
||||
({ status, message } = authService);
|
||||
if (action === 'uninstall' && keys.length > 0) {
|
||||
for (let i = 0; i < keys.length; i++) {
|
||||
authService = await deleteUserPluginAuth(user.id, keys[i]);
|
||||
if (authService instanceof Error) {
|
||||
logger.error('[authService]', authService);
|
||||
const { status, message } = authService;
|
||||
res.status(status).send({ message });
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (status === 200) {
|
||||
return res.status(status).send();
|
||||
}
|
||||
|
||||
res.status(status).send({ message });
|
||||
res.status(200).send();
|
||||
} catch (err) {
|
||||
logger.error('[updateUserPluginsController]', err);
|
||||
return res.status(500).json({ message: 'Something went wrong.' });
|
||||
|
||||
@@ -14,6 +14,15 @@ const { loadAuthValues } = require('~/server/services/Tools/credentials');
|
||||
const { saveBase64Image } = require('~/server/services/Files/process');
|
||||
const { logger, sendEvent } = require('~/config');
|
||||
|
||||
/** @typedef {import('@librechat/agents').Graph} Graph */
|
||||
/** @typedef {import('@librechat/agents').EventHandler} EventHandler */
|
||||
/** @typedef {import('@librechat/agents').ModelEndData} ModelEndData */
|
||||
/** @typedef {import('@librechat/agents').ToolEndData} ToolEndData */
|
||||
/** @typedef {import('@librechat/agents').ToolEndCallback} ToolEndCallback */
|
||||
/** @typedef {import('@librechat/agents').ChatModelStreamHandler} ChatModelStreamHandler */
|
||||
/** @typedef {import('@librechat/agents').ContentAggregatorResult['aggregateContent']} ContentAggregator */
|
||||
/** @typedef {import('@librechat/agents').GraphEvents} GraphEvents */
|
||||
|
||||
class ModelEndHandler {
|
||||
/**
|
||||
* @param {Array<UsageMetadata>} collectedUsage
|
||||
@@ -29,7 +38,7 @@ class ModelEndHandler {
|
||||
* @param {string} event
|
||||
* @param {ModelEndData | undefined} data
|
||||
* @param {Record<string, unknown> | undefined} metadata
|
||||
* @param {StandardGraph} graph
|
||||
* @param {Graph} graph
|
||||
* @returns
|
||||
*/
|
||||
handle(event, data, metadata, graph) {
|
||||
@@ -52,10 +61,7 @@ class ModelEndHandler {
|
||||
}
|
||||
|
||||
this.collectedUsage.push(usage);
|
||||
const streamingDisabled = !!(
|
||||
graph.clientOptions?.disableStreaming || graph?.boundModel?.disableStreaming
|
||||
);
|
||||
if (!streamingDisabled) {
|
||||
if (!graph.clientOptions?.disableStreaming) {
|
||||
return;
|
||||
}
|
||||
if (!data.output.content) {
|
||||
@@ -237,30 +243,6 @@ function createToolEndCallback({ req, res, artifactPromises }) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (output.artifact[Tools.web_search]) {
|
||||
artifactPromises.push(
|
||||
(async () => {
|
||||
const name = `${output.name}_${output.tool_call_id}_${nanoid()}`;
|
||||
const attachment = {
|
||||
name,
|
||||
type: Tools.web_search,
|
||||
messageId: metadata.run_id,
|
||||
toolCallId: output.tool_call_id,
|
||||
conversationId: metadata.thread_id,
|
||||
[Tools.web_search]: { ...output.artifact[Tools.web_search] },
|
||||
};
|
||||
if (!res.headersSent) {
|
||||
return attachment;
|
||||
}
|
||||
res.write(`event: attachment\ndata: ${JSON.stringify(attachment)}\n\n`);
|
||||
return attachment;
|
||||
})().catch((error) => {
|
||||
logger.error('Error processing artifact content:', error);
|
||||
return null;
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
if (output.artifact.content) {
|
||||
/** @type {FormattedContent[]} */
|
||||
const content = output.artifact.content;
|
||||
|
||||
@@ -39,6 +39,9 @@ const BaseClient = require('~/app/clients/BaseClient');
|
||||
const { logger, sendEvent } = require('~/config');
|
||||
const { createRun } = require('./run');
|
||||
|
||||
/** @typedef {import('@librechat/agents').MessageContentComplex} MessageContentComplex */
|
||||
/** @typedef {import('@langchain/core/runnables').RunnableConfig} RunnableConfig */
|
||||
|
||||
/**
|
||||
* @param {ServerRequest} req
|
||||
* @param {Agent} agent
|
||||
@@ -55,7 +58,7 @@ const payloadParser = ({ req, agent, endpoint }) => {
|
||||
|
||||
const legacyContentEndpoints = new Set([KnownEndpoints.groq, KnownEndpoints.deepseek]);
|
||||
|
||||
const noSystemModelRegex = [/\b(o1-preview|o1-mini|amazon\.titan-text)\b/gi];
|
||||
const noSystemModelRegex = [/\b(o\d)\b/gi];
|
||||
|
||||
// const { processMemory, memoryInstructions } = require('~/server/services/Endpoints/agents/memory');
|
||||
// const { getFormattedMemories } = require('~/models/Memory');
|
||||
@@ -145,13 +148,19 @@ class AgentClient extends BaseClient {
|
||||
* @param {MongoFile[]} attachments
|
||||
*/
|
||||
checkVisionRequest(attachments) {
|
||||
logger.info(
|
||||
'[api/server/controllers/agents/client.js #checkVisionRequest] not implemented',
|
||||
attachments,
|
||||
);
|
||||
// if (!attachments) {
|
||||
// return;
|
||||
// }
|
||||
|
||||
// const availableModels = this.options.modelsConfig?.[this.options.endpoint];
|
||||
// if (!availableModels) {
|
||||
// return;
|
||||
// }
|
||||
|
||||
// let visionRequestDetected = false;
|
||||
// for (const file of attachments) {
|
||||
// if (file?.type?.includes('image')) {
|
||||
@@ -162,11 +171,13 @@ class AgentClient extends BaseClient {
|
||||
// if (!visionRequestDetected) {
|
||||
// return;
|
||||
// }
|
||||
|
||||
// this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
|
||||
// if (this.isVisionModel) {
|
||||
// delete this.modelOptions.stop;
|
||||
// return;
|
||||
// }
|
||||
|
||||
// for (const model of availableModels) {
|
||||
// if (!validateVisionModel({ model, availableModels })) {
|
||||
// continue;
|
||||
@@ -176,12 +187,14 @@ class AgentClient extends BaseClient {
|
||||
// delete this.modelOptions.stop;
|
||||
// return;
|
||||
// }
|
||||
|
||||
// if (!availableModels.includes(this.defaultVisionModel)) {
|
||||
// return;
|
||||
// }
|
||||
// if (!validateVisionModel({ model: this.defaultVisionModel, availableModels })) {
|
||||
// return;
|
||||
// }
|
||||
|
||||
// this.modelOptions.model = this.defaultVisionModel;
|
||||
// this.isVisionModel = true;
|
||||
// delete this.modelOptions.stop;
|
||||
@@ -540,7 +553,7 @@ class AgentClient extends BaseClient {
|
||||
}
|
||||
|
||||
async chatCompletion({ payload, abortController = null }) {
|
||||
/** @type {Partial<GraphRunnableConfig>} */
|
||||
/** @type {Partial<RunnableConfig> & { version: 'v1' | 'v2'; run_id?: string; streamMode: string }} */
|
||||
let config;
|
||||
/** @type {ReturnType<createRun>} */
|
||||
let run;
|
||||
@@ -660,7 +673,7 @@ class AgentClient extends BaseClient {
|
||||
this.indexTokenCountMap,
|
||||
toolSet,
|
||||
);
|
||||
if (legacyContentEndpoints.has(this.options.agent.endpoint?.toLowerCase())) {
|
||||
if (legacyContentEndpoints.has(this.options.agent.endpoint)) {
|
||||
initialMessages = formatContentStrings(initialMessages);
|
||||
}
|
||||
|
||||
@@ -715,14 +728,12 @@ class AgentClient extends BaseClient {
|
||||
}
|
||||
|
||||
if (noSystemMessages === true && systemContent?.length) {
|
||||
const latestMessageContent = _messages.pop().content;
|
||||
let latestMessage = _messages.pop().content;
|
||||
if (typeof latestMessage !== 'string') {
|
||||
latestMessageContent[0].text = [systemContent, latestMessageContent[0].text].join('\n');
|
||||
_messages.push(new HumanMessage({ content: latestMessageContent }));
|
||||
} else {
|
||||
const text = [systemContent, latestMessageContent].join('\n');
|
||||
_messages.push(new HumanMessage(text));
|
||||
latestMessage = latestMessage[0].text;
|
||||
}
|
||||
latestMessage = [systemContent, latestMessage].join('\n');
|
||||
_messages.push(new HumanMessage(latestMessage));
|
||||
}
|
||||
|
||||
let messages = _messages;
|
||||
|
||||
@@ -259,7 +259,6 @@ const AgentController = async (req, res, next, initializeClient, addTitle) => {
|
||||
sender,
|
||||
messageId: responseMessageId,
|
||||
parentMessageId: overrideParentMessageId ?? userMessageId ?? parentMessageId,
|
||||
userMessageId,
|
||||
})
|
||||
.catch((err) => {
|
||||
logger.error('[api/server/controllers/agents/request] Error in `handleAbortError`', err);
|
||||
|
||||
@@ -23,7 +23,6 @@ const { updateAction, getActions } = require('~/models/Action');
|
||||
const { updateAgentProjects } = require('~/models/Agent');
|
||||
const { getProjectByName } = require('~/models/Project');
|
||||
const { deleteFileByFilter } = require('~/models/File');
|
||||
const { revertAgentVersion } = require('~/models/Agent');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
const systemTools = {
|
||||
@@ -105,13 +104,11 @@ const getAgentHandler = async (req, res) => {
|
||||
return res.status(404).json({ error: 'Agent not found' });
|
||||
}
|
||||
|
||||
agent.version = agent.versions ? agent.versions.length : 0;
|
||||
|
||||
if (agent.avatar && agent.avatar?.source === FileSources.s3) {
|
||||
const originalUrl = agent.avatar.filepath;
|
||||
agent.avatar.filepath = await refreshS3Url(agent.avatar);
|
||||
if (originalUrl !== agent.avatar.filepath) {
|
||||
await updateAgent({ id }, { avatar: agent.avatar }, req.user.id);
|
||||
await updateAgent({ id }, { avatar: agent.avatar });
|
||||
}
|
||||
}
|
||||
|
||||
@@ -130,7 +127,6 @@ const getAgentHandler = async (req, res) => {
|
||||
author: agent.author,
|
||||
projectIds: agent.projectIds,
|
||||
isCollaborative: agent.isCollaborative,
|
||||
version: agent.version,
|
||||
});
|
||||
}
|
||||
return res.status(200).json(agent);
|
||||
@@ -169,9 +165,7 @@ const updateAgentHandler = async (req, res) => {
|
||||
}
|
||||
|
||||
let updatedAgent =
|
||||
Object.keys(updateData).length > 0
|
||||
? await updateAgent({ id }, updateData, req.user.id)
|
||||
: existingAgent;
|
||||
Object.keys(updateData).length > 0 ? await updateAgent({ id }, updateData) : existingAgent;
|
||||
|
||||
if (projectIds || removeProjectIds) {
|
||||
updatedAgent = await updateAgentProjects({
|
||||
@@ -193,14 +187,6 @@ const updateAgentHandler = async (req, res) => {
|
||||
return res.json(updatedAgent);
|
||||
} catch (error) {
|
||||
logger.error('[/Agents/:id] Error updating Agent', error);
|
||||
|
||||
if (error.statusCode === 409) {
|
||||
return res.status(409).json({
|
||||
error: error.message,
|
||||
details: error.details,
|
||||
});
|
||||
}
|
||||
|
||||
res.status(500).json({ error: error.message });
|
||||
}
|
||||
};
|
||||
@@ -407,7 +393,7 @@ const uploadAgentAvatarHandler = async (req, res) => {
|
||||
},
|
||||
};
|
||||
|
||||
promises.push(await updateAgent({ id: agent_id, author: req.user.id }, data, req.user.id));
|
||||
promises.push(await updateAgent({ id: agent_id, author: req.user.id }, data));
|
||||
|
||||
const resolved = await Promise.all(promises);
|
||||
res.status(201).json(resolved[0]);
|
||||
@@ -425,66 +411,6 @@ const uploadAgentAvatarHandler = async (req, res) => {
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Reverts an agent to a previous version from its version history.
|
||||
* @route PATCH /agents/:id/revert
|
||||
* @param {object} req - Express Request object
|
||||
* @param {object} req.params - Request parameters
|
||||
* @param {string} req.params.id - The ID of the agent to revert
|
||||
* @param {object} req.body - Request body
|
||||
* @param {number} req.body.version_index - The index of the version to revert to
|
||||
* @param {object} req.user - Authenticated user information
|
||||
* @param {string} req.user.id - User ID
|
||||
* @param {string} req.user.role - User role
|
||||
* @param {ServerResponse} res - Express Response object
|
||||
* @returns {Promise<Agent>} 200 - The updated agent after reverting to the specified version
|
||||
* @throws {Error} 400 - If version_index is missing
|
||||
* @throws {Error} 403 - If user doesn't have permission to modify the agent
|
||||
* @throws {Error} 404 - If agent not found
|
||||
* @throws {Error} 500 - If there's an internal server error during the reversion process
|
||||
*/
|
||||
const revertAgentVersionHandler = async (req, res) => {
|
||||
try {
|
||||
const { id } = req.params;
|
||||
const { version_index } = req.body;
|
||||
|
||||
if (version_index === undefined) {
|
||||
return res.status(400).json({ error: 'version_index is required' });
|
||||
}
|
||||
|
||||
const isAdmin = req.user.role === SystemRoles.ADMIN;
|
||||
const existingAgent = await getAgent({ id });
|
||||
|
||||
if (!existingAgent) {
|
||||
return res.status(404).json({ error: 'Agent not found' });
|
||||
}
|
||||
|
||||
const isAuthor = existingAgent.author.toString() === req.user.id;
|
||||
const hasEditPermission = existingAgent.isCollaborative || isAdmin || isAuthor;
|
||||
|
||||
if (!hasEditPermission) {
|
||||
return res.status(403).json({
|
||||
error: 'You do not have permission to modify this non-collaborative agent',
|
||||
});
|
||||
}
|
||||
|
||||
const updatedAgent = await revertAgentVersion({ id }, version_index);
|
||||
|
||||
if (updatedAgent.author) {
|
||||
updatedAgent.author = updatedAgent.author.toString();
|
||||
}
|
||||
|
||||
if (updatedAgent.author !== req.user.id) {
|
||||
delete updatedAgent.author;
|
||||
}
|
||||
|
||||
return res.json(updatedAgent);
|
||||
} catch (error) {
|
||||
logger.error('[/agents/:id/revert] Error reverting Agent version', error);
|
||||
res.status(500).json({ error: error.message });
|
||||
}
|
||||
};
|
||||
|
||||
module.exports = {
|
||||
createAgent: createAgentHandler,
|
||||
getAgent: getAgentHandler,
|
||||
@@ -493,5 +419,4 @@ module.exports = {
|
||||
deleteAgent: deleteAgentHandler,
|
||||
getListAgents: getListAgentsHandler,
|
||||
uploadAgentAvatar: uploadAgentAvatarHandler,
|
||||
revertAgentVersion: revertAgentVersionHandler,
|
||||
};
|
||||
|
||||
@@ -119,7 +119,7 @@ const chatV1 = async (req, res) => {
|
||||
} else if (/Files.*are invalid/.test(error.message)) {
|
||||
const errorMessage = `Files are invalid, or may not have uploaded yet.${
|
||||
endpoint === EModelEndpoint.azureAssistants
|
||||
? " If using Azure OpenAI, files are only available in the region of the assistant's model at the time of upload."
|
||||
? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.'
|
||||
: ''
|
||||
}`;
|
||||
return sendResponse(req, res, messageData, errorMessage);
|
||||
@@ -326,15 +326,8 @@ const chatV1 = async (req, res) => {
|
||||
|
||||
file_ids = files.map(({ file_id }) => file_id);
|
||||
if (file_ids.length || thread_file_ids.length) {
|
||||
userMessage.file_ids = file_ids;
|
||||
attachedFileIds = new Set([...file_ids, ...thread_file_ids]);
|
||||
if (endpoint === EModelEndpoint.azureAssistants) {
|
||||
userMessage.attachments = Array.from(attachedFileIds).map((file_id) => ({
|
||||
file_id,
|
||||
tools: [{ type: 'file_search' }],
|
||||
}));
|
||||
} else {
|
||||
userMessage.file_ids = Array.from(attachedFileIds);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
@@ -386,8 +379,8 @@ const chatV1 = async (req, res) => {
|
||||
body.additional_instructions ? `${body.additional_instructions}\n` : ''
|
||||
}The user has uploaded ${imageCount} image${pluralized}.
|
||||
Use the \`${ImageVisionTool.function.name}\` tool to retrieve ${
|
||||
plural ? '' : 'a '
|
||||
}detailed text description${pluralized} for ${plural ? 'each' : 'the'} image${pluralized}.`;
|
||||
plural ? '' : 'a '
|
||||
}detailed text description${pluralized} for ${plural ? 'each' : 'the'} image${pluralized}.`;
|
||||
|
||||
return files;
|
||||
};
|
||||
@@ -583,8 +576,6 @@ const chatV1 = async (req, res) => {
|
||||
thread_id,
|
||||
model: assistant_id,
|
||||
endpoint,
|
||||
spec: endpointOption.spec,
|
||||
iconURL: endpointOption.iconURL,
|
||||
};
|
||||
|
||||
sendMessage(res, {
|
||||
|
||||
@@ -428,8 +428,6 @@ const chatV2 = async (req, res) => {
|
||||
thread_id,
|
||||
model: assistant_id,
|
||||
endpoint,
|
||||
spec: endpointOption.spec,
|
||||
iconURL: endpointOption.iconURL,
|
||||
};
|
||||
|
||||
sendMessage(res, {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
const cookies = require('cookie');
|
||||
const { getOpenIdConfig } = require('~/strategies');
|
||||
const { Issuer } = require('openid-client');
|
||||
const { logoutUser } = require('~/server/services/AuthService');
|
||||
const { isEnabled } = require('~/server/utils');
|
||||
const { logger } = require('~/config');
|
||||
@@ -10,29 +10,20 @@ const logoutController = async (req, res) => {
|
||||
const logout = await logoutUser(req, refreshToken);
|
||||
const { status, message } = logout;
|
||||
res.clearCookie('refreshToken');
|
||||
res.clearCookie('token_provider');
|
||||
const response = { message };
|
||||
if (
|
||||
req.user.openidId != null &&
|
||||
isEnabled(process.env.OPENID_USE_END_SESSION_ENDPOINT) &&
|
||||
process.env.OPENID_ISSUER
|
||||
) {
|
||||
const openIdConfig = getOpenIdConfig();
|
||||
if (!openIdConfig) {
|
||||
const issuer = await Issuer.discover(process.env.OPENID_ISSUER);
|
||||
const redirect = issuer.metadata.end_session_endpoint;
|
||||
if (!redirect) {
|
||||
logger.warn(
|
||||
'[logoutController] OpenID config not found. Please verify that the open id configuration and initialization are correct.',
|
||||
'[logoutController] end_session_endpoint not found in OpenID issuer metadata. Please verify that the issuer is correct.',
|
||||
);
|
||||
} else {
|
||||
const endSessionEndpoint = openIdConfig
|
||||
? openIdConfig.serverMetadata().end_session_endpoint
|
||||
: null;
|
||||
if (endSessionEndpoint) {
|
||||
response.redirect = endSessionEndpoint;
|
||||
} else {
|
||||
logger.warn(
|
||||
'[logoutController] end_session_endpoint not found in OpenID issuer metadata. Please verify that the issuer is correct.',
|
||||
);
|
||||
}
|
||||
response.redirect = redirect;
|
||||
}
|
||||
}
|
||||
return res.status(status).send(response);
|
||||
|
||||
@@ -6,7 +6,6 @@ const {
|
||||
Permissions,
|
||||
ToolCallTypes,
|
||||
PermissionTypes,
|
||||
loadWebSearchAuth,
|
||||
} = require('librechat-data-provider');
|
||||
const { processFileURL, uploadImageBuffer } = require('~/server/services/Files/process');
|
||||
const { processCodeOutput } = require('~/server/services/Files/Code/process');
|
||||
@@ -25,36 +24,6 @@ const toolAccessPermType = {
|
||||
[Tools.execute_code]: PermissionTypes.RUN_CODE,
|
||||
};
|
||||
|
||||
/**
|
||||
* Verifies web search authentication, ensuring each category has at least
|
||||
* one fully authenticated service.
|
||||
*
|
||||
* @param {ServerRequest} req - The request object
|
||||
* @param {ServerResponse} res - The response object
|
||||
* @returns {Promise<void>} A promise that resolves when the function has completed
|
||||
*/
|
||||
const verifyWebSearchAuth = async (req, res) => {
|
||||
try {
|
||||
const userId = req.user.id;
|
||||
/** @type {TCustomConfig['webSearch']} */
|
||||
const webSearchConfig = req.app.locals?.webSearch || {};
|
||||
const result = await loadWebSearchAuth({
|
||||
userId,
|
||||
loadAuthValues,
|
||||
webSearchConfig,
|
||||
throwError: false,
|
||||
});
|
||||
|
||||
return res.status(200).json({
|
||||
authenticated: result.authenticated,
|
||||
authTypes: result.authTypes,
|
||||
});
|
||||
} catch (error) {
|
||||
console.error('Error in verifyWebSearchAuth:', error);
|
||||
return res.status(500).json({ message: error.message });
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @param {ServerRequest} req - The request object, containing information about the HTTP request.
|
||||
* @param {ServerResponse} res - The response object, used to send back the desired HTTP response.
|
||||
@@ -63,9 +32,6 @@ const verifyWebSearchAuth = async (req, res) => {
|
||||
const verifyToolAuth = async (req, res) => {
|
||||
try {
|
||||
const { toolId } = req.params;
|
||||
if (toolId === Tools.web_search) {
|
||||
return await verifyWebSearchAuth(req, res);
|
||||
}
|
||||
const authFields = fieldsMap[toolId];
|
||||
if (!authFields) {
|
||||
res.status(404).json({ message: 'Tool not found' });
|
||||
|
||||
@@ -24,13 +24,10 @@ const routes = require('./routes');
|
||||
|
||||
const { PORT, HOST, ALLOW_SOCIAL_LOGIN, DISABLE_COMPRESSION, TRUST_PROXY } = process.env ?? {};
|
||||
|
||||
// Allow PORT=0 to be used for automatic free port assignment
|
||||
const port = isNaN(Number(PORT)) ? 3080 : Number(PORT);
|
||||
const port = Number(PORT) || 3080;
|
||||
const host = HOST || 'localhost';
|
||||
const trusted_proxy = Number(TRUST_PROXY) || 1; /* trust first proxy by default */
|
||||
|
||||
const app = express();
|
||||
|
||||
const startServer = async () => {
|
||||
if (typeof Bun !== 'undefined') {
|
||||
axios.defaults.headers.common['Accept-Encoding'] = 'gzip';
|
||||
@@ -39,9 +36,8 @@ const startServer = async () => {
|
||||
logger.info('Connected to MongoDB');
|
||||
await indexSync();
|
||||
|
||||
const app = express();
|
||||
app.disable('x-powered-by');
|
||||
app.set('trust proxy', trusted_proxy);
|
||||
|
||||
await AppService(app);
|
||||
|
||||
const indexPath = path.join(app.locals.paths.dist, 'index.html');
|
||||
@@ -53,29 +49,28 @@ const startServer = async () => {
|
||||
app.use(noIndex);
|
||||
app.use(errorController);
|
||||
app.use(express.json({ limit: '3mb' }));
|
||||
app.use(express.urlencoded({ extended: true, limit: '3mb' }));
|
||||
app.use(mongoSanitize());
|
||||
app.use(express.urlencoded({ extended: true, limit: '3mb' }));
|
||||
app.use(staticCache(app.locals.paths.dist));
|
||||
app.use(staticCache(app.locals.paths.fonts));
|
||||
app.use(staticCache(app.locals.paths.assets));
|
||||
app.set('trust proxy', trusted_proxy);
|
||||
app.use(cors());
|
||||
app.use(cookieParser());
|
||||
|
||||
if (!isEnabled(DISABLE_COMPRESSION)) {
|
||||
app.use(compression());
|
||||
} else {
|
||||
console.warn('Response compression has been disabled via DISABLE_COMPRESSION.');
|
||||
}
|
||||
|
||||
// Serve static assets with aggressive caching
|
||||
app.use(staticCache(app.locals.paths.dist));
|
||||
app.use(staticCache(app.locals.paths.fonts));
|
||||
app.use(staticCache(app.locals.paths.assets));
|
||||
|
||||
if (!ALLOW_SOCIAL_LOGIN) {
|
||||
console.warn('Social logins are disabled. Set ALLOW_SOCIAL_LOGIN=true to enable them.');
|
||||
console.warn(
|
||||
'Social logins are disabled. Set Environment Variable "ALLOW_SOCIAL_LOGIN" to true to enable them.',
|
||||
);
|
||||
}
|
||||
|
||||
/* OAUTH */
|
||||
app.use(passport.initialize());
|
||||
passport.use(jwtLogin());
|
||||
passport.use(await jwtLogin());
|
||||
passport.use(passportLogin());
|
||||
|
||||
/* LDAP Auth */
|
||||
@@ -84,7 +79,7 @@ const startServer = async () => {
|
||||
}
|
||||
|
||||
if (isEnabled(ALLOW_SOCIAL_LOGIN)) {
|
||||
await configureSocialLogins(app);
|
||||
configureSocialLogins(app);
|
||||
}
|
||||
|
||||
app.use('/oauth', routes.oauth);
|
||||
@@ -133,7 +128,7 @@ const startServer = async () => {
|
||||
});
|
||||
|
||||
app.listen(port, host, () => {
|
||||
if (host === '0.0.0.0') {
|
||||
if (host == '0.0.0.0') {
|
||||
logger.info(
|
||||
`Server listening on all interfaces at port ${port}. Use http://localhost:${port} to access it`,
|
||||
);
|
||||
@@ -181,6 +176,3 @@ process.on('uncaughtException', (err) => {
|
||||
|
||||
process.exit(1);
|
||||
});
|
||||
|
||||
// export app for easier testing purposes
|
||||
module.exports = app;
|
||||
|
||||
@@ -1,78 +0,0 @@
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const request = require('supertest');
|
||||
const { MongoMemoryServer } = require('mongodb-memory-server');
|
||||
const mongoose = require('mongoose');
|
||||
|
||||
describe('Server Configuration', () => {
|
||||
// Increase the default timeout to allow for Mongo cleanup
|
||||
jest.setTimeout(30_000);
|
||||
|
||||
let mongoServer;
|
||||
let app;
|
||||
|
||||
/** Mocked fs.readFileSync for index.html */
|
||||
const originalReadFileSync = fs.readFileSync;
|
||||
beforeAll(() => {
|
||||
fs.readFileSync = function (filepath, options) {
|
||||
if (filepath.includes('index.html')) {
|
||||
return '<!DOCTYPE html><html><head><title>LibreChat</title></head><body><div id="root"></div></body></html>';
|
||||
}
|
||||
return originalReadFileSync(filepath, options);
|
||||
};
|
||||
});
|
||||
|
||||
afterAll(() => {
|
||||
// Restore original fs.readFileSync
|
||||
fs.readFileSync = originalReadFileSync;
|
||||
});
|
||||
|
||||
beforeAll(async () => {
|
||||
mongoServer = await MongoMemoryServer.create();
|
||||
process.env.MONGO_URI = mongoServer.getUri();
|
||||
process.env.PORT = '0'; // Use a random available port
|
||||
app = require('~/server');
|
||||
|
||||
// Wait for the app to be healthy
|
||||
await healthCheckPoll(app);
|
||||
});
|
||||
|
||||
afterAll(async () => {
|
||||
await mongoServer.stop();
|
||||
await mongoose.disconnect();
|
||||
});
|
||||
|
||||
it('should return OK for /health', async () => {
|
||||
const response = await request(app).get('/health');
|
||||
expect(response.status).toBe(200);
|
||||
expect(response.text).toBe('OK');
|
||||
});
|
||||
|
||||
it('should not cache index page', async () => {
|
||||
const response = await request(app).get('/');
|
||||
expect(response.status).toBe(200);
|
||||
expect(response.headers['cache-control']).toBe('no-cache, no-store, must-revalidate');
|
||||
expect(response.headers['pragma']).toBe('no-cache');
|
||||
expect(response.headers['expires']).toBe('0');
|
||||
});
|
||||
});
|
||||
|
||||
// Polls the /health endpoint every 30ms for up to 10 seconds to wait for the server to start completely
|
||||
async function healthCheckPoll(app, retries = 0) {
|
||||
const maxRetries = Math.floor(10000 / 30); // 10 seconds / 30ms
|
||||
try {
|
||||
const response = await request(app).get('/health');
|
||||
if (response.status === 200) {
|
||||
return; // App is healthy
|
||||
}
|
||||
} catch (error) {
|
||||
// Ignore connection errors during polling
|
||||
}
|
||||
|
||||
if (retries < maxRetries) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 30));
|
||||
await healthCheckPoll(app, retries + 1);
|
||||
} else {
|
||||
throw new Error('App did not become healthy within 10 seconds.');
|
||||
}
|
||||
}
|
||||
@@ -311,7 +311,7 @@ const handleAbortError = async (res, req, error, data) => {
|
||||
} else {
|
||||
logger.error('[handleAbortError] AI response error; aborting request:', error);
|
||||
}
|
||||
const { sender, conversationId, messageId, parentMessageId, userMessageId, partialText } = data;
|
||||
const { sender, conversationId, messageId, parentMessageId, partialText } = data;
|
||||
|
||||
if (error.stack && error.stack.includes('google')) {
|
||||
logger.warn(
|
||||
@@ -344,10 +344,10 @@ const handleAbortError = async (res, req, error, data) => {
|
||||
parentMessageId,
|
||||
text: errorText,
|
||||
user: req.user.id,
|
||||
shouldSaveMessage: true,
|
||||
spec: endpointOption?.spec,
|
||||
iconURL: endpointOption?.iconURL,
|
||||
modelLabel: endpointOption?.modelLabel,
|
||||
shouldSaveMessage: userMessageId != null,
|
||||
model: endpointOption?.modelOptions?.model || req.body?.model,
|
||||
};
|
||||
|
||||
|
||||
@@ -1,13 +1,9 @@
|
||||
const cookies = require('cookie');
|
||||
const { isEnabled } = require('~/server/utils');
|
||||
const passport = require('passport');
|
||||
|
||||
// This middleware does not require authentication,
|
||||
// but if the user is authenticated, it will set the user object.
|
||||
const optionalJwtAuth = (req, res, next) => {
|
||||
const cookieHeader = req.headers.cookie;
|
||||
const tokenProvider = cookieHeader ? cookies.parse(cookieHeader).token_provider : null;
|
||||
const callback = (err, user) => {
|
||||
passport.authenticate('jwt', { session: false }, (err, user) => {
|
||||
if (err) {
|
||||
return next(err);
|
||||
}
|
||||
@@ -15,11 +11,7 @@ const optionalJwtAuth = (req, res, next) => {
|
||||
req.user = user;
|
||||
}
|
||||
next();
|
||||
};
|
||||
if (tokenProvider === 'openid' && isEnabled(process.env.OPENID_REUSE_TOKENS)) {
|
||||
return passport.authenticate('openidJwt', { session: false }, callback)(req, res, next);
|
||||
}
|
||||
passport.authenticate('jwt', { session: false }, callback)(req, res, next);
|
||||
})(req, res, next);
|
||||
};
|
||||
|
||||
module.exports = optionalJwtAuth;
|
||||
|
||||
@@ -1,23 +1,5 @@
|
||||
const passport = require('passport');
|
||||
const cookies = require('cookie');
|
||||
const { isEnabled } = require('~/server/utils');
|
||||
|
||||
/**
|
||||
* Custom Middleware to handle JWT authentication, with support for OpenID token reuse
|
||||
* Switches between JWT and OpenID authentication based on cookies and environment settings
|
||||
*/
|
||||
const requireJwtAuth = (req, res, next) => {
|
||||
// Check if token provider is specified in cookies
|
||||
const cookieHeader = req.headers.cookie;
|
||||
const tokenProvider = cookieHeader ? cookies.parse(cookieHeader).token_provider : null;
|
||||
|
||||
// Use OpenID authentication if token provider is OpenID and OPENID_REUSE_TOKENS is enabled
|
||||
if (tokenProvider === 'openid' && isEnabled(process.env.OPENID_REUSE_TOKENS)) {
|
||||
return passport.authenticate('openidJwt', { session: false })(req, res, next);
|
||||
}
|
||||
|
||||
// Default to standard JWT authentication
|
||||
return passport.authenticate('jwt', { session: false })(req, res, next);
|
||||
};
|
||||
const requireJwtAuth = passport.authenticate('jwt', { session: false });
|
||||
|
||||
module.exports = requireJwtAuth;
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
jest.mock('~/cache/getLogStores');
|
||||
const request = require('supertest');
|
||||
const express = require('express');
|
||||
const configRoute = require('../config');
|
||||
const routes = require('../');
|
||||
// file deepcode ignore UseCsurfForExpress/test: test
|
||||
const app = express();
|
||||
app.disable('x-powered-by');
|
||||
app.use('/api/config', configRoute);
|
||||
app.use('/api/config', routes.config);
|
||||
|
||||
afterEach(() => {
|
||||
delete process.env.APP_TITLE;
|
||||
|
||||
@@ -107,7 +107,7 @@ router.post('/:agent_id', async (req, res) => {
|
||||
.filter((tool) => !(tool && (tool.includes(domain) || tool.includes(action_id))))
|
||||
.concat(functions.map((tool) => `${tool.function.name}${actionDelimiter}${domain}`));
|
||||
|
||||
const updatedAgent = await updateAgent(agentQuery, { tools, actions }, req.user.id);
|
||||
const updatedAgent = await updateAgent(agentQuery, { tools, actions });
|
||||
|
||||
// Only update user field for new actions
|
||||
const actionUpdateData = { metadata, agent_id };
|
||||
@@ -172,7 +172,7 @@ router.delete('/:agent_id/:action_id', async (req, res) => {
|
||||
|
||||
const updatedTools = tools.filter((tool) => !(tool && tool.includes(domain)));
|
||||
|
||||
await updateAgent(agentQuery, { tools: updatedTools, actions: updatedActions }, req.user.id);
|
||||
await updateAgent(agentQuery, { tools: updatedTools, actions: updatedActions });
|
||||
// If admin, can delete any action, otherwise only user's actions
|
||||
const actionQuery = admin ? { action_id } : { action_id, user: req.user.id };
|
||||
await deleteAction(actionQuery);
|
||||
|
||||
@@ -78,15 +78,6 @@ router.post('/:id/duplicate', checkAgentCreate, v1.duplicateAgent);
|
||||
*/
|
||||
router.delete('/:id', checkAgentCreate, v1.deleteAgent);
|
||||
|
||||
/**
|
||||
* Reverts an agent to a previous version.
|
||||
* @route POST /agents/:id/revert
|
||||
* @param {string} req.params.id - Agent identifier.
|
||||
* @param {number} req.body.version_index - Index of the version to revert to.
|
||||
* @returns {Agent} 200 - success response - application/json
|
||||
*/
|
||||
router.post('/:id/revert', checkGlobalAgentShare, v1.revertAgentVersion);
|
||||
|
||||
/**
|
||||
* Returns a list of agents.
|
||||
* @route GET /agents
|
||||
|
||||
@@ -75,7 +75,6 @@ router.get('/', async function (req, res) {
|
||||
process.env.SHOW_BIRTHDAY_ICON === '',
|
||||
helpAndFaqURL: process.env.HELP_AND_FAQ_URL || 'https://librechat.ai',
|
||||
interface: req.app.locals.interfaceConfig,
|
||||
turnstile: req.app.locals.turnstileConfig,
|
||||
modelSpecs: req.app.locals.modelSpecs,
|
||||
balance: req.app.locals.balance,
|
||||
sharedLinksEnabled,
|
||||
@@ -85,26 +84,6 @@ router.get('/', async function (req, res) {
|
||||
bundlerURL: process.env.SANDPACK_BUNDLER_URL,
|
||||
staticBundlerURL: process.env.SANDPACK_STATIC_BUNDLER_URL,
|
||||
};
|
||||
/** @type {TCustomConfig['webSearch']} */
|
||||
const webSearchConfig = req.app.locals.webSearch;
|
||||
if (
|
||||
webSearchConfig != null &&
|
||||
(webSearchConfig.searchProvider ||
|
||||
webSearchConfig.scraperType ||
|
||||
webSearchConfig.rerankerType)
|
||||
) {
|
||||
payload.webSearch = {};
|
||||
}
|
||||
|
||||
if (webSearchConfig?.searchProvider) {
|
||||
payload.webSearch.searchProvider = webSearchConfig.searchProvider;
|
||||
}
|
||||
if (webSearchConfig?.scraperType) {
|
||||
payload.webSearch.scraperType = webSearchConfig.scraperType;
|
||||
}
|
||||
if (webSearchConfig?.rerankerType) {
|
||||
payload.webSearch.rerankerType = webSearchConfig.rerankerType;
|
||||
}
|
||||
|
||||
if (ldap) {
|
||||
payload.ldap = ldap;
|
||||
|
||||
@@ -74,7 +74,7 @@ router.post('/gen_title', async (req, res) => {
|
||||
res.status(200).json({ title });
|
||||
} else {
|
||||
res.status(404).json({
|
||||
message: "Title not found or method not implemented for the conversation's endpoint",
|
||||
message: 'Title not found or method not implemented for the conversation\'s endpoint',
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
@@ -21,7 +21,6 @@ const { getOpenAIClient } = require('~/server/controllers/assistants/helpers');
|
||||
const { loadAuthValues } = require('~/server/services/Tools/credentials');
|
||||
const { refreshS3FileUrls } = require('~/server/services/Files/S3/crud');
|
||||
const { getFiles, batchUpdateFiles } = require('~/models/File');
|
||||
const { getAssistant } = require('~/models/Assistant');
|
||||
const { getAgent } = require('~/models/Agent');
|
||||
const { getLogStores } = require('~/cache');
|
||||
const { logger } = require('~/config');
|
||||
@@ -95,7 +94,7 @@ router.delete('/', async (req, res) => {
|
||||
});
|
||||
}
|
||||
|
||||
/* Handle agent unlinking even if no valid files to delete */
|
||||
/* Handle entity unlinking even if no valid files to delete */
|
||||
if (req.body.agent_id && req.body.tool_resource && dbFiles.length === 0) {
|
||||
const agent = await getAgent({
|
||||
id: req.body.agent_id,
|
||||
@@ -105,32 +104,10 @@ router.delete('/', async (req, res) => {
|
||||
const agentFiles = files.filter((f) => toolResourceFiles.includes(f.file_id));
|
||||
|
||||
await processDeleteRequest({ req, files: agentFiles });
|
||||
res.status(200).json({ message: 'File associations removed successfully from agent' });
|
||||
res.status(200).json({ message: 'File associations removed successfully' });
|
||||
return;
|
||||
}
|
||||
|
||||
/* Handle assistant unlinking even if no valid files to delete */
|
||||
if (req.body.assistant_id && req.body.tool_resource && dbFiles.length === 0) {
|
||||
const assistant = await getAssistant({
|
||||
id: req.body.assistant_id,
|
||||
});
|
||||
|
||||
const toolResourceFiles = assistant.tool_resources?.[req.body.tool_resource]?.file_ids ?? [];
|
||||
const assistantFiles = files.filter((f) => toolResourceFiles.includes(f.file_id));
|
||||
|
||||
await processDeleteRequest({ req, files: assistantFiles });
|
||||
res.status(200).json({ message: 'File associations removed successfully from assistant' });
|
||||
return;
|
||||
} else if (
|
||||
req.body.assistant_id &&
|
||||
req.body.files?.[0]?.filepath === EModelEndpoint.azureAssistants
|
||||
) {
|
||||
await processDeleteRequest({ req, files: req.body.files });
|
||||
return res
|
||||
.status(200)
|
||||
.json({ message: 'File associations removed successfully from Azure Assistant' });
|
||||
}
|
||||
|
||||
await processDeleteRequest({ req, files: dbFiles });
|
||||
|
||||
logger.debug(
|
||||
|
||||
@@ -8,9 +8,8 @@ const {
|
||||
setBalanceConfig,
|
||||
checkDomainAllowed,
|
||||
} = require('~/server/middleware');
|
||||
const { setAuthTokens, setOpenIDAuthTokens } = require('~/server/services/AuthService');
|
||||
const { setAuthTokens } = require('~/server/services/AuthService');
|
||||
const { logger } = require('~/config');
|
||||
const { isEnabled } = require('~/server/utils');
|
||||
|
||||
const router = express.Router();
|
||||
|
||||
@@ -29,15 +28,7 @@ const oauthHandler = async (req, res) => {
|
||||
if (req.banned) {
|
||||
return;
|
||||
}
|
||||
if (
|
||||
req.user &&
|
||||
req.user.provider == 'openid' &&
|
||||
isEnabled(process.env.OPENID_REUSE_TOKENS) === true
|
||||
) {
|
||||
setOpenIDAuthTokens(req.user.tokenset, res);
|
||||
} else {
|
||||
await setAuthTokens(req.user._id, res);
|
||||
}
|
||||
await setAuthTokens(req.user._id, res);
|
||||
res.redirect(domains.client);
|
||||
} catch (err) {
|
||||
logger.error('Error in setting authentication tokens:', err);
|
||||
|
||||
@@ -1,17 +1,40 @@
|
||||
const { Keyv } = require('keyv');
|
||||
const express = require('express');
|
||||
const { MeiliSearch } = require('meilisearch');
|
||||
const { Conversation } = require('~/models/Conversation');
|
||||
const requireJwtAuth = require('~/server/middleware/requireJwtAuth');
|
||||
const { Message } = require('~/models/Message');
|
||||
const { isEnabled } = require('~/server/utils');
|
||||
const keyvRedis = require('~/cache/keyvRedis');
|
||||
|
||||
const router = express.Router();
|
||||
|
||||
const expiration = 60 * 1000;
|
||||
const cache = isEnabled(process.env.USE_REDIS)
|
||||
? new Keyv({ store: keyvRedis })
|
||||
: new Keyv({ namespace: 'search', ttl: expiration });
|
||||
|
||||
router.use(requireJwtAuth);
|
||||
|
||||
router.get('/enable', async function (req, res) {
|
||||
if (!isEnabled(process.env.SEARCH)) {
|
||||
return res.send(false);
|
||||
}
|
||||
router.get('/sync', async function (req, res) {
|
||||
await Message.syncWithMeili();
|
||||
await Conversation.syncWithMeili();
|
||||
res.send('synced');
|
||||
});
|
||||
|
||||
router.get('/test', async function (req, res) {
|
||||
const { q } = req.query;
|
||||
const messages = (
|
||||
await Message.meiliSearch(q, { attributesToHighlight: ['text'] }, true)
|
||||
).hits.map((message) => {
|
||||
const { _formatted, ...rest } = message;
|
||||
return { ...rest, searchResult: true, text: _formatted.text };
|
||||
});
|
||||
res.send(messages);
|
||||
});
|
||||
|
||||
router.get('/enable', async function (req, res) {
|
||||
let result = false;
|
||||
try {
|
||||
const client = new MeiliSearch({
|
||||
host: process.env.MEILI_HOST,
|
||||
@@ -19,7 +42,8 @@ router.get('/enable', async function (req, res) {
|
||||
});
|
||||
|
||||
const { status } = await client.health();
|
||||
return res.send(status === 'available');
|
||||
result = status === 'available' && !!process.env.SEARCH;
|
||||
return res.send(result);
|
||||
} catch (error) {
|
||||
return res.send(false);
|
||||
}
|
||||
|
||||
@@ -146,7 +146,7 @@ async function createActionTool({
|
||||
/** @type {import('librechat-data-provider').ActionMetadataRuntime} */
|
||||
const metadata = action.metadata;
|
||||
const executor = requestBuilder.createExecutor();
|
||||
const preparedExecutor = executor.setParams(toolInput ?? {});
|
||||
const preparedExecutor = executor.setParams(toolInput);
|
||||
|
||||
if (metadata.auth && metadata.auth.type !== AuthTypeEnum.None) {
|
||||
try {
|
||||
|
||||
@@ -25,7 +25,6 @@ jest.mock('./start/checks', () => ({
|
||||
checkHealth: jest.fn(),
|
||||
checkConfig: jest.fn(),
|
||||
checkAzureVariables: jest.fn(),
|
||||
checkWebSearchConfig: jest.fn(),
|
||||
}));
|
||||
|
||||
const AppService = require('./AppService');
|
||||
|
||||
@@ -1,25 +1,17 @@
|
||||
const {
|
||||
FileSources,
|
||||
EModelEndpoint,
|
||||
loadOCRConfig,
|
||||
processMCPEnv,
|
||||
EModelEndpoint,
|
||||
getConfigDefaults,
|
||||
loadWebSearchConfig,
|
||||
} = require('librechat-data-provider');
|
||||
const {
|
||||
checkHealth,
|
||||
checkConfig,
|
||||
checkVariables,
|
||||
checkAzureVariables,
|
||||
checkWebSearchConfig,
|
||||
} = require('./start/checks');
|
||||
const { checkVariables, checkHealth, checkConfig, checkAzureVariables } = require('./start/checks');
|
||||
const { azureAssistantsDefaults, assistantsConfigSetup } = require('./start/assistants');
|
||||
const { initializeAzureBlobService } = require('./Files/Azure/initialize');
|
||||
const { initializeFirebase } = require('./Files/Firebase/initialize');
|
||||
const loadCustomConfig = require('./Config/loadCustomConfig');
|
||||
const handleRateLimits = require('./Config/handleRateLimits');
|
||||
const { loadDefaultInterface } = require('./start/interface');
|
||||
const { loadTurnstileConfig } = require('./start/turnstile');
|
||||
const { azureConfigSetup } = require('./start/azureOpenAI');
|
||||
const { processModelSpecs } = require('./start/modelSpecs');
|
||||
const { initializeS3 } = require('./Files/S3/initialize');
|
||||
@@ -31,6 +23,7 @@ const { getMCPManager } = require('~/config');
|
||||
const paths = require('~/config/paths');
|
||||
|
||||
/**
|
||||
*
|
||||
* Loads custom config and initializes app-wide variables.
|
||||
* @function AppService
|
||||
* @param {Express.Application} app - The Express application object.
|
||||
@@ -42,8 +35,6 @@ const AppService = async (app) => {
|
||||
const configDefaults = getConfigDefaults();
|
||||
|
||||
const ocr = loadOCRConfig(config.ocr);
|
||||
const webSearch = loadWebSearchConfig(config.webSearch);
|
||||
checkWebSearchConfig(webSearch);
|
||||
const filteredTools = config.filteredTools;
|
||||
const includedTools = config.includedTools;
|
||||
const fileStrategy = config.fileStrategy ?? configDefaults.fileStrategy;
|
||||
@@ -83,12 +74,10 @@ const AppService = async (app) => {
|
||||
const socialLogins =
|
||||
config?.registration?.socialLogins ?? configDefaults?.registration?.socialLogins;
|
||||
const interfaceConfig = await loadDefaultInterface(config, configDefaults);
|
||||
const turnstileConfig = loadTurnstileConfig(config, configDefaults);
|
||||
|
||||
const defaultLocals = {
|
||||
ocr,
|
||||
paths,
|
||||
webSearch,
|
||||
fileStrategy,
|
||||
socialLogins,
|
||||
filteredTools,
|
||||
@@ -96,7 +85,6 @@ const AppService = async (app) => {
|
||||
availableTools,
|
||||
imageOutputType,
|
||||
interfaceConfig,
|
||||
turnstileConfig,
|
||||
balance,
|
||||
};
|
||||
|
||||
|
||||
@@ -46,12 +46,6 @@ jest.mock('./ToolService', () => ({
|
||||
},
|
||||
}),
|
||||
}));
|
||||
jest.mock('./start/turnstile', () => ({
|
||||
loadTurnstileConfig: jest.fn(() => ({
|
||||
siteKey: 'default-site-key',
|
||||
options: {},
|
||||
})),
|
||||
}));
|
||||
|
||||
const azureGroups = [
|
||||
{
|
||||
@@ -92,10 +86,6 @@ const azureGroups = [
|
||||
|
||||
describe('AppService', () => {
|
||||
let app;
|
||||
const mockedTurnstileConfig = {
|
||||
siteKey: 'default-site-key',
|
||||
options: {},
|
||||
};
|
||||
|
||||
beforeEach(() => {
|
||||
app = { locals: {} };
|
||||
@@ -117,7 +107,6 @@ describe('AppService', () => {
|
||||
sidePanel: true,
|
||||
presets: true,
|
||||
}),
|
||||
turnstileConfig: mockedTurnstileConfig,
|
||||
modelSpecs: undefined,
|
||||
availableTools: {
|
||||
ExampleTool: {
|
||||
@@ -141,14 +130,6 @@ describe('AppService', () => {
|
||||
balance: { enabled: true },
|
||||
filteredTools: undefined,
|
||||
includedTools: undefined,
|
||||
webSearch: {
|
||||
cohereApiKey: '${COHERE_API_KEY}',
|
||||
firecrawlApiKey: '${FIRECRAWL_API_KEY}',
|
||||
firecrawlApiUrl: '${FIRECRAWL_API_URL}',
|
||||
jinaApiKey: '${JINA_API_KEY}',
|
||||
safeSearch: 1,
|
||||
serperApiKey: '${SERPER_API_KEY}',
|
||||
},
|
||||
});
|
||||
});
|
||||
|
||||
@@ -545,7 +526,7 @@ describe('AppService updating app.locals and issuing warnings', () => {
|
||||
const { logger } = require('~/config');
|
||||
expect(logger.warn).toHaveBeenCalledWith(
|
||||
expect.stringContaining(
|
||||
"The 'assistants' endpoint has both 'supportedIds' and 'excludedIds' defined.",
|
||||
'The \'assistants\' endpoint has both \'supportedIds\' and \'excludedIds\' defined.',
|
||||
),
|
||||
);
|
||||
});
|
||||
@@ -567,7 +548,7 @@ describe('AppService updating app.locals and issuing warnings', () => {
|
||||
const { logger } = require('~/config');
|
||||
expect(logger.warn).toHaveBeenCalledWith(
|
||||
expect.stringContaining(
|
||||
"The 'assistants' endpoint has both 'privateAssistants' and 'supportedIds' or 'excludedIds' defined.",
|
||||
'The \'assistants\' endpoint has both \'privateAssistants\' and \'supportedIds\' or \'excludedIds\' defined.',
|
||||
),
|
||||
);
|
||||
});
|
||||
|
||||
@@ -56,7 +56,7 @@ const logoutUser = async (req, refreshToken) => {
|
||||
try {
|
||||
req.session.destroy();
|
||||
} catch (destroyErr) {
|
||||
logger.debug('[logoutUser] Failed to destroy session.', destroyErr);
|
||||
logger.error('[logoutUser] Failed to destroy session.', destroyErr);
|
||||
}
|
||||
|
||||
return { status: 200, message: 'Logout successful' };
|
||||
@@ -377,62 +377,13 @@ const setAuthTokens = async (userId, res, sessionId = null) => {
|
||||
secure: isProduction,
|
||||
sameSite: 'strict',
|
||||
});
|
||||
res.cookie('token_provider', 'librechat', {
|
||||
expires: new Date(refreshTokenExpires),
|
||||
httpOnly: true,
|
||||
secure: isProduction,
|
||||
sameSite: 'strict',
|
||||
});
|
||||
|
||||
return token;
|
||||
} catch (error) {
|
||||
logger.error('[setAuthTokens] Error in setting authentication tokens:', error);
|
||||
throw error;
|
||||
}
|
||||
};
|
||||
/**
|
||||
* @function setOpenIDAuthTokens
|
||||
* Set OpenID Authentication Tokens
|
||||
* The tokenset type comes from openid-client (TokenEndpointResponse plus its helpers).
|
||||
* @param {import('openid-client').TokenEndpointResponse & import('openid-client').TokenEndpointResponseHelpers} tokenset
|
||||
* - The tokenset object containing access and refresh tokens
|
||||
* @param {Object} res - response object
|
||||
* @returns {String} - access token
|
||||
*/
|
||||
const setOpenIDAuthTokens = (tokenset, res) => {
|
||||
try {
|
||||
if (!tokenset) {
|
||||
logger.error('[setOpenIDAuthTokens] No tokenset found in request');
|
||||
return;
|
||||
}
|
||||
const { REFRESH_TOKEN_EXPIRY } = process.env ?? {};
|
||||
const expiryInMilliseconds = eval(REFRESH_TOKEN_EXPIRY) ?? 1000 * 60 * 60 * 24 * 7; // 7 days default
|
||||
const expirationDate = new Date(Date.now() + expiryInMilliseconds);
|
||||
if (tokenset == null) {
|
||||
logger.error('[setOpenIDAuthTokens] No tokenset found in request');
|
||||
return;
|
||||
}
|
||||
if (!tokenset.access_token || !tokenset.refresh_token) {
|
||||
logger.error('[setOpenIDAuthTokens] No access or refresh token found in tokenset');
|
||||
return;
|
||||
}
|
||||
res.cookie('refreshToken', tokenset.refresh_token, {
|
||||
expires: expirationDate,
|
||||
httpOnly: true,
|
||||
secure: isProduction,
|
||||
sameSite: 'strict',
|
||||
});
|
||||
res.cookie('token_provider', 'openid', {
|
||||
expires: expirationDate,
|
||||
httpOnly: true,
|
||||
secure: isProduction,
|
||||
sameSite: 'strict',
|
||||
});
|
||||
return tokenset.access_token;
|
||||
} catch (error) {
|
||||
logger.error('[setOpenIDAuthTokens] Error in setting authentication tokens:', error);
|
||||
throw error;
|
||||
}
|
||||
};
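A minimal sketch of how setOpenIDAuthTokens might be used from an OpenID callback handler (the token-exchange helper and handler shape are assumptions for illustration; only the setOpenIDAuthTokens signature comes from this diff):

// Sketch: after exchanging the authorization code, set the cookies and keep the access token.
const tokenset = await exchangeAuthorizationCode(req); // hypothetical helper returning an openid-client tokenset
const accessToken = setOpenIDAuthTokens(tokenset, res);
if (!accessToken) {
  // Missing or incomplete tokenset: the function logs and returns undefined instead of throwing.
  return res.redirect('/login');
}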
|
||||
|
||||
/**
|
||||
* Resend Verification Email
|
||||
@@ -501,5 +452,4 @@ module.exports = {
|
||||
resetPassword,
|
||||
requestPasswordReset,
|
||||
resendVerificationEmail,
|
||||
setOpenIDAuthTokens,
|
||||
};
|
||||
|
||||
@@ -10,7 +10,17 @@ const getLogStores = require('~/cache/getLogStores');
|
||||
* */
|
||||
async function getCustomConfig() {
|
||||
const cache = getLogStores(CacheKeys.CONFIG_STORE);
|
||||
return (await cache.get(CacheKeys.CUSTOM_CONFIG)) || (await loadCustomConfig());
|
||||
let customConfig = await cache.get(CacheKeys.CUSTOM_CONFIG);
|
||||
|
||||
if (!customConfig) {
|
||||
customConfig = await loadCustomConfig();
|
||||
}
|
||||
|
||||
if (!customConfig) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return customConfig;
|
||||
}
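Callers can then distinguish "no config loaded" from a loaded config by checking for null, e.g. (a sketch of a typical consumer; the endpoints shape follows loadConfigEndpoints below):

// Sketch: guard on the explicit null return before reading nested config values.
const customConfig = await getCustomConfig();
const customEndpoints = customConfig?.endpoints?.custom ?? [];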
|
||||
|
||||
/**
|
||||
|
||||
@@ -29,14 +29,7 @@ async function loadConfigEndpoints(req) {
|
||||
|
||||
for (let i = 0; i < customEndpoints.length; i++) {
|
||||
const endpoint = customEndpoints[i];
|
||||
const {
|
||||
baseURL,
|
||||
apiKey,
|
||||
name: configName,
|
||||
iconURL,
|
||||
modelDisplayLabel,
|
||||
customParams,
|
||||
} = endpoint;
|
||||
const { baseURL, apiKey, name: configName, iconURL, modelDisplayLabel } = endpoint;
|
||||
const name = normalizeEndpointName(configName);
|
||||
|
||||
const resolvedApiKey = extractEnvVariable(apiKey);
|
||||
@@ -48,7 +41,6 @@ async function loadConfigEndpoints(req) {
|
||||
userProvideURL: isUserProvided(resolvedBaseURL),
|
||||
modelDisplayLabel,
|
||||
iconURL,
|
||||
customParams,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,18 +1,10 @@
|
||||
const path = require('path');
|
||||
const {
|
||||
CacheKeys,
|
||||
configSchema,
|
||||
EImageOutputType,
|
||||
validateSettingDefinitions,
|
||||
agentParamSettings,
|
||||
paramSettings,
|
||||
} = require('librechat-data-provider');
|
||||
const { CacheKeys, configSchema, EImageOutputType } = require('librechat-data-provider');
|
||||
const getLogStores = require('~/cache/getLogStores');
|
||||
const loadYaml = require('~/utils/loadYaml');
|
||||
const { logger } = require('~/config');
|
||||
const axios = require('axios');
|
||||
const yaml = require('js-yaml');
|
||||
const keyBy = require('lodash/keyBy');
|
||||
|
||||
const projectRoot = path.resolve(__dirname, '..', '..', '..', '..');
|
||||
const defaultConfigPath = path.resolve(projectRoot, 'librechat.yaml');
|
||||
@@ -113,10 +105,6 @@ https://www.librechat.ai/docs/configuration/stt_tts`);
|
||||
logger.debug('Custom config:', customConfig);
|
||||
}
|
||||
|
||||
(customConfig.endpoints?.custom ?? [])
|
||||
.filter((endpoint) => endpoint.customParams)
|
||||
.forEach((endpoint) => parseCustomParams(endpoint.name, endpoint.customParams));
|
||||
|
||||
if (customConfig.cache) {
|
||||
const cache = getLogStores(CacheKeys.CONFIG_STORE);
|
||||
await cache.set(CacheKeys.CUSTOM_CONFIG, customConfig);
|
||||
@@ -129,52 +117,4 @@ https://www.librechat.ai/docs/configuration/stt_tts`);
|
||||
return customConfig;
|
||||
}
|
||||
|
||||
// Validate and fill out missing values for custom parameters
|
||||
function parseCustomParams(endpointName, customParams) {
|
||||
const paramEndpoint = customParams.defaultParamsEndpoint;
|
||||
customParams.paramDefinitions = customParams.paramDefinitions || [];
|
||||
|
||||
// Checks if `defaultParamsEndpoint` is a key in `paramSettings`.
|
||||
const validEndpoints = new Set([
|
||||
...Object.keys(paramSettings),
|
||||
...Object.keys(agentParamSettings),
|
||||
]);
|
||||
if (!validEndpoints.has(paramEndpoint)) {
|
||||
throw new Error(
|
||||
`defaultParamsEndpoint of "${endpointName}" endpoint is invalid. ` +
|
||||
`Valid options are ${Array.from(validEndpoints).join(', ')}`,
|
||||
);
|
||||
}
|
||||
|
||||
// creates default param maps
|
||||
const regularParams = paramSettings[paramEndpoint] ?? [];
|
||||
const agentParams = agentParamSettings[paramEndpoint] ?? [];
|
||||
const defaultParams = regularParams.concat(agentParams);
|
||||
const defaultParamsMap = keyBy(defaultParams, 'key');
|
||||
|
||||
// TODO: Remove this check once we support new parameters not part of default parameters.
|
||||
// Checks if every key in `paramDefinitions` is valid.
|
||||
const validKeys = new Set(Object.keys(defaultParamsMap));
|
||||
const paramKeys = customParams.paramDefinitions.map((param) => param.key);
|
||||
if (paramKeys.some((key) => !validKeys.has(key))) {
|
||||
throw new Error(
|
||||
`paramDefinitions of "${endpointName}" endpoint contains invalid key(s). ` +
|
||||
`Valid parameter keys are ${Array.from(validKeys).join(', ')}`,
|
||||
);
|
||||
}
|
||||
|
||||
// Fill out missing values for custom param definitions
|
||||
customParams.paramDefinitions = customParams.paramDefinitions.map((param) => {
|
||||
return { ...defaultParamsMap[param.key], ...param, optionType: 'custom' };
|
||||
});
|
||||
|
||||
try {
|
||||
validateSettingDefinitions(customParams.paramDefinitions);
|
||||
} catch (e) {
|
||||
throw new Error(
|
||||
`Custom parameter definitions for "${endpointName}" endpoint is malformed: ${e.message}`,
|
||||
);
|
||||
}
|
||||
}
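For reference, a customParams block that passes this validation might look like the following (a sketch mirroring the tests further down; valid keys depend on the paramSettings/agentParamSettings entries for the chosen defaultParamsEndpoint):

// Sketch: custom parameter overrides for an endpoint that reuses the 'google' defaults.
const customParams = {
  defaultParamsEndpoint: 'google',
  paramDefinitions: [
    // Only keys already defined for the default endpoint are accepted; missing fields are filled in.
    { key: 'temperature', default: 0.7, range: { min: 0.1, max: 0.9, step: 0.1 } },
  ],
};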
|
||||
|
||||
module.exports = loadCustomConfig;
|
||||
|
||||
@@ -1,34 +1,6 @@
|
||||
jest.mock('axios');
|
||||
jest.mock('~/cache/getLogStores');
|
||||
jest.mock('~/utils/loadYaml');
|
||||
jest.mock('librechat-data-provider', () => {
|
||||
const actual = jest.requireActual('librechat-data-provider');
|
||||
return {
|
||||
...actual,
|
||||
paramSettings: { foo: {}, bar: {}, custom: {} },
|
||||
agentParamSettings: {
|
||||
custom: [],
|
||||
google: [
|
||||
{
|
||||
key: 'pressure',
|
||||
type: 'string',
|
||||
component: 'input',
|
||||
},
|
||||
{
|
||||
key: 'temperature',
|
||||
type: 'number',
|
||||
component: 'slider',
|
||||
default: 0.5,
|
||||
range: {
|
||||
min: 0,
|
||||
max: 2,
|
||||
step: 0.01,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
});
|
||||
|
||||
const axios = require('axios');
|
||||
const loadCustomConfig = require('./loadCustomConfig');
|
||||
@@ -178,126 +150,4 @@ describe('loadCustomConfig', () => {
|
||||
expect(logger.info).toHaveBeenCalledWith(JSON.stringify(mockConfig, null, 2));
|
||||
expect(logger.debug).toHaveBeenCalledWith('Custom config:', mockConfig);
|
||||
});
|
||||
|
||||
describe('parseCustomParams', () => {
|
||||
const mockConfig = {
|
||||
version: '1.0',
|
||||
cache: false,
|
||||
endpoints: {
|
||||
custom: [
|
||||
{
|
||||
name: 'Google',
|
||||
apiKey: 'user_provided',
|
||||
customParams: {},
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
|
||||
async function loadCustomParams(customParams) {
|
||||
mockConfig.endpoints.custom[0].customParams = customParams;
|
||||
loadYaml.mockReturnValue(mockConfig);
|
||||
return await loadCustomConfig();
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
jest.resetAllMocks();
|
||||
process.env.CONFIG_PATH = 'validConfig.yaml';
|
||||
});
|
||||
|
||||
it('returns no error when customParams is undefined', async () => {
|
||||
const result = await loadCustomParams(undefined);
|
||||
expect(result).toEqual(mockConfig);
|
||||
});
|
||||
|
||||
it('returns no error when customParams is valid', async () => {
|
||||
const result = await loadCustomParams({
|
||||
defaultParamsEndpoint: 'google',
|
||||
paramDefinitions: [
|
||||
{
|
||||
key: 'temperature',
|
||||
default: 0.5,
|
||||
},
|
||||
],
|
||||
});
|
||||
expect(result).toEqual(mockConfig);
|
||||
});
|
||||
|
||||
it('throws an error when paramDefinitions contain unsupported keys', async () => {
|
||||
const malformedCustomParams = {
|
||||
defaultParamsEndpoint: 'google',
|
||||
paramDefinitions: [
|
||||
{ key: 'temperature', default: 0.5 },
|
||||
{ key: 'unsupportedKey', range: 0.5 },
|
||||
],
|
||||
};
|
||||
await expect(loadCustomParams(malformedCustomParams)).rejects.toThrow(
|
||||
'paramDefinitions of "Google" endpoint contains invalid key(s). Valid parameter keys are pressure, temperature',
|
||||
);
|
||||
});
|
||||
|
||||
it('throws an error when paramDefinitions is malformed', async () => {
|
||||
const malformedCustomParams = {
|
||||
defaultParamsEndpoint: 'google',
|
||||
paramDefinitions: [
|
||||
{
|
||||
key: 'temperature',
|
||||
type: 'noomba',
|
||||
component: 'inpoot',
|
||||
optionType: 'custom',
|
||||
},
|
||||
],
|
||||
};
|
||||
await expect(loadCustomParams(malformedCustomParams)).rejects.toThrow(
|
||||
/Custom parameter definitions for "Google" endpoint is malformed:/,
|
||||
);
|
||||
});
|
||||
|
||||
it('throws an error when defaultParamsEndpoint is not provided', async () => {
|
||||
const malformedCustomParams = { defaultParamsEndpoint: undefined };
|
||||
await expect(loadCustomParams(malformedCustomParams)).rejects.toThrow(
|
||||
'defaultParamsEndpoint of "Google" endpoint is invalid. Valid options are foo, bar, custom, google',
|
||||
);
|
||||
});
|
||||
|
||||
it('fills the paramDefinitions with missing values', async () => {
|
||||
const customParams = {
|
||||
defaultParamsEndpoint: 'google',
|
||||
paramDefinitions: [
|
||||
{ key: 'temperature', default: 0.7, range: { min: 0.1, max: 0.9, step: 0.1 } },
|
||||
{ key: 'pressure', component: 'textarea' },
|
||||
],
|
||||
};
|
||||
|
||||
const parsedConfig = await loadCustomParams(customParams);
|
||||
const paramDefinitions = parsedConfig.endpoints.custom[0].customParams.paramDefinitions;
|
||||
expect(paramDefinitions).toEqual([
|
||||
{
|
||||
columnSpan: 1,
|
||||
component: 'slider',
|
||||
default: 0.7, // overridden
|
||||
includeInput: true,
|
||||
key: 'temperature',
|
||||
label: 'temperature',
|
||||
optionType: 'custom',
|
||||
range: {
|
||||
// overridden
|
||||
max: 0.9,
|
||||
min: 0.1,
|
||||
step: 0.1,
|
||||
},
|
||||
type: 'number',
|
||||
},
|
||||
{
|
||||
columnSpan: 1,
|
||||
component: 'textarea', // overridden
|
||||
key: 'pressure',
|
||||
label: 'pressure',
|
||||
optionType: 'custom',
|
||||
placeholder: '',
|
||||
type: 'string',
|
||||
},
|
||||
]);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -6,7 +6,6 @@ const {
|
||||
EToolResources,
|
||||
getResponseSender,
|
||||
AgentCapabilities,
|
||||
replaceSpecialVars,
|
||||
providerEndpointMap,
|
||||
} = require('librechat-data-provider');
|
||||
const {
|
||||
@@ -233,13 +232,6 @@ const initializeAgentOptions = async ({
|
||||
endpointOption: _endpointOption,
|
||||
});
|
||||
|
||||
if (
|
||||
agent.endpoint === EModelEndpoint.azureOpenAI &&
|
||||
options.llmConfig?.azureOpenAIApiInstanceName == null
|
||||
) {
|
||||
agent.provider = Providers.OPENAI;
|
||||
}
|
||||
|
||||
if (options.provider != null) {
|
||||
agent.provider = options.provider;
|
||||
}
|
||||
@@ -254,13 +246,6 @@ const initializeAgentOptions = async ({
|
||||
agent.model_parameters.model = agent.model;
|
||||
}
|
||||
|
||||
if (agent.instructions && agent.instructions !== '') {
|
||||
agent.instructions = replaceSpecialVars({
|
||||
text: agent.instructions,
|
||||
user: req.user,
|
||||
});
|
||||
}
|
||||
|
||||
if (typeof agent.artifacts === 'string' && agent.artifacts !== '') {
|
||||
agent.additional_instructions = generateArtifactsPrompt({
|
||||
endpoint: agent.provider,
|
||||
|
||||
@@ -15,14 +15,20 @@ function checkPromptCacheSupport(modelName) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return (
|
||||
/claude-3[-.]7/.test(modelMatch) ||
|
||||
/claude-3[-.]5-(?:sonnet|haiku)/.test(modelMatch) ||
|
||||
/claude-3-(?:sonnet|haiku|opus)?/.test(modelMatch) ||
|
||||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(modelMatch) ||
|
||||
/claude-[4-9]-(?:sonnet|opus|haiku)?/.test(modelMatch) ||
|
||||
/claude-4(?:-(?:sonnet|opus|haiku))?/.test(modelMatch)
|
||||
);
|
||||
if (
|
||||
modelMatch === 'claude-3-7-sonnet' ||
|
||||
modelMatch === 'claude-3-5-sonnet' ||
|
||||
modelMatch === 'claude-3-5-haiku' ||
|
||||
modelMatch === 'claude-3-haiku' ||
|
||||
modelMatch === 'claude-3-opus' ||
|
||||
modelMatch === 'claude-3.7-sonnet' ||
|
||||
modelMatch === 'claude-3.5-sonnet' ||
|
||||
modelMatch === 'claude-3.5-haiku'
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
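As a rough guide, the regex-based variant above classifies model names like this (a sketch; results also depend on how modelMatch is derived from the raw model name earlier in the function):

// Sketch: expected outcomes under the regex-based checks.
checkPromptCacheSupport('claude-3-7-sonnet-20250219'); // true
checkPromptCacheSupport('claude-3-5-haiku-20241022');  // true
checkPromptCacheSupport('claude-sonnet-4-20250514');   // true (claude-(sonnet|opus|haiku)-[4-9])
checkPromptCacheSupport('claude-2.1');                 // false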
|
||||
|
||||
/**
|
||||
@@ -45,14 +51,6 @@ function getClaudeHeaders(model, supportsCacheControl) {
|
||||
'anthropic-beta':
|
||||
'token-efficient-tools-2025-02-19,output-128k-2025-02-19,prompt-caching-2024-07-31',
|
||||
};
|
||||
} else if (
|
||||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(model) ||
|
||||
/claude-[4-9]-(?:sonnet|opus|haiku)?/.test(model) ||
|
||||
/claude-4(?:-(?:sonnet|opus|haiku))?/.test(model)
|
||||
) {
|
||||
return {
|
||||
'anthropic-beta': 'prompt-caching-2024-07-31',
|
||||
};
|
||||
} else {
|
||||
return {
|
||||
'anthropic-beta': 'prompt-caching-2024-07-31',
|
||||
@@ -74,8 +72,7 @@ function configureReasoning(anthropicInput, extendedOptions = {}) {
|
||||
if (
|
||||
extendedOptions.thinking &&
|
||||
updatedOptions?.model &&
|
||||
(/claude-3[-.]7/.test(updatedOptions.model) ||
|
||||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(updatedOptions.model))
|
||||
/claude-3[-.]7/.test(updatedOptions.model)
|
||||
) {
|
||||
updatedOptions.thinking = {
|
||||
type: 'enabled',
|
||||
|
||||
@@ -3,6 +3,7 @@ const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
|
||||
const { getAssistant } = require('~/models/Assistant');
|
||||
|
||||
const buildOptions = async (endpoint, parsedBody) => {
|
||||
|
||||
const { promptPrefix, assistant_id, iconURL, greeting, spec, artifacts, ...modelOptions } =
|
||||
parsedBody;
|
||||
const endpointOption = removeNullishValues({
|
||||
|
||||
@@ -25,10 +25,10 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {
|
||||
let credentials = isUserProvided
|
||||
? await getUserKey({ userId: req.user.id, name: EModelEndpoint.bedrock })
|
||||
: {
|
||||
accessKeyId: BEDROCK_AWS_ACCESS_KEY_ID,
|
||||
secretAccessKey: BEDROCK_AWS_SECRET_ACCESS_KEY,
|
||||
...(BEDROCK_AWS_SESSION_TOKEN && { sessionToken: BEDROCK_AWS_SESSION_TOKEN }),
|
||||
};
|
||||
accessKeyId: BEDROCK_AWS_ACCESS_KEY_ID,
|
||||
secretAccessKey: BEDROCK_AWS_SECRET_ACCESS_KEY,
|
||||
...(BEDROCK_AWS_SESSION_TOKEN && { sessionToken: BEDROCK_AWS_SESSION_TOKEN }),
|
||||
};
|
||||
|
||||
if (!credentials) {
|
||||
throw new Error('Bedrock credentials not provided. Please provide them again.');
|
||||
|
||||
@@ -105,7 +105,6 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrid
|
||||
headers: resolvedHeaders,
|
||||
addParams: endpointConfig.addParams,
|
||||
dropParams: endpointConfig.dropParams,
|
||||
customParams: endpointConfig.customParams,
|
||||
titleConvo: endpointConfig.titleConvo,
|
||||
titleModel: endpointConfig.titleModel,
|
||||
forcePrompt: endpointConfig.forcePrompt,
|
||||
|
||||
@@ -136,7 +136,7 @@ function getLLMConfig(apiKey, options = {}, endpoint = null) {
|
||||
Object.assign(llmConfig, azure);
|
||||
llmConfig.model = llmConfig.azureOpenAIApiDeploymentName;
|
||||
} else {
|
||||
llmConfig.apiKey = apiKey;
|
||||
llmConfig.openAIApiKey = apiKey;
|
||||
// Object.assign(llmConfig, {
|
||||
// configuration: { apiKey },
|
||||
// });
|
||||
|
||||
@@ -1,74 +0,0 @@
|
||||
const axios = require('axios');
|
||||
const fs = require('fs');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
/**
|
||||
* Uploads a document to Azure Document Intelligence API and returns the Markdown result.
|
||||
*
|
||||
* @param {Object} params - The parameters for the Azure Document Intelligence request.
|
||||
* @param {string} params.filePath - The path to the file on disk.
|
||||
* @param {string} params.apiKey - Azure API key.
|
||||
* @param {string} params.endpoint - Azure Document Intelligence endpoint.
|
||||
* @param {string} params.modelId - The model ID to use for analysis.
|
||||
* @returns {Promise<Object>} - The Document Intelligence result.
|
||||
*/
|
||||
async function uploadAzureDocumentIntelligence({ filePath, apiKey, endpoint, modelId }) {
|
||||
// Read and encode file
|
||||
const fileBuffer = fs.readFileSync(filePath);
|
||||
const base64Source = fileBuffer.toString('base64');
|
||||
|
||||
// Build URL (ensure no trailing slash on endpoint)
|
||||
const url = `${endpoint.replace(/\/+$/, '')}/documentModels/${modelId}:analyze?outputContentFormat=markdown`;
|
||||
|
||||
try {
|
||||
// Kick off the analysis
|
||||
const response = await axios.post(
|
||||
url,
|
||||
{ base64Source },
|
||||
{
|
||||
headers: {
|
||||
'Ocp-Apim-Subscription-Key': apiKey,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
},
|
||||
);
|
||||
|
||||
// Axios lower-cases header keys, but allow either form
|
||||
const headers = response.headers || {};
|
||||
const operationLocation = headers['operation-location'] || headers['Operation-Location'];
|
||||
if (!operationLocation) {
|
||||
throw new Error('Missing Operation-Location header in Azure response.');
|
||||
}
|
||||
|
||||
// Poll until done
|
||||
let resultContent;
|
||||
while (true) {
|
||||
const pollResponse = await axios.get(operationLocation, {
|
||||
headers: { 'Ocp-Apim-Subscription-Key': apiKey },
|
||||
});
|
||||
|
||||
const { status, resultUrl } = pollResponse.data;
|
||||
if (status === 'succeeded') {
|
||||
const final = await axios.get(resultUrl, {
|
||||
headers: { 'Ocp-Apim-Subscription-Key': apiKey },
|
||||
});
|
||||
resultContent = final.data.analyzeResult.content;
|
||||
break;
|
||||
}
|
||||
if (status === 'failed') {
|
||||
throw new Error('Azure Document Intelligence processing failed.');
|
||||
}
|
||||
// Wait 2s before retry
|
||||
await new Promise((r) => setTimeout(r, 2000));
|
||||
}
|
||||
|
||||
return resultContent;
|
||||
} catch (error) {
|
||||
logger.error('Error performing Azure Document Intelligence:', error.message);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
uploadAzureDocumentIntelligence,
|
||||
};
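A minimal sketch of calling this helper directly (all values below are placeholders; the environment variable names are not defined by this diff):

// Sketch: analyze a PDF with the prebuilt-layout model and receive Markdown content back.
const markdown = await uploadAzureDocumentIntelligence({
  filePath: '/tmp/contract.pdf',
  apiKey: process.env.AZURE_DOCUMENT_INTELLIGENCE_KEY,        // placeholder name
  endpoint: process.env.AZURE_DOCUMENT_INTELLIGENCE_ENDPOINT, // e.g. https://<resource>.cognitiveservices.azure.com
  modelId: 'prebuilt-layout',
});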
|
||||
@@ -1,103 +0,0 @@
|
||||
const fs = require('fs');
|
||||
|
||||
const mockAxios = {
|
||||
interceptors: {
|
||||
request: { use: jest.fn(), eject: jest.fn() },
|
||||
response: { use: jest.fn(), eject: jest.fn() },
|
||||
},
|
||||
create: jest.fn().mockReturnValue({
|
||||
defaults: { proxy: null },
|
||||
get: jest.fn().mockResolvedValue({ data: {} }),
|
||||
post: jest.fn().mockResolvedValue({ data: {} }),
|
||||
put: jest.fn().mockResolvedValue({ data: {} }),
|
||||
delete: jest.fn().mockResolvedValue({ data: {} }),
|
||||
}),
|
||||
get: jest.fn().mockResolvedValue({ data: {} }),
|
||||
post: jest.fn().mockResolvedValue({ data: {} }),
|
||||
put: jest.fn().mockResolvedValue({ data: {} }),
|
||||
delete: jest.fn().mockResolvedValue({ data: {} }),
|
||||
reset: jest.fn().mockImplementation(function () {
|
||||
this.get.mockClear();
|
||||
this.post.mockClear();
|
||||
this.put.mockClear();
|
||||
this.delete.mockClear();
|
||||
this.create.mockClear();
|
||||
}),
|
||||
};
|
||||
|
||||
jest.mock('axios', () => mockAxios);
|
||||
jest.mock('fs');
|
||||
jest.mock('~/config', () => ({
|
||||
logger: { error: jest.fn() },
|
||||
}));
|
||||
|
||||
const { uploadAzureDocumentIntelligence } = require('./crud');
|
||||
|
||||
describe('AzureDocumentIntelligence Service', () => {
|
||||
beforeEach(() => {
|
||||
mockAxios.reset();
|
||||
fs.readFileSync.mockReset();
|
||||
});
|
||||
|
||||
it('should upload and poll until it gets the Markdown result', async () => {
|
||||
const mockFileBuffer = Buffer.from('test file content');
|
||||
const mockBase64 = mockFileBuffer.toString('base64');
|
||||
const mockOpLocation = 'https://azure-ocr-endpoint.com/operations/123';
|
||||
const mockResultUrl = 'https://azure-ocr-endpoint.com/results/123';
|
||||
const mockFinal = { analyzeResult: { content: 'Final analysis result' } };
|
||||
|
||||
// fs.readFileSync returns our buffer
|
||||
fs.readFileSync.mockReturnValue(mockFileBuffer);
|
||||
|
||||
// First axios.post => returns Operation-Location header
|
||||
mockAxios.post.mockResolvedValueOnce({
|
||||
headers: { 'Operation-Location': mockOpLocation },
|
||||
});
|
||||
|
||||
// First axios.get => poll success, returns status + resultUrl
|
||||
// Second axios.get => fetch final result
|
||||
mockAxios.get
|
||||
.mockResolvedValueOnce({ data: { status: 'succeeded', resultUrl: mockResultUrl } })
|
||||
.mockResolvedValueOnce({ data: mockFinal });
|
||||
|
||||
const result = await uploadAzureDocumentIntelligence({
|
||||
filePath: '/path/to/test.pdf',
|
||||
apiKey: 'azure-api-key',
|
||||
endpoint: 'https://azure-ocr-endpoint.com/',
|
||||
modelId: 'prebuilt-layout',
|
||||
});
|
||||
|
||||
// Validate read
|
||||
expect(fs.readFileSync).toHaveBeenCalledWith('/path/to/test.pdf');
|
||||
|
||||
// Validate initial POST
|
||||
expect(mockAxios.post).toHaveBeenCalledWith(
|
||||
'https://azure-ocr-endpoint.com/documentModels/prebuilt-layout:analyze?outputContentFormat=markdown',
|
||||
{ base64Source: mockBase64 },
|
||||
expect.objectContaining({
|
||||
headers: expect.objectContaining({
|
||||
'Ocp-Apim-Subscription-Key': 'azure-api-key',
|
||||
'Content-Type': 'application/json',
|
||||
}),
|
||||
}),
|
||||
);
|
||||
|
||||
// Validate polling GET
|
||||
expect(mockAxios.get).toHaveBeenCalledWith(
|
||||
mockOpLocation,
|
||||
expect.objectContaining({
|
||||
headers: expect.objectContaining({ 'Ocp-Apim-Subscription-Key': 'azure-api-key' }),
|
||||
}),
|
||||
);
|
||||
|
||||
// Validate final fetch GET
|
||||
expect(mockAxios.get).toHaveBeenCalledWith(
|
||||
mockResultUrl,
|
||||
expect.objectContaining({
|
||||
headers: expect.objectContaining({ 'Ocp-Apim-Subscription-Key': 'azure-api-key' }),
|
||||
}),
|
||||
);
|
||||
|
||||
expect(result).toEqual('Final analysis result');
|
||||
});
|
||||
});
|
||||
@@ -1,5 +0,0 @@
|
||||
const crud = require('./crud');
|
||||
|
||||
module.exports = {
|
||||
...crud,
|
||||
};
|
||||
@@ -2,12 +2,7 @@
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const FormData = require('form-data');
|
||||
const {
|
||||
FileSources,
|
||||
envVarRegex,
|
||||
extractEnvVariable,
|
||||
extractVariableName,
|
||||
} = require('librechat-data-provider');
|
||||
const { FileSources, envVarRegex, extractEnvVariable } = require('librechat-data-provider');
|
||||
const { loadAuthValues } = require('~/server/services/Tools/credentials');
|
||||
const { logger, createAxiosInstance } = require('~/config');
|
||||
const { logAxiosError } = require('~/utils/axios');
|
||||
@@ -113,6 +108,11 @@ async function performOCR({
|
||||
});
|
||||
}
|
||||
|
||||
function extractVariableName(str) {
|
||||
const match = str.match(envVarRegex);
|
||||
return match ? match[1] : null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Uploads a file to the Mistral OCR API and processes the OCR result.
|
||||
*
|
||||
|
||||
@@ -54,7 +54,7 @@ async function deleteOpenAIFile(req, file, openai) {
|
||||
throw new Error('OpenAI returned `false` for deleted status');
|
||||
}
|
||||
logger.debug(
|
||||
`[deleteOpenAIFile] User ${req.user.id} successfully deleted file "${file.file_id}" from OpenAI`,
|
||||
`[deleteOpenAIFile] User ${req.user.id} successfully deleted ${file.file_id} from OpenAI`,
|
||||
);
|
||||
} catch (error) {
|
||||
logger.error('[deleteOpenAIFile] Error deleting file from OpenAI: ' + error.message);
|
||||
|
||||
@@ -5,10 +5,9 @@ const { EModelEndpoint } = require('librechat-data-provider');
|
||||
* Resizes an image from a given buffer based on the specified resolution.
|
||||
*
|
||||
* @param {Buffer} inputBuffer - The buffer of the image to be resized.
|
||||
* @param {'low' | 'high' | {percentage?: number, px?: number}} resolution - The resolution to resize the image to.
|
||||
* @param {'low' | 'high'} resolution - The resolution to resize the image to.
|
||||
* 'low' for a maximum of 512x512 resolution,
|
||||
* 'high' for a maximum of 768x2000 resolution,
|
||||
* or a custom object with percentage or px values.
|
||||
* 'high' for a maximum of 768x2000 resolution.
|
||||
* @param {EModelEndpoint} endpoint - Identifier for specific endpoint handling
|
||||
* @returns {Promise<{buffer: Buffer, width: number, height: number}>} An object containing the resized image buffer and its dimensions.
|
||||
* @throws Will throw an error if the resolution parameter is invalid.
|
||||
@@ -18,32 +17,10 @@ async function resizeImageBuffer(inputBuffer, resolution, endpoint) {
|
||||
const maxShortSideHighRes = 768;
|
||||
const maxLongSideHighRes = endpoint === EModelEndpoint.anthropic ? 1568 : 2000;
|
||||
|
||||
let customPercent, customPx;
|
||||
if (resolution && typeof resolution === 'object') {
|
||||
if (typeof resolution.percentage === 'number') {
|
||||
customPercent = resolution.percentage;
|
||||
} else if (typeof resolution.px === 'number') {
|
||||
customPx = resolution.px;
|
||||
}
|
||||
}
|
||||
|
||||
let newWidth, newHeight;
|
||||
let resizeOptions = { fit: 'inside', withoutEnlargement: true };
|
||||
|
||||
if (customPercent != null || customPx != null) {
|
||||
// percentage-based resize
|
||||
const metadata = await sharp(inputBuffer).metadata();
|
||||
if (customPercent != null) {
|
||||
newWidth = Math.round(metadata.width * (customPercent / 100));
|
||||
newHeight = Math.round(metadata.height * (customPercent / 100));
|
||||
} else {
|
||||
// pixel max on both sides
|
||||
newWidth = Math.min(metadata.width, customPx);
|
||||
newHeight = Math.min(metadata.height, customPx);
|
||||
}
|
||||
resizeOptions.width = newWidth;
|
||||
resizeOptions.height = newHeight;
|
||||
} else if (resolution === 'low') {
|
||||
if (resolution === 'low') {
|
||||
resizeOptions.width = maxLowRes;
|
||||
resizeOptions.height = maxLowRes;
|
||||
} else if (resolution === 'high') {
|
||||
|
||||
@@ -137,13 +137,11 @@ const processDeleteRequest = async ({ req, files }) => {
|
||||
/** @type {Record<string, OpenAI | undefined>} */
|
||||
const client = { [FileSources.openai]: undefined, [FileSources.azure]: undefined };
|
||||
const initializeClients = async () => {
|
||||
if (req.app.locals[EModelEndpoint.assistants]) {
|
||||
const openAIClient = await getOpenAIClient({
|
||||
req,
|
||||
overrideEndpoint: EModelEndpoint.assistants,
|
||||
});
|
||||
client[FileSources.openai] = openAIClient.openai;
|
||||
}
|
||||
const openAIClient = await getOpenAIClient({
|
||||
req,
|
||||
overrideEndpoint: EModelEndpoint.assistants,
|
||||
});
|
||||
client[FileSources.openai] = openAIClient.openai;
|
||||
|
||||
if (!req.app.locals[EModelEndpoint.azureOpenAI]?.assistants) {
|
||||
return;
|
||||
@@ -695,7 +693,7 @@ const processOpenAIFile = async ({
|
||||
const processOpenAIImageOutput = async ({ req, buffer, file_id, filename, fileExt }) => {
|
||||
const currentDate = new Date();
|
||||
const formattedDate = currentDate.toISOString();
|
||||
const _file = await convertImage(req, buffer, undefined, `${file_id}${fileExt}`);
|
||||
const _file = await convertImage(req, buffer, 'high', `${file_id}${fileExt}`);
|
||||
const file = {
|
||||
..._file,
|
||||
usage: 1,
|
||||
@@ -840,9 +838,8 @@ function base64ToBuffer(base64String) {
|
||||
|
||||
async function saveBase64Image(
|
||||
url,
|
||||
{ req, file_id: _file_id, filename: _filename, endpoint, context, resolution },
|
||||
{ req, file_id: _file_id, filename: _filename, endpoint, context, resolution = 'high' },
|
||||
) {
|
||||
const effectiveResolution = resolution ?? req.app.locals.fileConfig?.imageGeneration ?? 'high';
|
||||
const file_id = _file_id ?? v4();
|
||||
let filename = `${file_id}-${_filename}`;
|
||||
const { buffer: inputBuffer, type } = base64ToBuffer(url);
|
||||
@@ -855,7 +852,7 @@ async function saveBase64Image(
|
||||
}
|
||||
}
|
||||
|
||||
const image = await resizeImageBuffer(inputBuffer, effectiveResolution, endpoint);
|
||||
const image = await resizeImageBuffer(inputBuffer, resolution, endpoint);
|
||||
const source = req.app.locals.fileStrategy;
|
||||
const { saveBuffer } = getStrategyFunctions(source);
|
||||
const filepath = await saveBuffer({
|
||||
|
||||
@@ -47,7 +47,6 @@ const { uploadOpenAIFile, deleteOpenAIFile, getOpenAIFileStream } = require('./O
|
||||
const { getCodeOutputDownloadStream, uploadCodeEnvFile } = require('./Code');
|
||||
const { uploadVectors, deleteVectors } = require('./VectorDB');
|
||||
const { uploadMistralOCR } = require('./MistralOCR');
|
||||
const { uploadAzureDocumentIntelligence } = require('./AzureDocumentIntelligence'); // Azure Document Intelligence OCR upload handler
|
||||
|
||||
/**
|
||||
* Firebase Storage Strategy Functions
|
||||
@@ -203,26 +202,6 @@ const mistralOCRStrategy = () => ({
|
||||
handleFileUpload: uploadMistralOCR,
|
||||
});
|
||||
|
||||
const azureOCRStrategy = () => ({
|
||||
/** @type {typeof saveFileFromURL | null} */
|
||||
saveURL: null,
|
||||
/** @type {typeof saveFileFromURL | null} */
|
||||
getFileURL: null,
|
||||
/** @type {typeof saveFileFromURL | null} */
|
||||
saveBuffer: null,
|
||||
/** @type {typeof saveFileFromURL | null} */
|
||||
processAvatar: null,
|
||||
/** @type {typeof saveFileFromURL | null} */
|
||||
handleImageUpload: null,
|
||||
/** @type {typeof saveFileFromURL | null} */
|
||||
prepareImagePayload: null,
|
||||
/** @type {typeof saveFileFromURL | null} */
|
||||
deleteFile: null,
|
||||
handleFileUpload: uploadAzureDocumentIntelligence,
|
||||
/** @type {typeof saveFileFromURL | null} */
|
||||
getDownloadStream: null,
|
||||
});
|
||||
|
||||
// Strategy Selector
|
||||
const getStrategyFunctions = (fileSource) => {
|
||||
if (fileSource === FileSources.firebase) {
|
||||
@@ -243,8 +222,6 @@ const getStrategyFunctions = (fileSource) => {
|
||||
return codeOutputStrategy();
|
||||
} else if (fileSource === FileSources.mistral_ocr) {
|
||||
return mistralOCRStrategy();
|
||||
} else if (fileSource === FileSources.azure_ocr) {
|
||||
return azureOCRStrategy();
|
||||
} else {
|
||||
throw new Error('Invalid file source');
|
||||
}
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
const { z } = require('zod');
|
||||
const { tool } = require('@langchain/core/tools');
|
||||
const { normalizeServerName } = require('librechat-mcp');
|
||||
const { Constants: AgentConstants, Providers } = require('@librechat/agents');
|
||||
const {
|
||||
Constants,
|
||||
@@ -39,7 +38,6 @@ async function createMCPTool({ req, toolKey, provider: _provider }) {
|
||||
}
|
||||
|
||||
const [toolName, serverName] = toolKey.split(Constants.mcp_delimiter);
|
||||
const normalizedToolKey = `${toolName}${Constants.mcp_delimiter}${normalizeServerName(serverName)}`;
|
||||
|
||||
if (!req.user?.id) {
|
||||
logger.error(
|
||||
@@ -85,7 +83,7 @@ async function createMCPTool({ req, toolKey, provider: _provider }) {
|
||||
|
||||
const toolInstance = tool(_call, {
|
||||
schema,
|
||||
name: normalizedToolKey,
|
||||
name: toolKey,
|
||||
description: description || '',
|
||||
responseFormat: AgentConstants.CONTENT_AND_ARTIFACT,
|
||||
});
|
||||
|
||||
@@ -66,26 +66,16 @@ const getUserPluginAuthValue = async (userId, authField, throwError = true) => {
|
||||
// }
|
||||
// };
|
||||
|
||||
/**
|
||||
*
|
||||
* @async
|
||||
* @param {string} userId
|
||||
* @param {string} authField
|
||||
* @param {string} pluginKey
|
||||
* @param {string} value
|
||||
* @returns {Promise<IPluginAuth>}
|
||||
* @throws {Error}
|
||||
*/
|
||||
const updateUserPluginAuth = async (userId, authField, pluginKey, value) => {
|
||||
try {
|
||||
const encryptedValue = await encrypt(value);
|
||||
const pluginAuth = await PluginAuth.findOne({ userId, authField }).lean();
|
||||
if (pluginAuth) {
|
||||
return await PluginAuth.findOneAndUpdate(
|
||||
const pluginAuth = await PluginAuth.updateOne(
|
||||
{ userId, authField },
|
||||
{ $set: { value: encryptedValue } },
|
||||
{ new: true, upsert: true },
|
||||
).lean();
|
||||
);
|
||||
return pluginAuth;
|
||||
} else {
|
||||
const newPluginAuth = await new PluginAuth({
|
||||
userId,
|
||||
@@ -94,7 +84,7 @@ const updateUserPluginAuth = async (userId, authField, pluginKey, value) => {
|
||||
pluginKey,
|
||||
});
|
||||
await newPluginAuth.save();
|
||||
return newPluginAuth.toObject();
|
||||
return newPluginAuth;
|
||||
}
|
||||
} catch (err) {
|
||||
logger.error('[updateUserPluginAuth]', err);
|
||||
@@ -102,14 +92,6 @@ const updateUserPluginAuth = async (userId, authField, pluginKey, value) => {
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* @async
|
||||
* @param {string} userId
|
||||
* @param {string} authField
|
||||
* @param {boolean} [all]
|
||||
* @returns {Promise<import('mongoose').DeleteResult>}
|
||||
* @throws {Error}
|
||||
*/
|
||||
const deleteUserPluginAuth = async (userId, authField, all = false) => {
|
||||
if (all) {
|
||||
try {
|
||||
|
||||
@@ -132,8 +132,6 @@ async function saveUserMessage(req, params) {
|
||||
* @param {string} params.endpoint - The conversation endpoint
|
||||
* @param {string} params.parentMessageId - The latest user message that triggered this response.
|
||||
* @param {string} [params.instructions] - Optional: from preset for `instructions` field.
|
||||
* @param {string} [params.spec] - Optional: Model spec identifier.
|
||||
* @param {string} [params.iconURL]
|
||||
* Overrides the instructions of the assistant.
|
||||
* @param {string} [params.promptPrefix] - Optional: from preset for `additional_instructions` field.
|
||||
* @return {Promise<Run>} A promise that resolves to the created run object.
|
||||
@@ -156,8 +154,6 @@ async function saveAssistantMessage(req, params) {
|
||||
text: params.text,
|
||||
unfinished: false,
|
||||
// tokenCount,
|
||||
iconURL: params.iconURL,
|
||||
spec: params.spec,
|
||||
});
|
||||
|
||||
await saveConvo(
|
||||
@@ -169,8 +165,6 @@ async function saveAssistantMessage(req, params) {
|
||||
instructions: params.instructions,
|
||||
assistant_id: params.assistant_id,
|
||||
model: params.model,
|
||||
iconURL: params.iconURL,
|
||||
spec: params.spec,
|
||||
},
|
||||
{ context: 'api/server/services/Threads/manage.js #saveAssistantMessage' },
|
||||
);
|
||||
|
||||
@@ -1,14 +1,13 @@
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { zodToJsonSchema } = require('zod-to-json-schema');
|
||||
const { Calculator } = require('@langchain/community/tools/calculator');
|
||||
const { tool: toolFn, Tool, DynamicStructuredTool } = require('@langchain/core/tools');
|
||||
const { Calculator } = require('@langchain/community/tools/calculator');
|
||||
const {
|
||||
Tools,
|
||||
ErrorTypes,
|
||||
ContentTypes,
|
||||
imageGenTools,
|
||||
EToolResources,
|
||||
EModelEndpoint,
|
||||
actionDelimiter,
|
||||
ImageVisionTool,
|
||||
@@ -29,7 +28,6 @@ const {
|
||||
toolkits,
|
||||
} = require('~/app/clients/tools');
|
||||
const { processFileURL, uploadImageBuffer } = require('~/server/services/Files/process');
|
||||
const { createOnSearchResults } = require('~/server/services/Tools/search');
|
||||
const { isActionDomainAllowed } = require('~/server/services/domains');
|
||||
const { getEndpointsConfig } = require('~/server/services/Config');
|
||||
const { recordUsage } = require('~/server/services/Threads');
|
||||
@@ -38,30 +36,6 @@ const { redactMessage } = require('~/config/parsers');
|
||||
const { sleep } = require('~/server/utils');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
/**
|
||||
* @param {string} toolName
|
||||
* @returns {string | undefined} toolKey
|
||||
*/
|
||||
function getToolkitKey(toolName) {
|
||||
/** @type {string|undefined} */
|
||||
let toolkitKey;
|
||||
for (const toolkit of toolkits) {
|
||||
if (toolName.startsWith(EToolResources.image_edit)) {
|
||||
const splitMatches = toolkit.pluginKey.split('_');
|
||||
const suffix = splitMatches[splitMatches.length - 1];
|
||||
if (toolName.endsWith(suffix)) {
|
||||
toolkitKey = toolkit.pluginKey;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (toolName.startsWith(toolkit.pluginKey)) {
|
||||
toolkitKey = toolkit.pluginKey;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return toolkitKey;
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads and formats tools from the specified tool directory.
|
||||
*
|
||||
@@ -134,7 +108,7 @@ function loadAndFormatTools({ directory, adminFilter = [], adminIncluded = [] })
|
||||
tools.push(formattedTool);
|
||||
}
|
||||
|
||||
/** Basic Tools & Toolkits; schema: { input: string } */
|
||||
/** Basic Tools; schema: { input: string } */
|
||||
const basicToolInstances = [
|
||||
new Calculator(),
|
||||
...createOpenAIImageTools({ override: true }),
|
||||
@@ -143,7 +117,9 @@ function loadAndFormatTools({ directory, adminFilter = [], adminIncluded = [] })
|
||||
for (const toolInstance of basicToolInstances) {
|
||||
const formattedTool = formatToOpenAIAssistantTool(toolInstance);
|
||||
let toolName = formattedTool[Tools.function].name;
|
||||
toolName = getToolkitKey(toolName) ?? toolName;
|
||||
toolName = toolkits.some((toolkit) => toolName.startsWith(toolkit.pluginKey))
|
||||
? toolName.split('_')[0]
|
||||
: toolName;
|
||||
if (filter.has(toolName) && included.size === 0) {
|
||||
continue;
|
||||
}
|
||||
@@ -505,15 +481,11 @@ async function loadAgentTools({ req, res, agent, tool_resources, openAIApiKey })
|
||||
const checkCapability = (capability) => enabledCapabilities.has(capability);
|
||||
const areToolsEnabled = checkCapability(AgentCapabilities.tools);
|
||||
|
||||
let includesWebSearch = false;
|
||||
const _agentTools = agent.tools?.filter((tool) => {
|
||||
if (tool === Tools.file_search) {
|
||||
return checkCapability(AgentCapabilities.file_search);
|
||||
} else if (tool === Tools.execute_code) {
|
||||
return checkCapability(AgentCapabilities.execute_code);
|
||||
} else if (tool === Tools.web_search) {
|
||||
includesWebSearch = checkCapability(AgentCapabilities.web_search);
|
||||
return includesWebSearch;
|
||||
} else if (!areToolsEnabled && !tool.includes(actionDelimiter)) {
|
||||
return false;
|
||||
}
|
||||
@@ -523,11 +495,7 @@ async function loadAgentTools({ req, res, agent, tool_resources, openAIApiKey })
|
||||
if (!_agentTools || _agentTools.length === 0) {
|
||||
return {};
|
||||
}
|
||||
/** @type {ReturnType<createOnSearchResults>} */
|
||||
let webSearchCallbacks;
|
||||
if (includesWebSearch) {
|
||||
webSearchCallbacks = createOnSearchResults(res);
|
||||
}
|
||||
|
||||
const { loadedTools, toolContextMap } = await loadTools({
|
||||
agent,
|
||||
functions: true,
|
||||
@@ -541,7 +509,6 @@ async function loadAgentTools({ req, res, agent, tool_resources, openAIApiKey })
|
||||
uploadImageBuffer,
|
||||
returnMetadata: true,
|
||||
fileStrategy: req.app.locals.fileStrategy,
|
||||
[Tools.web_search]: webSearchCallbacks,
|
||||
},
|
||||
});
|
||||
|
||||
@@ -715,7 +682,6 @@ async function loadAgentTools({ req, res, agent, tool_resources, openAIApiKey })
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
getToolkitKey,
|
||||
loadAgentTools,
|
||||
loadAndFormatTools,
|
||||
processRequiredActions,
|
||||
|
||||
@@ -1,122 +0,0 @@
|
||||
const { nanoid } = require('nanoid');
|
||||
const { Tools } = require('librechat-data-provider');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
/**
|
||||
* Creates a function to handle search results and stream them as attachments
|
||||
* @param {import('http').ServerResponse} res - The HTTP server response object
|
||||
* @returns {{ onSearchResults: function(SearchResult, GraphRunnableConfig): void; onGetHighlights: function(string): void}} - Function that takes search results and returns or streams an attachment
|
||||
*/
|
||||
function createOnSearchResults(res) {
|
||||
const context = {
|
||||
sourceMap: new Map(),
|
||||
searchResultData: undefined,
|
||||
toolCallId: undefined,
|
||||
attachmentName: undefined,
|
||||
messageId: undefined,
|
||||
conversationId: undefined,
|
||||
};
|
||||
|
||||
/**
|
||||
* @param {SearchResult} results
|
||||
* @param {GraphRunnableConfig} runnableConfig
|
||||
*/
|
||||
function onSearchResults(results, runnableConfig) {
|
||||
logger.info(
|
||||
`[onSearchResults] user: ${runnableConfig.metadata.user_id} | thread_id: ${runnableConfig.metadata.thread_id} | run_id: ${runnableConfig.metadata.run_id}`,
|
||||
results,
|
||||
);
|
||||
|
||||
if (!results.success) {
|
||||
logger.error(
|
||||
`[onSearchResults] user: ${runnableConfig.metadata.user_id} | thread_id: ${runnableConfig.metadata.thread_id} | run_id: ${runnableConfig.metadata.run_id} | error: ${results.error}`,
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
const turn = runnableConfig.toolCall?.turn ?? 0;
|
||||
const data = { turn, ...structuredClone(results.data ?? {}) };
|
||||
context.searchResultData = data;
|
||||
|
||||
// Map sources to links
|
||||
for (let i = 0; i < data.organic.length; i++) {
|
||||
const source = data.organic[i];
|
||||
if (source.link) {
|
||||
context.sourceMap.set(source.link, {
|
||||
type: 'organic',
|
||||
index: i,
|
||||
turn,
|
||||
});
|
||||
}
|
||||
}
|
||||
for (let i = 0; i < data.topStories.length; i++) {
|
||||
const source = data.topStories[i];
|
||||
if (source.link) {
|
||||
context.sourceMap.set(source.link, {
|
||||
type: 'topStories',
|
||||
index: i,
|
||||
turn,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
context.toolCallId = runnableConfig.toolCall.id;
|
||||
context.messageId = runnableConfig.metadata.run_id;
|
||||
context.conversationId = runnableConfig.metadata.thread_id;
|
||||
context.attachmentName = `${runnableConfig.toolCall.name}_${context.toolCallId}_${nanoid()}`;
|
||||
|
||||
const attachment = buildAttachment(context);
|
||||
|
||||
if (!res.headersSent) {
|
||||
return attachment;
|
||||
}
|
||||
res.write(`event: attachment\ndata: ${JSON.stringify(attachment)}\n\n`);
|
||||
}
|
||||
|
||||
/**
|
||||
* @param {string} link
|
||||
* @returns {void}
|
||||
*/
|
||||
function onGetHighlights(link) {
|
||||
const source = context.sourceMap.get(link);
|
||||
if (!source) {
|
||||
return;
|
||||
}
|
||||
const { type, index } = source;
|
||||
const data = context.searchResultData;
|
||||
if (!data) {
|
||||
return;
|
||||
}
|
||||
if (data[type][index] != null) {
|
||||
data[type][index].processed = true;
|
||||
}
|
||||
|
||||
const attachment = buildAttachment(context);
|
||||
res.write(`event: attachment\ndata: ${JSON.stringify(attachment)}\n\n`);
|
||||
}
|
||||
|
||||
return {
|
||||
onSearchResults,
|
||||
onGetHighlights,
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Helper function to build an attachment object
|
||||
* @param {object} context - The context containing attachment data
|
||||
* @returns {object} - The attachment object
|
||||
*/
|
||||
function buildAttachment(context) {
|
||||
return {
|
||||
messageId: context.messageId,
|
||||
toolCallId: context.toolCallId,
|
||||
conversationId: context.conversationId,
|
||||
name: context.attachmentName,
|
||||
type: Tools.web_search,
|
||||
[Tools.web_search]: context.searchResultData,
|
||||
};
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
createOnSearchResults,
|
||||
};
|
||||
@@ -1,9 +1,7 @@
|
||||
const {
|
||||
Constants,
|
||||
webSearchKeys,
|
||||
deprecatedAzureVariables,
|
||||
conflictingAzureVariables,
|
||||
extractVariableName,
|
||||
} = require('librechat-data-provider');
|
||||
const { isEnabled, checkEmailConfig } = require('~/server/utils');
|
||||
const { logger } = require('~/config');
|
||||
@@ -143,56 +141,4 @@ function checkPasswordReset() {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks web search configuration values to ensure they are environment variable references.
|
||||
* Warns if actual API keys or URLs are used instead of environment variable references.
|
||||
* Logs debug information for properly configured environment variable references.
|
||||
* @param {Object} webSearchConfig - The loaded web search configuration object.
|
||||
*/
|
||||
function checkWebSearchConfig(webSearchConfig) {
|
||||
if (!webSearchConfig) {
|
||||
return;
|
||||
}
|
||||
|
||||
webSearchKeys.forEach((key) => {
|
||||
const value = webSearchConfig[key];
|
||||
|
||||
if (typeof value === 'string') {
|
||||
const varName = extractVariableName(value);
|
||||
|
||||
if (varName) {
|
||||
// This is a proper environment variable reference
|
||||
const actualValue = process.env[varName];
|
||||
if (actualValue) {
|
||||
logger.debug(`Web search ${key}: Using environment variable ${varName} with value set`);
|
||||
} else {
|
||||
logger.debug(
|
||||
`Web search ${key}: Using environment variable ${varName} (not set in environment, user provided value)`,
|
||||
);
|
||||
}
|
||||
} else {
|
||||
// This is not an environment variable reference - warn user
|
||||
logger.warn(
|
||||
`❗ Web search configuration error: ${key} contains an actual value instead of an environment variable reference.
|
||||
|
||||
Current value: "${value.substring(0, 10)}..."
|
||||
|
||||
This is incorrect! You should use environment variable references in your librechat.yaml file, such as:
|
||||
${key}: "\${YOUR_ENV_VAR_NAME}"
|
||||
|
||||
Then set the actual API key in your .env file or environment variables.
|
||||
|
||||
More info: https://www.librechat.ai/docs/configuration/librechat_yaml/web_search`,
|
||||
);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
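A correctly configured webSearch block therefore carries environment variable references, which this check acknowledges with debug logs instead of warnings, e.g. (values are placeholders, mirroring the tests below):

// Sketch: a webSearch config object as parsed from librechat.yaml, using env var references.
checkWebSearchConfig({
  serperApiKey: '${SERPER_API_KEY}',
  firecrawlApiKey: '${FIRECRAWL_API_KEY}',
  safeSearch: 1, // non-string values are skipped by this check
});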
|
||||
|
||||
module.exports = {
|
||||
checkHealth,
|
||||
checkConfig,
|
||||
checkVariables,
|
||||
checkAzureVariables,
|
||||
checkWebSearchConfig,
|
||||
};
|
||||
module.exports = { checkVariables, checkHealth, checkConfig, checkAzureVariables };
|
||||
|
||||
@@ -1,203 +0,0 @@
|
||||
// Mock librechat-data-provider
|
||||
jest.mock('librechat-data-provider', () => ({
|
||||
...jest.requireActual('librechat-data-provider'),
|
||||
extractVariableName: jest.fn(),
|
||||
}));
|
||||
|
||||
// Mock the config logger
|
||||
jest.mock('~/config', () => ({
|
||||
logger: {
|
||||
debug: jest.fn(),
|
||||
warn: jest.fn(),
|
||||
},
|
||||
}));
|
||||
|
||||
const { checkWebSearchConfig } = require('./checks');
|
||||
const { logger } = require('~/config');
|
||||
const { extractVariableName } = require('librechat-data-provider');
|
||||
|
||||
describe('checkWebSearchConfig', () => {
|
||||
let originalEnv;
|
||||
|
||||
beforeEach(() => {
|
||||
// Clear all mocks
|
||||
jest.clearAllMocks();
|
||||
|
||||
// Store original environment
|
||||
originalEnv = process.env;
|
||||
|
||||
// Reset process.env
|
||||
process.env = { ...originalEnv };
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
// Restore original environment
|
||||
process.env = originalEnv;
|
||||
});
|
||||
|
||||
describe('when webSearchConfig is undefined or null', () => {
|
||||
it('should return early without logging when config is undefined', () => {
|
||||
checkWebSearchConfig(undefined);
|
||||
|
||||
expect(logger.debug).not.toHaveBeenCalled();
|
||||
expect(logger.warn).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('should return early without logging when config is null', () => {
|
||||
checkWebSearchConfig(null);
|
||||
|
||||
expect(logger.debug).not.toHaveBeenCalled();
|
||||
expect(logger.warn).not.toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe('when config values are proper environment variable references', () => {
|
||||
it('should log debug message for each valid environment variable with value set', () => {
|
||||
const config = {
|
||||
serperApiKey: '${SERPER_API_KEY}',
|
||||
jinaApiKey: '${JINA_API_KEY}',
|
||||
};
|
||||
|
||||
extractVariableName.mockReturnValueOnce('SERPER_API_KEY').mockReturnValueOnce('JINA_API_KEY');
|
||||
|
||||
process.env.SERPER_API_KEY = 'test-serper-key';
|
||||
process.env.JINA_API_KEY = 'test-jina-key';
|
||||
|
||||
checkWebSearchConfig(config);
|
||||
|
||||
expect(extractVariableName).toHaveBeenCalledWith('${SERPER_API_KEY}');
expect(extractVariableName).toHaveBeenCalledWith('${JINA_API_KEY}');
expect(logger.debug).toHaveBeenCalledWith(
'Web search serperApiKey: Using environment variable SERPER_API_KEY with value set',
);
expect(logger.debug).toHaveBeenCalledWith(
'Web search jinaApiKey: Using environment variable JINA_API_KEY with value set',
);
expect(logger.warn).not.toHaveBeenCalled();
});

it('should log debug message for environment variables not set in environment', () => {
const config = {
cohereApiKey: '${COHERE_API_KEY}',
};

extractVariableName.mockReturnValue('COHERE_API_KEY');

delete process.env.COHERE_API_KEY;

checkWebSearchConfig(config);

expect(logger.debug).toHaveBeenCalledWith(
'Web search cohereApiKey: Using environment variable COHERE_API_KEY (not set in environment, user provided value)',
);
expect(logger.warn).not.toHaveBeenCalled();
});
});

describe('when config values are actual values instead of environment variable references', () => {
it('should warn when serperApiKey contains actual API key', () => {
const config = {
serperApiKey: 'sk-1234567890abcdef',
};

extractVariableName.mockReturnValue(null);

checkWebSearchConfig(config);

expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining(
'❗ Web search configuration error: serperApiKey contains an actual value',
),
);
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining('Current value: "sk-1234567..."'),
);
expect(logger.debug).not.toHaveBeenCalled();
});

it('should warn when firecrawlApiUrl contains actual URL', () => {
const config = {
firecrawlApiUrl: 'https://api.firecrawl.dev',
};

extractVariableName.mockReturnValue(null);

checkWebSearchConfig(config);

expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining(
'❗ Web search configuration error: firecrawlApiUrl contains an actual value',
),
);
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining('Current value: "https://ap..."'),
);
});

it('should include documentation link in warning message', () => {
const config = {
firecrawlApiKey: 'fc-actual-key',
};

extractVariableName.mockReturnValue(null);

checkWebSearchConfig(config);

expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining(
'More info: https://www.librechat.ai/docs/configuration/librechat_yaml/web_search',
),
);
});
});

describe('when config contains mixed value types', () => {
it('should only process string values and ignore non-string values', () => {
const config = {
serperApiKey: '${SERPER_API_KEY}',
safeSearch: 1,
scraperTimeout: 7500,
jinaApiKey: 'actual-key',
};

extractVariableName.mockReturnValueOnce('SERPER_API_KEY').mockReturnValueOnce(null);

process.env.SERPER_API_KEY = 'test-key';

checkWebSearchConfig(config);

expect(extractVariableName).toHaveBeenCalledTimes(2);
expect(logger.debug).toHaveBeenCalledTimes(1);
expect(logger.warn).toHaveBeenCalledTimes(1);
});
});

describe('edge cases', () => {
it('should handle config with no web search keys', () => {
const config = {
someOtherKey: 'value',
anotherKey: '${SOME_VAR}',
};

checkWebSearchConfig(config);

expect(extractVariableName).not.toHaveBeenCalled();
expect(logger.debug).not.toHaveBeenCalled();
expect(logger.warn).not.toHaveBeenCalled();
});

it('should truncate long values in warning messages', () => {
const config = {
serperApiKey: 'this-is-a-very-long-api-key-that-should-be-truncated-in-the-warning-message',
};

extractVariableName.mockReturnValue(null);

checkWebSearchConfig(config);

expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining('Current value: "this-is-a-..."'),
);
});
});
});
@@ -38,7 +38,6 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
agents: interfaceConfig?.agents ?? defaults.agents,
temporaryChat: interfaceConfig?.temporaryChat ?? defaults.temporaryChat,
runCode: interfaceConfig?.runCode ?? defaults.runCode,
webSearch: interfaceConfig?.webSearch ?? defaults.webSearch,
customWelcome: interfaceConfig?.customWelcome ?? defaults.customWelcome,
});

@@ -49,7 +48,6 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
[PermissionTypes.AGENTS]: { [Permissions.USE]: loadedInterface.agents },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: loadedInterface.temporaryChat },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: loadedInterface.runCode },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: loadedInterface.webSearch },
});
await updateAccessPermissions(SystemRoles.ADMIN, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: loadedInterface.prompts },
@@ -58,7 +56,6 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
[PermissionTypes.AGENTS]: { [Permissions.USE]: loadedInterface.agents },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: loadedInterface.temporaryChat },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: loadedInterface.runCode },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: loadedInterface.webSearch },
});

let i = 0;
@@ -77,7 +74,7 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
// warn about config.modelSpecs.prioritize if true and presets are enabled, that default presets will conflict with prioritizing model specs.
if (config?.modelSpecs?.prioritize && loadedInterface.presets) {
logger.warn(
"Note: Prioritizing model specs can conflict with default presets if a default preset is set. It's recommended to disable presets from the interface or disable use of a default preset.",
'Note: Prioritizing model specs can conflict with default presets if a default preset is set. It\'s recommended to disable presets from the interface or disable use of a default preset.',
);
i === 0 && i++;
}
@@ -91,14 +88,14 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
loadedInterface.parameters)
) {
logger.warn(
"Note: Enforcing model specs can conflict with the interface options: endpointsMenu, modelSelect, presets, and parameters. It's recommended to disable these options from the interface or disable enforcing model specs.",
'Note: Enforcing model specs can conflict with the interface options: endpointsMenu, modelSelect, presets, and parameters. It\'s recommended to disable these options from the interface or disable enforcing model specs.',
);
i === 0 && i++;
}
// warn if enforce is true and prioritize is not, that enforcing model specs without prioritizing them can lead to unexpected behavior.
if (config?.modelSpecs?.enforce && !config?.modelSpecs?.prioritize) {
logger.warn(
"Note: Enforcing model specs without prioritizing them can lead to unexpected behavior. It's recommended to enable prioritizing model specs if enforcing them.",
'Note: Enforcing model specs without prioritizing them can lead to unexpected behavior. It\'s recommended to enable prioritizing model specs if enforcing them.',
);
i === 0 && i++;
}
@@ -16,7 +16,6 @@ describe('loadDefaultInterface', () => {
agents: true,
temporaryChat: true,
runCode: true,
webSearch: true,
},
};
const configDefaults = { interface: {} };
@@ -30,7 +29,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: true },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: true },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: true },
});
});

@@ -43,7 +41,6 @@ describe('loadDefaultInterface', () => {
agents: false,
temporaryChat: false,
runCode: false,
webSearch: false,
},
};
const configDefaults = { interface: {} };
@@ -57,7 +54,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: false },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: false },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: false },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: false },
});
});

@@ -74,7 +70,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: undefined },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});

@@ -87,7 +82,6 @@ describe('loadDefaultInterface', () => {
agents: undefined,
temporaryChat: undefined,
runCode: undefined,
webSearch: undefined,
},
};
const configDefaults = { interface: {} };
@@ -101,7 +95,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: undefined },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});

@@ -114,7 +107,6 @@ describe('loadDefaultInterface', () => {
agents: true,
temporaryChat: undefined,
runCode: false,
webSearch: true,
},
};
const configDefaults = { interface: {} };
@@ -128,7 +120,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: true },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: false },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: true },
});
});

@@ -142,7 +133,6 @@ describe('loadDefaultInterface', () => {
agents: true,
temporaryChat: true,
runCode: true,
webSearch: true,
},
};

@@ -155,7 +145,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: true },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: true },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: true },
});
});

@@ -172,7 +161,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: undefined },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});

@@ -189,7 +177,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: undefined },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});

@@ -206,7 +193,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: undefined },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});

@@ -232,7 +218,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: false },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: false },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});

@@ -246,7 +231,6 @@ describe('loadDefaultInterface', () => {
agents: undefined,
temporaryChat: undefined,
runCode: undefined,
webSearch: undefined,
},
};

@@ -259,33 +243,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: undefined },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});

it('should call updateAccessPermissions with the correct parameters when WEB_SEARCH is undefined', async () => {
const config = {
interface: {
prompts: true,
bookmarks: false,
multiConvo: true,
agents: false,
temporaryChat: true,
runCode: false,
},
};
const configDefaults = { interface: {} };

await loadDefaultInterface(config, configDefaults);

expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: true },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: false },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: true },
[PermissionTypes.AGENTS]: { [Permissions.USE]: false },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: false },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});
});
@@ -1,44 +0,0 @@
const { removeNullishValues } = require('librechat-data-provider');
const { logger } = require('~/config');

/**
* Loads and maps the Cloudflare Turnstile configuration.
*
* Expected config structure:
*
* turnstile:
* siteKey: "your-site-key-here"
* options:
* language: "auto" // "auto" or an ISO 639-1 language code (e.g. en)
* size: "normal" // Options: "normal", "compact", "flexible", or "invisible"
*
* @param {TCustomConfig | undefined} config - The loaded custom configuration.
* @param {TConfigDefaults} configDefaults - The custom configuration default values.
* @returns {TCustomConfig['turnstile']} The mapped Turnstile configuration.
*/
function loadTurnstileConfig(config, configDefaults) {
const { turnstile: customTurnstile = {} } = config ?? {};
const { turnstile: defaults = {} } = configDefaults;

/** @type {TCustomConfig['turnstile']} */
const loadedTurnstile = removeNullishValues({
siteKey: customTurnstile.siteKey ?? defaults.siteKey,
options: customTurnstile.options ?? defaults.options,
});

const enabled = Boolean(loadedTurnstile.siteKey);

if (enabled) {
logger.info(
'Turnstile is ENABLED with configuration:\n' + JSON.stringify(loadedTurnstile, null, 2),
);
} else {
logger.info('Turnstile is DISABLED (no siteKey provided).');
}

return loadedTurnstile;
}

module.exports = {
loadTurnstileConfig,
};
@@ -10,7 +10,6 @@ const {
discordLogin,
facebookLogin,
appleLogin,
openIdJwtLogin,
} = require('~/strategies');
const { isEnabled } = require('~/server/utils');
const keyvRedis = require('~/cache/keyvRedis');
@@ -20,7 +19,7 @@ const { logger } = require('~/config');
*
* @param {Express.Application} app
*/
const configureSocialLogins = async (app) => {
const configureSocialLogins = (app) => {
logger.info('Configuring social logins...');

if (process.env.GOOGLE_CLIENT_ID && process.env.GOOGLE_CLIENT_SECRET) {
@@ -63,11 +62,8 @@ const configureSocialLogins = async (app) => {
}
app.use(session(sessionOptions));
app.use(passport.session());
const config = await setupOpenId();
if (isEnabled(process.env.OPENID_REUSE_TOKENS)) {
logger.info('OpenID token reuse is enabled.');
passport.use('openidJwt', openIdJwtLogin(config));
}
setupOpenId();

logger.info('OpenID Connect configured.');
}
};
@@ -200,12 +200,11 @@ function generateConfig(key, baseURL, endpoint) {
config.capabilities = [
AgentCapabilities.execute_code,
AgentCapabilities.file_search,
AgentCapabilities.web_search,
AgentCapabilities.artifacts,
AgentCapabilities.actions,
AgentCapabilities.tools,
AgentCapabilities.chain,
AgentCapabilities.ocr,
AgentCapabilities.chain,
];
}
@@ -1,5 +1,6 @@
const fs = require('fs').promises;
const { getImporter } = require('./importers');
const { indexSync } = require('~/lib/db');
const { logger } = require('~/config');

/**
@@ -14,6 +15,8 @@ const importConversations = async (job) => {
const jsonData = JSON.parse(fileData);
const importer = getImporter(jsonData);
await importer(jsonData, requestUserId);
// Sync Meilisearch index
await indexSync();
logger.debug(`user: ${requestUserId} | Finished importing conversations`);
} catch (error) {
logger.error(`user: ${requestUserId} | Failed to import conversation: `, error);
@@ -84,14 +84,14 @@ describe('importChatGptConvo', () => {
const { parent } = jsonData[0].mapping[id];

const expectedParentId = parent
? (idToUUIDMap.get(parent) ?? Constants.NO_PARENT)
? idToUUIDMap.get(parent) ?? Constants.NO_PARENT
: Constants.NO_PARENT;

const actualMessageId = idToUUIDMap.get(id);
const actualParentId = actualMessageId
? importBatchBuilder.saveMessage.mock.calls.find(
(call) => call[0].messageId === actualMessageId,
)[0].parentMessageId
(call) => call[0].messageId === actualMessageId,
)[0].parentMessageId
: Constants.NO_PARENT;

expect(actualParentId).toBe(expectedParentId);
@@ -544,7 +544,7 @@ describe('processAssistantMessage', () => {

// Expected output should have all citations replaced with markdown links
const expectedOutput =
"Signal Sciences is a web application security company that was founded on March 10, 2014, by Andrew Peterson, Nick Galbreath, and Zane Lackey. It operates as a for-profit company with its legal name being Signal Sciences Corp. The company has achieved significant growth and is recognized as the fastest-growing web application security company in the world. Signal Sciences developed a next-gen web application firewall (NGWAF) and runtime application self-protection (RASP) technologies designed to increase security and maintain reliability without compromising the performance of modern web applications distributed across cloud, on-premise, edge, or hybrid environments ([Signal Sciences - Crunchbase Company Profile & Funding](https://www.crunchbase.com/organization/signal-sciences)) ([Demand More from Your WAF - Signal Sciences now part of Fastly](https://www.signalsciences.com/)).\n\nIn a major development, Fastly, Inc., a provider of an edge cloud platform, announced the completion of its acquisition of Signal Sciences on October 1, 2020. This acquisition was valued at approximately $775 million in cash and stock. By integrating Signal Sciences' powerful web application and API security solutions with Fastly's edge cloud platform and existing security offerings, they aimed to form a unified suite of security solutions. The merger was aimed at expanding Fastly's security portfolio, particularly at a time when digital security has become paramount for businesses operating online ([Fastly Completes Acquisition of Signal Sciences | Fastly](https://www.fastly.com/press/press-releases/fastly-completes-acquisition-signal-sciences)) ([Fastly Agrees to Acquire Signal Sciences for $775 Million - Cooley](https://www.cooley.com/news/coverage/2020/2020-08-27-fastly-agrees-to-acquire-signal-sciences-for-775-million)).";
'Signal Sciences is a web application security company that was founded on March 10, 2014, by Andrew Peterson, Nick Galbreath, and Zane Lackey. It operates as a for-profit company with its legal name being Signal Sciences Corp. The company has achieved significant growth and is recognized as the fastest-growing web application security company in the world. Signal Sciences developed a next-gen web application firewall (NGWAF) and runtime application self-protection (RASP) technologies designed to increase security and maintain reliability without compromising the performance of modern web applications distributed across cloud, on-premise, edge, or hybrid environments ([Signal Sciences - Crunchbase Company Profile & Funding](https://www.crunchbase.com/organization/signal-sciences)) ([Demand More from Your WAF - Signal Sciences now part of Fastly](https://www.signalsciences.com/)).\n\nIn a major development, Fastly, Inc., a provider of an edge cloud platform, announced the completion of its acquisition of Signal Sciences on October 1, 2020. This acquisition was valued at approximately $775 million in cash and stock. By integrating Signal Sciences\' powerful web application and API security solutions with Fastly\'s edge cloud platform and existing security offerings, they aimed to form a unified suite of security solutions. The merger was aimed at expanding Fastly\'s security portfolio, particularly at a time when digital security has become paramount for businesses operating online ([Fastly Completes Acquisition of Signal Sciences | Fastly](https://www.fastly.com/press/press-releases/fastly-completes-acquisition-signal-sciences)) ([Fastly Agrees to Acquire Signal Sciences for $775 Million - Cooley](https://www.cooley.com/news/coverage/2020/2020-08-27-fastly-agrees-to-acquire-signal-sciences-for-775-million)).';

const result = processAssistantMessage(assistantMessage, messageText);
expect(result).toBe(expectedOutput);
@@ -603,7 +603,7 @@ describe('processAssistantMessage', () => {
// In a ReDoS vulnerability, time would roughly double with each size increase
for (let i = 1; i < results.length; i++) {
const ratio = results[i] / results[i - 1];
expect(ratio).toBeLessThan(3); // Allow for CI environment variability while still catching ReDoS
expect(ratio).toBeLessThan(2); // Processing time should not double
console.log(`Size ${sizes[i]} processing time ratio: ${ratio}`);
}
@@ -14,7 +14,6 @@ const staticCache = (staticPath) =>
res.setHeader('Cache-Control', `public, max-age=${maxAge}, s-maxage=${sMaxAge}`);
}
},
index: false,
});

module.exports = staticCache;
@@ -70,13 +70,7 @@ const sendError = async (req, res, options, callback) => {
}

if (shouldSaveMessage) {
await saveMessage(
req,
{ ...errorMessage, user },
{
context: 'api/server/utils/streamResponse.js - sendError',
},
);
await saveMessage(req, { ...errorMessage, user });
}

if (!errorMessage.error) {
@@ -4,10 +4,9 @@ const googleLogin = require('./googleStrategy');
const githubLogin = require('./githubStrategy');
const discordLogin = require('./discordStrategy');
const facebookLogin = require('./facebookStrategy');
const { setupOpenId, getOpenIdConfig } = require('./openidStrategy');
const setupOpenId = require('./openidStrategy');
const jwtLogin = require('./jwtStrategy');
const ldapLogin = require('./ldapStrategy');
const openIdJwtLogin = require('./openIdJwtStrategy');

module.exports = {
appleLogin,
@@ -18,7 +17,5 @@ module.exports = {
jwtLogin,
facebookLogin,
setupOpenId,
getOpenIdConfig,
ldapLogin,
openIdJwtLogin,
};
};
@@ -4,7 +4,7 @@ const { getUserById, updateUser } = require('~/models');
const { logger } = require('~/config');

// JWT strategy
const jwtLogin = () =>
const jwtLogin = async () =>
new JwtStrategy(
{
jwtFromRequest: ExtractJwt.fromAuthHeaderAsBearerToken(),

@@ -23,7 +23,7 @@ const {

// Check required environment variables
if (!LDAP_URL || !LDAP_USER_SEARCH_BASE) {
module.exports = null;
return null;
}

const searchAttributes = [
@@ -1,52 +0,0 @@
const { SystemRoles } = require('librechat-data-provider');
const { Strategy: JwtStrategy, ExtractJwt } = require('passport-jwt');
const { updateUser, findUser } = require('~/models');
const { logger } = require('~/config');
const jwksRsa = require('jwks-rsa');
const { isEnabled } = require('~/server/utils');
/**
* @function openIdJwtLogin
* @param {import('openid-client').Configuration} openIdConfig - Configuration object for the JWT strategy.
* @returns {JwtStrategy}
* @description This function creates a JWT strategy for OpenID authentication.
* It uses the jwks-rsa library to retrieve the signing key from a JWKS endpoint.
* The strategy extracts the JWT from the Authorization header as a Bearer token.
* The JWT is then verified using the signing key, and the user is retrieved from the database.
*/
const openIdJwtLogin = (openIdConfig) =>
new JwtStrategy(
{
jwtFromRequest: ExtractJwt.fromAuthHeaderAsBearerToken(),
secretOrKeyProvider: jwksRsa.passportJwtSecret({
cache: isEnabled(process.env.OPENID_JWKS_URL_CACHE_ENABLED) || true,
cacheMaxAge: process.env.OPENID_JWKS_URL_CACHE_TIME
? eval(process.env.OPENID_JWKS_URL_CACHE_TIME)
: 60000,
jwksUri: openIdConfig.serverMetadata().jwks_uri,
}),
},
async (payload, done) => {
try {
const user = await findUser({ openidId: payload?.sub });

if (user) {
user.id = user._id.toString();
if (!user.role) {
user.role = SystemRoles.USER;
await updateUser(user.id, { role: user.role });
}
done(null, user);
} else {
logger.warn(
'[openIdJwtLogin] openId JwtStrategy => no user found with the sub claims: ' +
payload?.sub,
);
done(null, false);
}
} catch (err) {
done(err, false);
}
},
);

module.exports = openIdJwtLogin;
Some files were not shown because too many files have changed in this diff.