Compare commits

...

65 Commits

Author SHA1 Message Date
Dustin Healy
70a82652a5 chore: bring back file search and ocr options 2025-08-14 07:50:21 -07:00
Dustin Healy
f211e25aac fix: stop duplication of file in chat on end of response stream 2025-08-13 22:29:51 -07:00
Dustin Healy
800391b264 chore: remove out of scope formatting changes 2025-08-13 20:59:08 -07:00
Andres Restrepo
6605b6c800 feat: implement Anthropic native PDF support with document preservation
- Add comprehensive debug logging throughout PDF processing pipeline
- Refactor attachment processing to separate image and document handling
- Create distinct addImageURLs(), addDocuments(), and processAttachments() methods
- Fix critical bugs in stream handling and parameter passing
- Add streamToBuffer utility for proper stream-to-buffer conversion
- Remove api/agents submodule from repository

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-08-12 18:26:40 -05:00
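
The commit above mentions a streamToBuffer utility for stream-to-buffer conversion. A minimal sketch of such a helper, assuming a Node.js Readable stream; the utility actually added in this commit may differ:

// Collect a readable stream's chunks into a single Buffer.
const streamToBuffer = async (stream) => {
  const chunks = [];
  for await (const chunk of stream) {
    chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk));
  }
  return Buffer.concat(chunks);
};

module.exports = { streamToBuffer };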
Danny Avila
007570b5c6 🎨 style: Add missing markdown font size variable to CSS (#9011) 2025-08-12 10:19:29 -04:00
Jeffrey Bulanadi
5943d5346c 📑 docs: Fix Typos in JSDoc and Doc Files (#8998)
- Fix grammar in translations README: 'if has not been ran' → 'if it has not been run'
- Fix spacing in JSDoc comments: 'at theend' → 'at the end' (2 instances)
2025-08-12 10:18:55 -04:00
Danny Avila
052e61b735 v0.8.0-rc2 (#9000) 2025-08-11 18:58:21 -04:00
Danny Avila
1ccac58403 🔒 fix: Provider Validation for Social, OpenID, SAML, and LDAP Logins (#8999)
* fix: social login provider crossover

* feat: Enhance OpenID login handling and add tests for provider validation

* refactor: authentication error handling to use ErrorTypes.AUTH_FAILED enum

* refactor: update authentication error handling in LDAP and SAML strategies to use ErrorTypes.AUTH_FAILED enum

* ci: Add validation for login with existing email and different provider in SAML strategy

chore: Add logging for existing users with different providers in LDAP, SAML, and Social Login strategies
2025-08-11 18:51:46 -04:00
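
The provider-validation work above routes auth failures through ErrorTypes.AUTH_FAILED. A hedged sketch of the kind of check described, assuming ErrorTypes is exported by librechat-data-provider; the function name and fields are illustrative, not the actual strategy code:

const { ErrorTypes } = require('librechat-data-provider');

// Reject a login when the account was created with a different provider,
// logging the crossover attempt as the commit describes.
function validateProvider({ existingUser, incomingProvider, logger }) {
  if (existingUser?.provider && existingUser.provider !== incomingProvider) {
    logger.warn(
      `[validateProvider] ${existingUser.email} is registered via ${existingUser.provider}; rejecting ${incomingProvider} login`,
    );
    const error = new Error(ErrorTypes.AUTH_FAILED);
    error.code = ErrorTypes.AUTH_FAILED;
    throw error;
  }
}

module.exports = { validateProvider };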
Clay Rosenthal
04d74a7e07 🪖 ci: Helm OCI Publishing (#7256)
* Adding helm oci publishing (#3)

Signed-off-by: Clay Rosenthal <clayros@amazon.com>

* Update chart version

Signed-off-by: Clay Rosenthal <clayros@amazon.com>

* Update helm release

Signed-off-by: Clay Rosenthal <clayros@amazon.com>

* Update helm chart package path

Signed-off-by: Clay Rosenthal <clayros@amazon.com>

---------

Signed-off-by: Clay Rosenthal <clayros@amazon.com>
2025-08-11 16:21:05 -04:00
github-actions[bot]
0fdca8ddbd 🌍 i18n: Update translation.json with latest translations (#8996)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-08-11 16:19:37 -04:00
Danny Avila
c5ca621efd 🧑‍💼 feat: Add Agent Model Validation (#8995)
* fix: Update logger import to use data-schemas module

* feat: agent model validation

* fix: Remove invalid error messages from translation file
2025-08-11 14:26:28 -04:00
Danny Avila
8cefa566da 📸 fix: Avatar Handling for Social Login (#8993)
- Updated `handleExistingUser` function to improve avatar handling logic, including checks for manual flags and null/undefined avatars.
- Introduced a new test suite for `handleExistingUser` covering various scenarios, ensuring robust functionality for avatar updates in both local and non-local storage contexts.
2025-08-11 11:50:46 -04:00
Danny Avila
7e4c8a5d0d 🛡️ fix: OTP Verification For 2FA Disable Operation (#8975) 2025-08-10 15:05:16 -04:00
Danny Avila
edf33bedcb 🛂 feat: Payload limits and Validation for User-created Memories (#8974) 2025-08-10 14:46:16 -04:00
Dustin Healy
21e00168b1 🪙 fix: Max Output Tokens Refactor for Responses API (#8972)

chore: Remove `max_output_tokens` from model kwargs in `titleConvo` if provided
2025-08-10 14:08:35 -04:00
github-actions[bot]
da3730b7d6 🌍 i18n: Update translation.json with latest translations (#8957)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-08-09 12:04:56 -04:00
Danny Avila
770c766d50 🔧 refactor: Move Plugin-related Helpers to TS API and Add Tests (#8961) 2025-08-09 12:02:44 -04:00
alkshmir
5eb6926464 🪖 chore: Fix Typo in helm/librechat/values.yaml (#8960) 2025-08-09 12:00:37 -04:00
Danny Avila
e478ae1c28 📦 chore: Bump @librechat/agents to v2.4.75 (#8956) 2025-08-09 00:01:21 -04:00
Danny Avila
0c9284c8ae 🧠 refactor: Memory Timeout after Completion and Guarantee Stream Final Event (#8955) 2025-08-09 00:01:10 -04:00
Danny Avila
4eeadddfe6 🔮 fix: Artifacts readOnly to Re-render when Expected (#8954) 2025-08-08 22:44:58 -04:00
Dustin Healy
9ca1847535 🔧 refactor: customUserVar Error Normalization (#8950)
* fix: localization string had unused template var

* fix: add normalizeHttpError to hopefully stop UI hangs when an error is returned in UserController

- Ensures updateUserPluginsController always returns valid HTTP status codes instead of undefined
- Add normalizeHttpError() helper to safely extract status/message from errors
- Default to 400 status code when Error.status is undefined/invalid

* refactor: move normalizeHttpError to packages/api
2025-08-08 15:53:04 -04:00
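
A minimal sketch of a normalizeHttpError() helper matching the behavior described above (fall back to 400 when Error.status is missing or invalid); the exact implementation in packages/api may differ:

function normalizeHttpError(error, defaultStatus = 400) {
  const status = Number(error?.status);
  const isValidStatus = Number.isInteger(status) && status >= 400 && status < 600;
  return {
    status: isValidStatus ? status : defaultStatus,
    message: error?.message || 'An error occurred',
  };
}

// Usage in a controller:
// const { status, message } = normalizeHttpError(err);
// res.status(status).json({ message });

module.exports = { normalizeHttpError };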
Danny Avila
5d0bc95193 🧪 fix: Editor Styling, Incomplete Artifact Editing, Optimize Artifact Context (#8953)
* refactor: optimize artifacts context for improved performance

* fix: layout classes for artifacts editor

* chore: linting

* fix: enhance artifact mutation handling in CodeEditor to prevent infinite retries

* fix: handle incomplete artifacts in replaceArtifactContent and add regression tests
2025-08-08 15:49:58 -04:00
github-actions[bot]
e7d6100fe4 🌍 i18n: Update translation.json with latest translations (#8934)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-08-08 12:21:27 -04:00
Danny Avila
01a95229f2 📦 chore: Bump @librechat/agents to v2.4.73 (#8949) 2025-08-08 12:19:36 -04:00
Danny Avila
0939250f07 🛣️ fix: Remove Title Tokens Limit for GPT-5 Models (#8948)
* 🛣️ fix: Remove Title Tokens Limit for GPT-5 Models

* 🛣️ fix: Remove max_completion_tokens from modelKwargs when maxTokens is disabled

* chore: Add test-image* to .gitignore for CI/CD data
2025-08-08 11:15:29 -04:00
Danny Avila
7147bce3c3 feat: Add OpenAI Verbosity Parameter (#8929)
* WIP: Verbosity OpenAI Parameter

* 🔧 chore: remove unused import of extractEnvVariable from parsers.ts

*  feat: add comprehensive tests for getOpenAIConfig and enhance verbosity handling

* fix: Handling for maxTokens in GPT-5+ models and add corresponding tests

* feat: Implement GPT-5+ model handling in processMemory function
2025-08-07 20:49:40 -04:00
github-actions[bot]
486fe34a2b 🌍 i18n: Update translation.json with latest translations (#8924)
* 🌍 i18n: Update translation.json with latest translations

* Update translation.json

---------

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Danny Avila <danny@librechat.ai>
2025-08-07 20:42:44 -04:00
github-actions[bot]
922f43f520 🌍 i18n: Update translation.json with latest translations (#8907)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-08-07 16:25:24 -04:00
Marco Beretta
e6fa01d514 💬 style: Enhance Tooltip with HTML support and Improve Styling (#8915)
*  feat: Enhance Tooltip component with HTML support and styling improvements

*  feat: Integrate DOMPurify for HTML sanitization in Tooltip component
2025-08-07 16:24:42 -04:00
Danny Avila
8238fb49e0 📦 chore: bump @librechat/agents to v2.4.72 2025-08-07 16:23:12 -04:00
Danny Avila
430557676d feat: GPT-5 Token Limits, Rates, Icon, Reasoning Support 2025-08-07 16:23:11 -04:00
Danny Avila
8a5047c456 📦 chore: bump @librechat/agents to v2.4.71 2025-08-07 16:23:11 -04:00
Danny Avila
c787515894 🧠 feat: Add minimal Reasoning Effort option 2025-08-07 16:23:11 -04:00
Danny Avila
d95d8032cc feat: GPT-OSS models Token Limits & Rates 2025-08-07 16:23:10 -04:00
Joseph Licata
b9f72f4869 🎚️ refactor: Update Min. Values for OpenAI Parameters (#8922) 2025-08-07 14:38:08 -04:00
Danny Avila
429bb6653a 📦 chore: bump @librechat/agents to v2.4.70 (#8923) 2025-08-07 14:36:10 -04:00
Dustin Healy
47caafa8f8 🔧 fix: MCP Queries and Connections (#8870)
* fix: add refetchQueries on connection success so ToolSelectDialog doesn't require hard refresh

* fix: change hook so we only query connection status when mcpServers are configured

* fix: change refetchQueries to invalidateQueries for tools after server connection update

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2025-08-07 02:31:05 -04:00
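
A hedged sketch of the invalidation pattern described above, using TanStack Query; the hook name and query keys are illustrative assumptions, not LibreChat's actual keys:

import { useQueryClient } from '@tanstack/react-query';

export function useOnMCPConnectionSuccess() {
  const queryClient = useQueryClient();
  return async () => {
    // invalidateQueries marks the cached data stale so consumers refetch lazily,
    // instead of forcing an eager refetch for every mounted observer.
    await queryClient.invalidateQueries({ queryKey: ['tools'] });
    await queryClient.invalidateQueries({ queryKey: ['mcp', 'connectionStatus'] });
  };
}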
Dustin Healy
8530594f37 🟢 fix: Incorrect customUserVars Set States (#8905) 2025-08-07 02:19:06 -04:00
Sebastien Bruel
0b071c06f6 🥞 refactor: Duplicate Agent Versions as Informational Instead of Errors (#8881)
* Fix error when updating an agent with no changes

* Add tests

* Revert translation file changes
2025-08-07 02:12:05 -04:00
Danny Avila
1092392ed8 📂 fix: File Cleanup for Uploaded "Agent" Files (#8900) 2025-08-06 19:45:57 -04:00
Danny Avila
36c8947029 🔄 refactor: Select OpenRouter LLM Class Dynamically by baseURL (#8898) 2025-08-06 19:26:40 -04:00
Danny Avila
4175a3ea19 v0.8.0-rc1 (#8846) 2025-08-04 15:25:07 -04:00
github-actions[bot]
02dc71f4b7 🌍 i18n: Update translation.json with latest translations (#8845)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-08-04 15:24:24 -04:00
github-actions[bot]
a6c99a3267 🌍 i18n: Update translation.json with latest translations (#8828)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-08-04 14:54:07 -04:00
SollalF
fcefc6eedf feat: Add OpenID Audience Parameter (#8837)
*  feat: Add OpenID audience parameter support in authorization requests

* Updated .env.example to include OPENID_AUDIENCE variable for configuration.
* Enhanced openidStrategy to set the audience parameter in authorization requests if specified, improving OpenID integration.

* Update .env.example

* Update openidStrategy.js

---------

Co-authored-by: Danny Avila <danacordially@gmail.com>
2025-08-04 14:49:36 -04:00
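
A hedged sketch of how an OPENID_AUDIENCE value could be forwarded as an extra authorization parameter; the surrounding options object is illustrative, not the actual openidStrategy.js code:

const authorizationParams = {
  scope: process.env.OPENID_SCOPE || 'openid profile email',
};

if (process.env.OPENID_AUDIENCE) {
  // Providers such as Auth0 use `audience` to select which API the access token is minted for.
  authorizationParams.audience = process.env.OPENID_AUDIENCE;
}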
Marco Beretta
dfdafdbd09 🖌️ feat: add animation styles for popovers and tooltips (#8831)
* feat: Update client version to 0.2.2 and add animation styles for popovers and tooltips

* refactor: Remove focus outline styles from Dropdown component

* feat: Update client version to 0.2.3 and add Select component export

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2025-08-04 14:44:00 -04:00
Danny Avila
33834cd484 🧹 feat: Automatic File Cleanup for Mistral OCR Uploads (#8827)
* chore: Handle optional token_endpoint in OAuth metadata discovery

* chore: Simplify permission typing logic in checkAccess function

* feat: Implement `deleteMistralFile` function and integrate file cleanup in `uploadMistralOCR`
2025-08-03 17:11:14 -04:00
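
A hedged sketch of a cleanup helper like the deleteMistralFile described above, assuming Mistral's Files API (DELETE /v1/files/{id}); names and error handling are illustrative:

const axios = require('axios');

async function deleteMistralFile({ fileId, apiKey, baseURL = 'https://api.mistral.ai/v1' }) {
  try {
    await axios.delete(`${baseURL}/files/${fileId}`, {
      headers: { Authorization: `Bearer ${apiKey}` },
    });
  } catch (error) {
    // Cleanup is best-effort: log and continue rather than failing the OCR request.
    console.warn(`[deleteMistralFile] Failed to delete file ${fileId}: ${error.message}`);
  }
}

module.exports = { deleteMistralFile };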
Danny Avila
7ef2c626e2 🛠️ feat: Add Reset-Meili-Sync Script for MongoDB Flags (#8823) 2025-08-02 18:04:04 -04:00
Danny Avila
bc43423f58 🌍 i18n: Add Tibetan and Ukrainian languages to localization (#8819)
* 🌍 i18n: Add Tibetan and Ukrainian languages to localization

* feat: Update language selector to include Tibetan and Ukrainian options
* feat: Add translation files for Tibetan and Ukrainian languages
* chore: Update English translation.json with new language keys
* docs: Create localization guide for adding new languages

* Update README.md
2025-08-02 12:37:18 -04:00
Danny Avila
863401bcdf 🔧 fix: Assistants API SDK calls to match Updated Arguments (#8818)
* chore: remove comments in agents endpoint error handler

* chore: improve openai sdk typing

* chore: improve typing for azure asst init

* 🔧 fix: Assistants API SDK calls to match Updated Arguments
2025-08-02 12:19:58 -04:00
Danny Avila
33c8b87edd 📦 chore: Bump @modelcontextprotocol/sdk to v1.17.1 (#8809) 2025-08-01 16:10:12 -04:00
github-actions[bot]
077248a8a7 🌍 i18n: Update translation.json with latest translations (#8808)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-08-01 15:55:42 -04:00
Danny Avila
c6fb4686ef 🌐 ci: Bump Locize i18n Action Version 2025-08-01 15:52:00 -04:00
Dustin Healy
f1c6e4d55e 🧪 ci: Unit Tests for MCP Routes (#8803)
* refactor: remove unreachable if checks from mcp routes

* test: add tests for mcp routes
2025-08-01 14:47:00 -04:00
William Kim
e192c99c7d 🔧 fix: Apply Convo Export filename sanitization at export, not input (#8779)
Co-authored-by: Woosub Kim <woosub.kim@navercorp.com>
2025-07-31 07:28:33 -04:00
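
An illustrative sketch of sanitizing the export filename at download time rather than mutating the stored conversation title; the helper name and character set are assumptions:

function sanitizeExportFilename(title, extension = 'json') {
  const safe = (title || 'conversation')
    .replace(/[\\/:*?"<>|]/g, '_') // strip characters invalid on common filesystems
    .replace(/\s+/g, ' ')
    .trim()
    .slice(0, 100);
  return `${safe}.${extension}`;
}

module.exports = { sanitizeExportFilename };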
wartek69
056172f007 🔒 feat: MCP OAuth Config for Metadata Parameters (#8691)
* fix(mcp): add default metadata for pre-configured oauth

* removed lingering comment

* added configurable options & jest unit tests

* Update handler.test.ts

* Update handler.ts

---------

Co-authored-by: Alex <aleksander.chernyavskiy@seafar.eu>
Co-authored-by: Danny Avila <danacordially@gmail.com>
2025-07-31 07:24:49 -04:00
github-actions[bot]
5eed5009e9 🌍 i18n: Update translation.json with latest translations (#8771)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-07-30 15:59:27 -04:00
Danny Avila
6fc9abd4ad ✂️ fix: Remove Image Payloads from Memory Processing (#8770) 2025-07-30 15:54:33 -04:00
github-actions[bot]
03a924eaca 🌍 i18n: Update translation.json with latest translations (#8739)
* 🌍 i18n: Update translation.json with latest translations

* chore: Remove unused translation keys for MCP custom variables

---------

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Danny Avila <danny@librechat.ai>
2025-07-30 15:23:12 -04:00
Danny Avila
25c993d93e 📦 chore: bump @librechat/agents to v2.4.69 (#8769) 2025-07-30 15:18:17 -04:00
Marco Beretta
09659c1040 🔨 style: Improve MCP UI (#8745)
* refactor: Enhance MCP components with improved UI elements and localization updates

* refactor: Clean up MCP components by removing unused imports and improving layout

* refactor: Update server status badge styling for improved UI consistency

* refactor: Move group up a level so 'X' and background highlight occur at same time for cancellation button

* refactor: Remove unused translation keys from the localization file

---------

Co-authored-by: Dustin Healy <dustinhealy1@gmail.com>
2025-07-30 14:56:22 -04:00
Josh Mullin
19a8f5c545 🐦 fix: Prioritize OIDC Username Claims to Prevent First Name Usernames (#8695)
Now prioritizes preferred_username claim, then the nonstandard
username claim, then email.

Removed given_name as a possible username choice to avoid exposing users’ first names as
usernames.

Updated openidStrategy.spec.js to reflect the new claim order.

Fixed mock OpenID server behavior where preferred_username was always
hardcoded, causing test failures.

Adjusted OpenID setup test to align with new username parameter
behavior.
2025-07-30 14:43:42 -04:00
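
A minimal sketch of the claim priority described above, where userinfo is the decoded OpenID claims object and a configured OPENID_USERNAME_CLAIM (see the .env diff below) takes precedence:

function getUsername(userinfo, customClaim = process.env.OPENID_USERNAME_CLAIM) {
  if (customClaim && userinfo[customClaim]) {
    return userinfo[customClaim];
  }
  // preferred_username first, then the nonstandard `username`, then email;
  // given_name is intentionally excluded to avoid exposing first names.
  return userinfo.preferred_username || userinfo.username || userinfo.email;
}

module.exports = { getUsername };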
Dustin Healy
1050346915 feat: Add Support for customUserVar Replacement in 'args' Field (#8743) 2025-07-30 14:37:56 -04:00
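A hedged sketch of replacing {{VAR}}-style placeholders in an MCP server's args array with per-user values; the placeholder syntax here is an assumption, not taken from this diff:

function applyCustomUserVars(args = [], customUserVars = {}) {
  return args.map((arg) =>
    arg.replace(/\{\{(\w+)\}\}/g, (match, name) =>
      name in customUserVars ? customUserVars[name] : match,
    ),
  );
}

// applyCustomUserVars(['--token', '{{API_KEY}}'], { API_KEY: 'abc123' })
//   -> ['--token', 'abc123']

module.exports = { applyCustomUserVars };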
Marco Beretta
8a1a38f346 🔑 fix: Update Conversation Mutation to use ID from Payload (#8758) 2025-07-30 14:34:30 -04:00
201 changed files with 8073 additions and 1192 deletions

View File

@@ -442,6 +442,8 @@ OPENID_REQUIRED_ROLE_PARAMETER_PATH=
OPENID_USERNAME_CLAIM=
# Set to determine which user info property returned from OpenID Provider to store as the User's name
OPENID_NAME_CLAIM=
# Optional audience parameter for OpenID authorization requests
OPENID_AUDIENCE=
OPENID_BUTTON_LABEL=
OPENID_IMAGE_URL=

View File

@@ -4,12 +4,13 @@ name: Build Helm Charts on Tag
on:
push:
tags:
- "*"
- "chart-*"
jobs:
release:
permissions:
contents: write
packages: write
runs-on: ubuntu-latest
steps:
- name: Checkout
@@ -26,15 +27,49 @@ jobs:
uses: azure/setup-helm@v4
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
- name: Build Subchart Deps
run: |
cd helm/librechat-rag-api
helm dependency build
cd helm/librechat
helm dependency build
cd ../librechat-rag-api
helm dependency build
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.6.0
- name: Get Chart Version
id: chart-version
run: |
CHART_VERSION=$(echo "${{ github.ref_name }}" | cut -d'-' -f2)
echo "CHART_VERSION=${CHART_VERSION}" >> "$GITHUB_OUTPUT"
# Log in to GitHub Container Registry
- name: Log in to GitHub Container Registry
uses: docker/login-action@v3
with:
charts_dir: helm
skip_existing: true
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
# Run Helm OCI Charts Releaser
# This is for the librechat chart
- name: Release Helm OCI Charts for librechat
uses: appany/helm-oci-chart-releaser@v0.4.2
with:
name: librechat
repository: ${{ github.actor }}/librechat-chart
tag: ${{ steps.chart-version.outputs.CHART_VERSION }}
path: helm/librechat
registry: ghcr.io
registry_username: ${{ github.actor }}
registry_password: ${{ secrets.GITHUB_TOKEN }}
# this is for the librechat-rag-api chart
- name: Release Helm OCI Charts for librechat-rag-api
uses: appany/helm-oci-chart-releaser@v0.4.2
with:
name: librechat-rag-api
repository: ${{ github.actor }}/librechat-chart
tag: ${{ steps.chart-version.outputs.CHART_VERSION }}
path: helm/librechat-rag-api
registry: ghcr.io
registry_username: ${{ github.actor }}
registry_password: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -48,7 +48,7 @@ jobs:
# 2. Download translation files from locize.
- name: Download Translations from locize
uses: locize/download@v1
uses: locize/download@v2
with:
project-id: ${{ secrets.LOCIZE_PROJECT_ID }}
path: "client/src/locales"

.gitignore (3 changes)
View File

@@ -13,6 +13,9 @@ pids
*.seed
.git
# CI/CD data
test-image*
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

View File

@@ -1,4 +1,4 @@
# v0.7.9
# v0.8.0-rc2
# Base node image
FROM node:20-alpine AS node

View File

@@ -1,5 +1,5 @@
# Dockerfile.multi
# v0.7.9
# v0.8.0-rc2
# Base for all builds
FROM node:20-alpine AS base-min

View File

@@ -27,6 +27,7 @@ const {
const { getModelMaxTokens, getModelMaxOutputTokens, matchModelName } = require('~/utils');
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { encodeAndFormatDocuments } = require('~/server/services/Files/documents');
const { sleep } = require('~/server/utils');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
@@ -312,6 +313,33 @@ class AnthropicClient extends BaseClient {
return files;
}
async addDocuments(message, attachments) {
// Only process documents
const documentResult = await encodeAndFormatDocuments(
this.options.req,
attachments,
EModelEndpoint.anthropic,
);
message.documents =
documentResult.documents && documentResult.documents.length
? documentResult.documents
: undefined;
return documentResult.files;
}
async processAttachments(message, attachments) {
// Process both images and documents
const [imageFiles, documentFiles] = await Promise.all([
this.addImageURLs(message, attachments),
this.addDocuments(message, attachments),
]);
// Combine files from both processors
return [...imageFiles, ...documentFiles];
}
/**
* @param {object} params
* @param {number} params.promptTokens
@@ -382,7 +410,7 @@ class AnthropicClient extends BaseClient {
};
}
const files = await this.addImageURLs(latestMessage, attachments);
const files = await this.processAttachments(latestMessage, attachments);
this.options.attachments = files;
}
@@ -941,7 +969,7 @@ class AnthropicClient extends BaseClient {
const content = `<conversation_context>
${convo}
</conversation_context>
Please generate a title for this conversation.`;
const titleMessage = { role: 'user', content };

View File

@@ -1233,7 +1233,7 @@ class BaseClient {
{},
);
await this.addImageURLs(message, files, this.visionMode);
await this.processAttachments(message, files, this.visionMode);
this.message_file_map[message.messageId] = files;
return message;

View File

@@ -268,7 +268,7 @@ class GoogleClient extends BaseClient {
const formattedMessages = [];
const attachments = await this.options.attachments;
const latestMessage = { ...messages[messages.length - 1] };
const files = await this.addImageURLs(latestMessage, attachments, VisionModes.generative);
const files = await this.processAttachments(latestMessage, attachments, VisionModes.generative);
this.options.attachments = files;
messages[messages.length - 1] = latestMessage;
@@ -312,6 +312,20 @@ class GoogleClient extends BaseClient {
return files;
}
// eslint-disable-next-line no-unused-vars
async addDocuments(message, attachments) {
// GoogleClient doesn't support document processing yet
// Return empty results for consistency
return [];
}
async processAttachments(message, attachments, mode = '') {
// For GoogleClient, only process images
const imageFiles = await this.addImageURLs(message, attachments, mode);
const documentFiles = await this.addDocuments(message, attachments);
return [...imageFiles, ...documentFiles];
}
/**
* Builds the augmented prompt for attachments
* TODO: Add File API Support
@@ -345,7 +359,7 @@ class GoogleClient extends BaseClient {
const { prompt } = await this.buildMessagesPrompt(messages, parentMessageId);
const files = await this.addImageURLs(latestMessage, attachments);
const files = await this.processAttachments(latestMessage, attachments);
this.options.attachments = files;

View File

@@ -372,6 +372,19 @@ class OpenAIClient extends BaseClient {
return files;
}
async addDocuments(message, attachments) {
// OpenAI doesn't support native document processing yet
// Return empty results for consistency
return [];
}
async processAttachments(message, attachments) {
// For OpenAI, only process images
const imageFiles = await this.addImageURLs(message, attachments);
const documentFiles = await this.addDocuments(message, attachments);
return [...imageFiles, ...documentFiles];
}
async buildMessages(messages, parentMessageId, { promptPrefix = null }, opts) {
let orderedMessages = this.constructor.getMessagesForConversation({
messages,
@@ -400,7 +413,7 @@ class OpenAIClient extends BaseClient {
};
}
const files = await this.addImageURLs(
const files = await this.processAttachments(
orderedMessages[orderedMessages.length - 1],
attachments,
);
@@ -1222,7 +1235,9 @@ ${convo}
}
if (this.isOmni === true && modelOptions.max_tokens != null) {
modelOptions.max_completion_tokens = modelOptions.max_tokens;
const paramName =
modelOptions.useResponsesApi === true ? 'max_output_tokens' : 'max_completion_tokens';
modelOptions[paramName] = modelOptions.max_tokens;
delete modelOptions.max_tokens;
}
if (this.isOmni === true && modelOptions.temperature != null) {

View File

@@ -3,24 +3,61 @@ const { EModelEndpoint, ContentTypes } = require('librechat-data-provider');
const { HumanMessage, AIMessage, SystemMessage } = require('@langchain/core/messages');
/**
* Formats a message to OpenAI Vision API payload format.
* Formats a message with document attachments for specific endpoints.
*
* @param {Object} params - The parameters for formatting.
* @param {Object} params.message - The message object to format.
* @param {string} [params.message.role] - The role of the message sender (must be 'user').
* @param {string} [params.message.content] - The text content of the message.
* @param {Array<Object>} [params.documents] - The document attachments for the message.
* @param {EModelEndpoint} [params.endpoint] - Identifier for specific endpoint handling
* @returns {(Object)} - The formatted message.
*/
const formatDocumentMessage = ({ message, documents, endpoint }) => {
const contentParts = [];
// Add documents first (for Anthropic PDFs)
if (documents && documents.length > 0) {
contentParts.push(...documents);
}
// Add text content
contentParts.push({ type: ContentTypes.TEXT, text: message.content });
if (endpoint === EModelEndpoint.anthropic) {
message.content = contentParts;
return message;
}
// For other endpoints, might need different handling
message.content = contentParts;
return message;
};
/**
* Formats a message with vision capabilities (image_urls) for specific endpoints.
*
* @param {Object} params - The parameters for formatting.
* @param {Object} params.message - The message object to format.
* @param {Array<string>} [params.image_urls] - The image_urls to attach to the message.
* @param {EModelEndpoint} [params.endpoint] - Identifier for specific endpoint handling
* @returns {(Object)} - The formatted message.
*/
const formatVisionMessage = ({ message, image_urls, endpoint }) => {
const contentParts = [];
// Add images
if (image_urls && image_urls.length > 0) {
contentParts.push(...image_urls);
}
// Add text content
contentParts.push({ type: ContentTypes.TEXT, text: message.content });
if (endpoint === EModelEndpoint.anthropic) {
message.content = [...image_urls, { type: ContentTypes.TEXT, text: message.content }];
message.content = contentParts;
return message;
}
message.content = [{ type: ContentTypes.TEXT, text: message.content }, ...image_urls];
return message;
};
@@ -58,7 +95,18 @@ const formatMessage = ({ message, userName, assistantName, endpoint, langChain =
content,
};
const { image_urls } = message;
const { image_urls, documents } = message;
// Handle documents
if (Array.isArray(documents) && documents.length > 0 && role === 'user') {
return formatDocumentMessage({
message: formattedMessage,
documents: message.documents,
endpoint,
});
}
// Handle images
if (Array.isArray(image_urls) && image_urls.length > 0 && role === 'user') {
return formatVisionMessage({
message: formattedMessage,
@@ -146,7 +194,21 @@ const formatAgentMessages = (payload) => {
message.content = [{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: message.content }];
}
if (message.role !== 'assistant') {
messages.push(formatMessage({ message, langChain: true }));
// Check if message has documents and preserve array structure
const hasDocuments =
Array.isArray(message.content) &&
message.content.some((part) => part && part.type === 'document');
if (hasDocuments && message.role === 'user') {
// For user messages with documents, create HumanMessage directly with array content
messages.push(new HumanMessage({ content: message.content }));
} else if (hasDocuments && message.role === 'system') {
// For system messages with documents, create SystemMessage directly with array content
messages.push(new SystemMessage({ content: message.content }));
} else {
// Use regular formatting for messages without documents
messages.push(formatMessage({ message, langChain: true }));
}
continue;
}
@@ -239,6 +301,8 @@ const formatAgentMessages = (payload) => {
module.exports = {
formatMessage,
formatDocumentMessage,
formatVisionMessage,
formatFromLangChain,
formatAgentMessages,
formatLangChainMessages,

View File

@@ -316,17 +316,10 @@ const updateAgent = async (searchParameter, updateData, options = {}) => {
if (shouldCreateVersion) {
const duplicateVersion = isDuplicateVersion(updateData, versionData, versions, actionsHash);
if (duplicateVersion && !forceVersion) {
const error = new Error(
'Duplicate version: This would create a version identical to an existing one',
);
error.statusCode = 409;
error.details = {
duplicateVersion,
versionIndex: versions.findIndex(
(v) => JSON.stringify(duplicateVersion) === JSON.stringify(v),
),
};
throw error;
// No changes detected, return the current agent without creating a new version
const agentObj = currentAgent.toObject();
agentObj.version = versions.length;
return agentObj;
}
}

View File

@@ -879,45 +879,31 @@ describe('models/Agent', () => {
expect(emptyParamsAgent.model_parameters).toEqual({});
});
test('should detect duplicate versions and reject updates', async () => {
const originalConsoleError = console.error;
console.error = jest.fn();
test('should not create new version for duplicate updates', async () => {
const authorId = new mongoose.Types.ObjectId();
const testCases = generateVersionTestCases();
try {
const authorId = new mongoose.Types.ObjectId();
const testCases = generateVersionTestCases();
for (const testCase of testCases) {
const testAgentId = `agent_${uuidv4()}`;
for (const testCase of testCases) {
const testAgentId = `agent_${uuidv4()}`;
await createAgent({
id: testAgentId,
provider: 'test',
model: 'test-model',
author: authorId,
...testCase.initial,
});
await createAgent({
id: testAgentId,
provider: 'test',
model: 'test-model',
author: authorId,
...testCase.initial,
});
const updatedAgent = await updateAgent({ id: testAgentId }, testCase.update);
expect(updatedAgent.versions).toHaveLength(2); // No new version created
await updateAgent({ id: testAgentId }, testCase.update);
// Update with duplicate data should succeed but not create a new version
const duplicateUpdate = await updateAgent({ id: testAgentId }, testCase.duplicate);
let error;
try {
await updateAgent({ id: testAgentId }, testCase.duplicate);
} catch (e) {
error = e;
}
expect(duplicateUpdate.versions).toHaveLength(2); // No new version created
expect(error).toBeDefined();
expect(error.message).toContain('Duplicate version');
expect(error.statusCode).toBe(409);
expect(error.details).toBeDefined();
expect(error.details.duplicateVersion).toBeDefined();
const agent = await getAgent({ id: testAgentId });
expect(agent.versions).toHaveLength(2);
}
} finally {
console.error = originalConsoleError;
const agent = await getAgent({ id: testAgentId });
expect(agent.versions).toHaveLength(2);
}
});
@@ -1093,20 +1079,13 @@ describe('models/Agent', () => {
expect(secondUpdate.versions).toHaveLength(3);
// Update without forceVersion and no changes should not create a version
let error;
try {
await updateAgent(
{ id: agentId },
{ tools: ['listEvents_action_test.com', 'createEvent_action_test.com'] },
{ updatingUserId: authorId.toString(), forceVersion: false },
);
} catch (e) {
error = e;
}
const duplicateUpdate = await updateAgent(
{ id: agentId },
{ tools: ['listEvents_action_test.com', 'createEvent_action_test.com'] },
{ updatingUserId: authorId.toString(), forceVersion: false },
);
expect(error).toBeDefined();
expect(error.message).toContain('Duplicate version');
expect(error.statusCode).toBe(409);
expect(duplicateUpdate.versions).toHaveLength(3); // No new version created
});
test('should handle isDuplicateVersion with arrays containing null/undefined values', async () => {
@@ -2400,11 +2379,18 @@ describe('models/Agent', () => {
agent_ids: ['agent1', 'agent2'],
});
await updateAgent({ id: agentId }, { agent_ids: ['agent1', 'agent2', 'agent3'] });
const updatedAgent = await updateAgent(
{ id: agentId },
{ agent_ids: ['agent1', 'agent2', 'agent3'] },
);
expect(updatedAgent.versions).toHaveLength(2);
await expect(
updateAgent({ id: agentId }, { agent_ids: ['agent1', 'agent2', 'agent3'] }),
).rejects.toThrow('Duplicate version');
// Update with same agent_ids should succeed but not create a new version
const duplicateUpdate = await updateAgent(
{ id: agentId },
{ agent_ids: ['agent1', 'agent2', 'agent3'] },
);
expect(duplicateUpdate.versions).toHaveLength(2); // No new version created
});
test('should handle agent_ids field alongside other fields', async () => {
@@ -2543,9 +2529,10 @@ describe('models/Agent', () => {
expect(updated.versions).toHaveLength(2);
expect(updated.agent_ids).toEqual([]);
await expect(updateAgent({ id: agentId }, { agent_ids: [] })).rejects.toThrow(
'Duplicate version',
);
// Update with same empty agent_ids should succeed but not create a new version
const duplicateUpdate = await updateAgent({ id: agentId }, { agent_ids: [] });
expect(duplicateUpdate.versions).toHaveLength(2); // No new version created
expect(duplicateUpdate.agent_ids).toEqual([]);
});
test('should handle agent without agent_ids field', async () => {

View File

@@ -1,4 +1,4 @@
const { matchModelName } = require('../utils');
const { matchModelName } = require('../utils/tokens');
const defaultRate = 6;
/**
@@ -87,6 +87,9 @@ const tokenValues = Object.assign(
'gpt-4.1': { prompt: 2, completion: 8 },
'gpt-4.5': { prompt: 75, completion: 150 },
'gpt-4o-mini': { prompt: 0.15, completion: 0.6 },
'gpt-5': { prompt: 1.25, completion: 10 },
'gpt-5-mini': { prompt: 0.25, completion: 2 },
'gpt-5-nano': { prompt: 0.05, completion: 0.4 },
'gpt-4o': { prompt: 2.5, completion: 10 },
'gpt-4o-2024-05-13': { prompt: 5, completion: 15 },
'gpt-4-1106': { prompt: 10, completion: 30 },
@@ -147,6 +150,9 @@ const tokenValues = Object.assign(
codestral: { prompt: 0.3, completion: 0.9 },
'ministral-8b': { prompt: 0.1, completion: 0.1 },
'ministral-3b': { prompt: 0.04, completion: 0.04 },
// GPT-OSS models
'gpt-oss-20b': { prompt: 0.05, completion: 0.2 },
'gpt-oss-120b': { prompt: 0.15, completion: 0.6 },
},
bedrockValues,
);
@@ -214,6 +220,12 @@ const getValueKey = (model, endpoint) => {
return 'gpt-4.1';
} else if (modelName.includes('gpt-4o-2024-05-13')) {
return 'gpt-4o-2024-05-13';
} else if (modelName.includes('gpt-5-nano')) {
return 'gpt-5-nano';
} else if (modelName.includes('gpt-5-mini')) {
return 'gpt-5-mini';
} else if (modelName.includes('gpt-5')) {
return 'gpt-5';
} else if (modelName.includes('gpt-4o-mini')) {
return 'gpt-4o-mini';
} else if (modelName.includes('gpt-4o')) {

View File

@@ -25,8 +25,14 @@ describe('getValueKey', () => {
expect(getValueKey('gpt-4-some-other-info')).toBe('8k');
});
it('should return undefined for model names that do not match any known patterns', () => {
expect(getValueKey('gpt-5-some-other-info')).toBeUndefined();
it('should return "gpt-5" for model name containing "gpt-5"', () => {
expect(getValueKey('gpt-5-some-other-info')).toBe('gpt-5');
expect(getValueKey('gpt-5-2025-01-30')).toBe('gpt-5');
expect(getValueKey('gpt-5-2025-01-30-0130')).toBe('gpt-5');
expect(getValueKey('openai/gpt-5')).toBe('gpt-5');
expect(getValueKey('openai/gpt-5-2025-01-30')).toBe('gpt-5');
expect(getValueKey('gpt-5-turbo')).toBe('gpt-5');
expect(getValueKey('gpt-5-0130')).toBe('gpt-5');
});
it('should return "gpt-3.5-turbo-1106" for model name containing "gpt-3.5-turbo-1106"', () => {
@@ -84,6 +90,29 @@ describe('getValueKey', () => {
expect(getValueKey('gpt-4.1-nano-0125')).toBe('gpt-4.1-nano');
});
it('should return "gpt-5" for model type of "gpt-5"', () => {
expect(getValueKey('gpt-5-2025-01-30')).toBe('gpt-5');
expect(getValueKey('gpt-5-2025-01-30-0130')).toBe('gpt-5');
expect(getValueKey('openai/gpt-5')).toBe('gpt-5');
expect(getValueKey('openai/gpt-5-2025-01-30')).toBe('gpt-5');
expect(getValueKey('gpt-5-turbo')).toBe('gpt-5');
expect(getValueKey('gpt-5-0130')).toBe('gpt-5');
});
it('should return "gpt-5-mini" for model type of "gpt-5-mini"', () => {
expect(getValueKey('gpt-5-mini-2025-01-30')).toBe('gpt-5-mini');
expect(getValueKey('openai/gpt-5-mini')).toBe('gpt-5-mini');
expect(getValueKey('gpt-5-mini-0130')).toBe('gpt-5-mini');
expect(getValueKey('gpt-5-mini-2025-01-30-0130')).toBe('gpt-5-mini');
});
it('should return "gpt-5-nano" for model type of "gpt-5-nano"', () => {
expect(getValueKey('gpt-5-nano-2025-01-30')).toBe('gpt-5-nano');
expect(getValueKey('openai/gpt-5-nano')).toBe('gpt-5-nano');
expect(getValueKey('gpt-5-nano-0130')).toBe('gpt-5-nano');
expect(getValueKey('gpt-5-nano-2025-01-30-0130')).toBe('gpt-5-nano');
});
it('should return "gpt-4o" for model type of "gpt-4o"', () => {
expect(getValueKey('gpt-4o-2024-08-06')).toBe('gpt-4o');
expect(getValueKey('gpt-4o-2024-08-06-0718')).toBe('gpt-4o');
@@ -207,6 +236,48 @@ describe('getMultiplier', () => {
);
});
it('should return the correct multiplier for gpt-5', () => {
const valueKey = getValueKey('gpt-5-2025-01-30');
expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-5'].prompt);
expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
tokenValues['gpt-5'].completion,
);
expect(getMultiplier({ model: 'gpt-5-preview', tokenType: 'prompt' })).toBe(
tokenValues['gpt-5'].prompt,
);
expect(getMultiplier({ model: 'openai/gpt-5', tokenType: 'completion' })).toBe(
tokenValues['gpt-5'].completion,
);
});
it('should return the correct multiplier for gpt-5-mini', () => {
const valueKey = getValueKey('gpt-5-mini-2025-01-30');
expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-5-mini'].prompt);
expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
tokenValues['gpt-5-mini'].completion,
);
expect(getMultiplier({ model: 'gpt-5-mini-preview', tokenType: 'prompt' })).toBe(
tokenValues['gpt-5-mini'].prompt,
);
expect(getMultiplier({ model: 'openai/gpt-5-mini', tokenType: 'completion' })).toBe(
tokenValues['gpt-5-mini'].completion,
);
});
it('should return the correct multiplier for gpt-5-nano', () => {
const valueKey = getValueKey('gpt-5-nano-2025-01-30');
expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-5-nano'].prompt);
expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
tokenValues['gpt-5-nano'].completion,
);
expect(getMultiplier({ model: 'gpt-5-nano-preview', tokenType: 'prompt' })).toBe(
tokenValues['gpt-5-nano'].prompt,
);
expect(getMultiplier({ model: 'openai/gpt-5-nano', tokenType: 'completion' })).toBe(
tokenValues['gpt-5-nano'].completion,
);
});
it('should return the correct multiplier for gpt-4o', () => {
const valueKey = getValueKey('gpt-4o-2024-08-06');
expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-4o'].prompt);
@@ -307,10 +378,22 @@ describe('getMultiplier', () => {
});
it('should return defaultRate if derived valueKey does not match any known patterns', () => {
expect(getMultiplier({ tokenType: 'prompt', model: 'gpt-5-some-other-info' })).toBe(
expect(getMultiplier({ tokenType: 'prompt', model: 'gpt-10-some-other-info' })).toBe(
defaultRate,
);
});
it('should return correct multipliers for GPT-OSS models', () => {
const models = ['gpt-oss-20b', 'gpt-oss-120b'];
models.forEach((key) => {
const expectedPrompt = tokenValues[key].prompt;
const expectedCompletion = tokenValues[key].completion;
expect(getMultiplier({ valueKey: key, tokenType: 'prompt' })).toBe(expectedPrompt);
expect(getMultiplier({ valueKey: key, tokenType: 'completion' })).toBe(expectedCompletion);
expect(getMultiplier({ model: key, tokenType: 'prompt' })).toBe(expectedPrompt);
expect(getMultiplier({ model: key, tokenType: 'completion' })).toBe(expectedCompletion);
});
});
});
describe('AWS Bedrock Model Tests', () => {

View File

@@ -1,6 +1,6 @@
{
"name": "@librechat/backend",
"version": "v0.7.9",
"version": "v0.8.0-rc2",
"description": "",
"scripts": {
"start": "echo 'please run this from the root directory'",
@@ -49,10 +49,10 @@
"@langchain/google-vertexai": "^0.2.13",
"@langchain/openai": "^0.5.18",
"@langchain/textsplitters": "^0.1.0",
"@librechat/agents": "^2.4.68",
"@librechat/agents": "^2.4.75",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.17.0",
"@modelcontextprotocol/sdk": "^1.17.1",
"@node-saml/passport-saml": "^5.1.0",
"@waylaidwanderer/fetch-event-source": "^3.0.1",
"axios": "^1.8.2",

View File

@@ -1,46 +0,0 @@
const { logger } = require('~/config');
//handle duplicates
const handleDuplicateKeyError = (err, res) => {
logger.error('Duplicate key error:', err.keyValue);
const field = `${JSON.stringify(Object.keys(err.keyValue))}`;
const code = 409;
res
.status(code)
.send({ messages: `An document with that ${field} already exists.`, fields: field });
};
//handle validation errors
const handleValidationError = (err, res) => {
logger.error('Validation error:', err.errors);
let errors = Object.values(err.errors).map((el) => el.message);
let fields = `${JSON.stringify(Object.values(err.errors).map((el) => el.path))}`;
let code = 400;
if (errors.length > 1) {
errors = errors.join(' ');
res.status(code).send({ messages: `${JSON.stringify(errors)}`, fields: fields });
} else {
res.status(code).send({ messages: `${JSON.stringify(errors)}`, fields: fields });
}
};
module.exports = (err, _req, res, _next) => {
try {
if (err.name === 'ValidationError') {
return handleValidationError(err, res);
}
if (err.code && err.code == 11000) {
return handleDuplicateKeyError(err, res);
}
// Special handling for errors like SyntaxError
if (err.statusCode && err.body) {
return res.status(err.statusCode).send(err.body);
}
logger.error('ErrorController => error', err);
return res.status(500).send('An unknown error occurred.');
} catch (err) {
logger.error('ErrorController => processing error', err);
return res.status(500).send('Processing error in ErrorController.');
}
};

View File

@@ -5,6 +5,7 @@ const { logger } = require('~/config');
/**
* @param {ServerRequest} req
* @returns {Promise<TModelsConfig>} The models config.
*/
const getModelsConfig = async (req) => {
const cache = getLogStores(CacheKeys.CONFIG_STORE);

View File

@@ -1,54 +1,16 @@
const { logger } = require('@librechat/data-schemas');
const { CacheKeys, AuthType, Constants } = require('librechat-data-provider');
const { CacheKeys, Constants } = require('librechat-data-provider');
const {
getToolkitKey,
checkPluginAuth,
filterUniquePlugins,
convertMCPToolsToPlugins,
} = require('@librechat/api');
const { getCustomConfig, getCachedTools } = require('~/server/services/Config');
const { getToolkitKey } = require('~/server/services/ToolService');
const { availableTools, toolkits } = require('~/app/clients/tools');
const { getMCPManager, getFlowStateManager } = require('~/config');
const { availableTools } = require('~/app/clients/tools');
const { getLogStores } = require('~/cache');
/**
* Filters out duplicate plugins from the list of plugins.
*
* @param {TPlugin[]} plugins The list of plugins to filter.
* @returns {TPlugin[]} The list of plugins with duplicates removed.
*/
const filterUniquePlugins = (plugins) => {
const seen = new Set();
return plugins.filter((plugin) => {
const duplicate = seen.has(plugin.pluginKey);
seen.add(plugin.pluginKey);
return !duplicate;
});
};
/**
* Determines if a plugin is authenticated by checking if all required authentication fields have non-empty values.
* Supports alternate authentication fields, allowing validation against multiple possible environment variables.
*
* @param {TPlugin} plugin The plugin object containing the authentication configuration.
* @returns {boolean} True if the plugin is authenticated for all required fields, false otherwise.
*/
const checkPluginAuth = (plugin) => {
if (!plugin.authConfig || plugin.authConfig.length === 0) {
return false;
}
return plugin.authConfig.every((authFieldObj) => {
const authFieldOptions = authFieldObj.authField.split('||');
let isFieldAuthenticated = false;
for (const fieldOption of authFieldOptions) {
const envValue = process.env[fieldOption];
if (envValue && envValue.trim() !== '' && envValue !== AuthType.USER_PROVIDED) {
isFieldAuthenticated = true;
break;
}
}
return isFieldAuthenticated;
});
};
const getAvailablePluginsController = async (req, res) => {
try {
const cache = getLogStores(CacheKeys.CONFIG_STORE);
@@ -143,9 +105,9 @@ const getAvailableTools = async (req, res) => {
const cache = getLogStores(CacheKeys.CONFIG_STORE);
const cachedToolsArray = await cache.get(CacheKeys.TOOLS);
const cachedUserTools = await getCachedTools({ userId });
const userPlugins = convertMCPToolsToPlugins(cachedUserTools, customConfig);
const userPlugins = convertMCPToolsToPlugins({ functionTools: cachedUserTools, customConfig });
if (cachedToolsArray && userPlugins) {
if (cachedToolsArray != null && userPlugins != null) {
const dedupedTools = filterUniquePlugins([...userPlugins, ...cachedToolsArray]);
res.status(200).json(dedupedTools);
return;
@@ -185,7 +147,9 @@ const getAvailableTools = async (req, res) => {
const isToolDefined = toolDefinitions[plugin.pluginKey] !== undefined;
const isToolkit =
plugin.toolkit === true &&
Object.keys(toolDefinitions).some((key) => getToolkitKey(key) === plugin.pluginKey);
Object.keys(toolDefinitions).some(
(key) => getToolkitKey({ toolkits, toolName: key }) === plugin.pluginKey,
);
if (!isToolDefined && !isToolkit) {
continue;
@@ -235,58 +199,6 @@ const getAvailableTools = async (req, res) => {
}
};
/**
* Converts MCP function format tools to plugin format
* @param {Object} functionTools - Object with function format tools
* @param {Object} customConfig - Custom configuration for MCP servers
* @returns {Array} Array of plugin objects
*/
function convertMCPToolsToPlugins(functionTools, customConfig) {
const plugins = [];
for (const [toolKey, toolData] of Object.entries(functionTools)) {
if (!toolData.function || !toolKey.includes(Constants.mcp_delimiter)) {
continue;
}
const functionData = toolData.function;
const parts = toolKey.split(Constants.mcp_delimiter);
const serverName = parts[parts.length - 1];
const serverConfig = customConfig?.mcpServers?.[serverName];
const plugin = {
name: parts[0], // Use the tool name without server suffix
pluginKey: toolKey,
description: functionData.description || '',
authenticated: true,
icon: serverConfig?.iconPath,
};
// Build authConfig for MCP tools
if (!serverConfig?.customUserVars) {
plugin.authConfig = [];
plugins.push(plugin);
continue;
}
const customVarKeys = Object.keys(serverConfig.customUserVars);
if (customVarKeys.length === 0) {
plugin.authConfig = [];
} else {
plugin.authConfig = Object.entries(serverConfig.customUserVars).map(([key, value]) => ({
authField: key,
label: value.title || key,
description: value.description || '',
}));
}
plugins.push(plugin);
}
return plugins;
}
module.exports = {
getAvailableTools,
getAvailablePluginsController,

View File

@@ -28,19 +28,211 @@ jest.mock('~/config', () => ({
jest.mock('~/app/clients/tools', () => ({
availableTools: [],
toolkits: [],
}));
jest.mock('~/cache', () => ({
getLogStores: jest.fn(),
}));
jest.mock('@librechat/api', () => ({
getToolkitKey: jest.fn(),
checkPluginAuth: jest.fn(),
filterUniquePlugins: jest.fn(),
convertMCPToolsToPlugins: jest.fn(),
}));
// Import the actual module with the function we want to test
const { getAvailableTools } = require('./PluginController');
const { getAvailableTools, getAvailablePluginsController } = require('./PluginController');
const {
filterUniquePlugins,
checkPluginAuth,
convertMCPToolsToPlugins,
getToolkitKey,
} = require('@librechat/api');
describe('PluginController', () => {
describe('plugin.icon behavior', () => {
let mockReq, mockRes, mockCache;
let mockReq, mockRes, mockCache;
beforeEach(() => {
jest.clearAllMocks();
mockReq = { user: { id: 'test-user-id' } };
mockRes = { status: jest.fn().mockReturnThis(), json: jest.fn() };
mockCache = { get: jest.fn(), set: jest.fn() };
getLogStores.mockReturnValue(mockCache);
});
describe('getAvailablePluginsController', () => {
beforeEach(() => {
mockReq.app = { locals: { filteredTools: [], includedTools: [] } };
});
it('should use filterUniquePlugins to remove duplicate plugins', async () => {
const mockPlugins = [
{ name: 'Plugin1', pluginKey: 'key1', description: 'First' },
{ name: 'Plugin2', pluginKey: 'key2', description: 'Second' },
];
mockCache.get.mockResolvedValue(null);
filterUniquePlugins.mockReturnValue(mockPlugins);
checkPluginAuth.mockReturnValue(true);
await getAvailablePluginsController(mockReq, mockRes);
expect(filterUniquePlugins).toHaveBeenCalled();
expect(mockRes.status).toHaveBeenCalledWith(200);
// The response includes authenticated: true for each plugin when checkPluginAuth returns true
expect(mockRes.json).toHaveBeenCalledWith([
{ name: 'Plugin1', pluginKey: 'key1', description: 'First', authenticated: true },
{ name: 'Plugin2', pluginKey: 'key2', description: 'Second', authenticated: true },
]);
});
it('should use checkPluginAuth to verify plugin authentication', async () => {
const mockPlugin = { name: 'Plugin1', pluginKey: 'key1', description: 'First' };
mockCache.get.mockResolvedValue(null);
filterUniquePlugins.mockReturnValue([mockPlugin]);
checkPluginAuth.mockReturnValueOnce(true);
await getAvailablePluginsController(mockReq, mockRes);
expect(checkPluginAuth).toHaveBeenCalledWith(mockPlugin);
const responseData = mockRes.json.mock.calls[0][0];
expect(responseData[0].authenticated).toBe(true);
});
it('should return cached plugins when available', async () => {
const cachedPlugins = [
{ name: 'CachedPlugin', pluginKey: 'cached', description: 'Cached plugin' },
];
mockCache.get.mockResolvedValue(cachedPlugins);
await getAvailablePluginsController(mockReq, mockRes);
expect(filterUniquePlugins).not.toHaveBeenCalled();
expect(checkPluginAuth).not.toHaveBeenCalled();
expect(mockRes.json).toHaveBeenCalledWith(cachedPlugins);
});
it('should filter plugins based on includedTools', async () => {
const mockPlugins = [
{ name: 'Plugin1', pluginKey: 'key1', description: 'First' },
{ name: 'Plugin2', pluginKey: 'key2', description: 'Second' },
];
mockReq.app.locals.includedTools = ['key1'];
mockCache.get.mockResolvedValue(null);
filterUniquePlugins.mockReturnValue(mockPlugins);
checkPluginAuth.mockReturnValue(false);
await getAvailablePluginsController(mockReq, mockRes);
const responseData = mockRes.json.mock.calls[0][0];
expect(responseData).toHaveLength(1);
expect(responseData[0].pluginKey).toBe('key1');
});
});
describe('getAvailableTools', () => {
it('should use convertMCPToolsToPlugins for user-specific MCP tools', async () => {
const mockUserTools = {
[`tool1${Constants.mcp_delimiter}server1`]: {
function: { name: 'tool1', description: 'Tool 1' },
},
};
const mockConvertedPlugins = [
{
name: 'tool1',
pluginKey: `tool1${Constants.mcp_delimiter}server1`,
description: 'Tool 1',
},
];
mockCache.get.mockResolvedValue(null);
getCachedTools.mockResolvedValueOnce(mockUserTools);
convertMCPToolsToPlugins.mockReturnValue(mockConvertedPlugins);
filterUniquePlugins.mockImplementation((plugins) => plugins);
getCustomConfig.mockResolvedValue(null);
await getAvailableTools(mockReq, mockRes);
expect(convertMCPToolsToPlugins).toHaveBeenCalledWith({
functionTools: mockUserTools,
customConfig: null,
});
});
it('should use filterUniquePlugins to deduplicate combined tools', async () => {
const mockUserPlugins = [
{ name: 'UserTool', pluginKey: 'user-tool', description: 'User tool' },
];
const mockManifestPlugins = [
{ name: 'ManifestTool', pluginKey: 'manifest-tool', description: 'Manifest tool' },
];
mockCache.get.mockResolvedValue(mockManifestPlugins);
getCachedTools.mockResolvedValueOnce({});
convertMCPToolsToPlugins.mockReturnValue(mockUserPlugins);
filterUniquePlugins.mockReturnValue([...mockUserPlugins, ...mockManifestPlugins]);
getCustomConfig.mockResolvedValue(null);
await getAvailableTools(mockReq, mockRes);
// Should be called to deduplicate the combined array
expect(filterUniquePlugins).toHaveBeenLastCalledWith([
...mockUserPlugins,
...mockManifestPlugins,
]);
});
it('should use checkPluginAuth to verify authentication status', async () => {
const mockPlugin = { name: 'Tool1', pluginKey: 'tool1', description: 'Tool 1' };
mockCache.get.mockResolvedValue(null);
getCachedTools.mockResolvedValue({});
convertMCPToolsToPlugins.mockReturnValue([]);
filterUniquePlugins.mockReturnValue([mockPlugin]);
checkPluginAuth.mockReturnValue(true);
getCustomConfig.mockResolvedValue(null);
// Mock getCachedTools second call to return tool definitions
getCachedTools.mockResolvedValueOnce({}).mockResolvedValueOnce({ tool1: true });
await getAvailableTools(mockReq, mockRes);
expect(checkPluginAuth).toHaveBeenCalledWith(mockPlugin);
});
it('should use getToolkitKey for toolkit validation', async () => {
const mockToolkit = {
name: 'Toolkit1',
pluginKey: 'toolkit1',
description: 'Toolkit 1',
toolkit: true,
};
mockCache.get.mockResolvedValue(null);
getCachedTools.mockResolvedValue({});
convertMCPToolsToPlugins.mockReturnValue([]);
filterUniquePlugins.mockReturnValue([mockToolkit]);
checkPluginAuth.mockReturnValue(false);
getToolkitKey.mockReturnValue('toolkit1');
getCustomConfig.mockResolvedValue(null);
// Mock getCachedTools second call to return tool definitions
getCachedTools.mockResolvedValueOnce({}).mockResolvedValueOnce({
toolkit1_function: true,
});
await getAvailableTools(mockReq, mockRes);
expect(getToolkitKey).toHaveBeenCalled();
});
});
describe('plugin.icon behavior', () => {
const callGetAvailableToolsWithMCPServer = async (mcpServers) => {
mockCache.get.mockResolvedValue(null);
getCustomConfig.mockResolvedValue({ mcpServers });
@@ -50,7 +242,22 @@ describe('PluginController', () => {
function: { name: 'test-tool', description: 'A test tool' },
},
};
const mockConvertedPlugin = {
name: 'test-tool',
pluginKey: `test-tool${Constants.mcp_delimiter}test-server`,
description: 'A test tool',
icon: mcpServers['test-server']?.iconPath,
authenticated: true,
authConfig: [],
};
getCachedTools.mockResolvedValueOnce(functionTools);
convertMCPToolsToPlugins.mockReturnValue([mockConvertedPlugin]);
filterUniquePlugins.mockImplementation((plugins) => plugins);
checkPluginAuth.mockReturnValue(true);
getToolkitKey.mockReturnValue(undefined);
getCachedTools.mockResolvedValueOnce({
[`test-tool${Constants.mcp_delimiter}test-server`]: true,
});
@@ -60,14 +267,6 @@ describe('PluginController', () => {
return responseData.find((tool) => tool.name === 'test-tool');
};
beforeEach(() => {
jest.clearAllMocks();
mockReq = { user: { id: 'test-user-id' } };
mockRes = { status: jest.fn().mockReturnThis(), json: jest.fn() };
mockCache = { get: jest.fn(), set: jest.fn() };
getLogStores.mockReturnValue(mockCache);
});
it('should set plugin.icon when iconPath is defined', async () => {
const mcpServers = {
'test-server': {
@@ -86,4 +285,236 @@ describe('PluginController', () => {
expect(testTool.icon).toBeUndefined();
});
});
describe('helper function integration', () => {
it('should properly handle MCP tools with custom user variables', async () => {
const customConfig = {
mcpServers: {
'test-server': {
customUserVars: {
API_KEY: { title: 'API Key', description: 'Your API key' },
},
},
},
};
// We need to test the actual flow where MCP manager tools are included
const mcpManagerTools = [
{
name: 'tool1',
pluginKey: `tool1${Constants.mcp_delimiter}test-server`,
description: 'Tool 1',
authenticated: true,
},
];
// Mock the MCP manager to return tools
const mockMCPManager = {
loadManifestTools: jest.fn().mockResolvedValue(mcpManagerTools),
};
require('~/config').getMCPManager.mockReturnValue(mockMCPManager);
mockCache.get.mockResolvedValue(null);
getCustomConfig.mockResolvedValue(customConfig);
// First call returns user tools (empty in this case)
getCachedTools.mockResolvedValueOnce({});
// Mock convertMCPToolsToPlugins to return empty array for user tools
convertMCPToolsToPlugins.mockReturnValue([]);
// Mock filterUniquePlugins to pass through
filterUniquePlugins.mockImplementation((plugins) => plugins || []);
// Mock checkPluginAuth
checkPluginAuth.mockReturnValue(true);
// Second call returns tool definitions
getCachedTools.mockResolvedValueOnce({
[`tool1${Constants.mcp_delimiter}test-server`]: true,
});
await getAvailableTools(mockReq, mockRes);
const responseData = mockRes.json.mock.calls[0][0];
// Find the MCP tool in the response
const mcpTool = responseData.find(
(tool) => tool.pluginKey === `tool1${Constants.mcp_delimiter}test-server`,
);
// The actual implementation adds authConfig and sets authenticated to false when customUserVars exist
expect(mcpTool).toBeDefined();
expect(mcpTool.authConfig).toEqual([
{ authField: 'API_KEY', label: 'API Key', description: 'Your API key' },
]);
expect(mcpTool.authenticated).toBe(false);
});
it('should handle error cases gracefully', async () => {
mockCache.get.mockRejectedValue(new Error('Cache error'));
await getAvailableTools(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(500);
expect(mockRes.json).toHaveBeenCalledWith({ message: 'Cache error' });
});
});
describe('edge cases with undefined/null values', () => {
it('should handle undefined cache gracefully', async () => {
getLogStores.mockReturnValue(undefined);
await getAvailableTools(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(500);
});
it('should handle null cachedTools and cachedUserTools', async () => {
mockCache.get.mockResolvedValue(null);
getCachedTools.mockResolvedValue(null);
convertMCPToolsToPlugins.mockReturnValue(undefined);
filterUniquePlugins.mockImplementation((plugins) => plugins || []);
getCustomConfig.mockResolvedValue(null);
await getAvailableTools(mockReq, mockRes);
expect(convertMCPToolsToPlugins).toHaveBeenCalledWith({
functionTools: null,
customConfig: null,
});
});
it('should handle when getCachedTools returns undefined', async () => {
mockCache.get.mockResolvedValue(null);
getCachedTools.mockResolvedValue(undefined);
convertMCPToolsToPlugins.mockReturnValue(undefined);
filterUniquePlugins.mockImplementation((plugins) => plugins || []);
getCustomConfig.mockResolvedValue(null);
checkPluginAuth.mockReturnValue(false);
// Mock getCachedTools to return undefined for both calls
getCachedTools.mockReset();
getCachedTools.mockResolvedValueOnce(undefined).mockResolvedValueOnce(undefined);
await getAvailableTools(mockReq, mockRes);
expect(convertMCPToolsToPlugins).toHaveBeenCalledWith({
functionTools: undefined,
customConfig: null,
});
});
it('should handle cachedToolsArray and userPlugins both being defined', async () => {
const cachedTools = [{ name: 'CachedTool', pluginKey: 'cached-tool', description: 'Cached' }];
const userTools = {
'user-tool': { function: { name: 'user-tool', description: 'User tool' } },
};
const userPlugins = [{ name: 'UserTool', pluginKey: 'user-tool', description: 'User tool' }];
mockCache.get.mockResolvedValue(cachedTools);
getCachedTools.mockResolvedValue(userTools);
convertMCPToolsToPlugins.mockReturnValue(userPlugins);
filterUniquePlugins.mockReturnValue([...userPlugins, ...cachedTools]);
await getAvailableTools(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(200);
expect(mockRes.json).toHaveBeenCalledWith([...userPlugins, ...cachedTools]);
});
it('should handle empty toolDefinitions object', async () => {
mockCache.get.mockResolvedValue(null);
getCachedTools.mockResolvedValueOnce({}).mockResolvedValueOnce({});
convertMCPToolsToPlugins.mockReturnValue([]);
filterUniquePlugins.mockImplementation((plugins) => plugins || []);
getCustomConfig.mockResolvedValue(null);
checkPluginAuth.mockReturnValue(true);
await getAvailableTools(mockReq, mockRes);
// With empty tool definitions, no tools should be in the final output
expect(mockRes.json).toHaveBeenCalledWith([]);
});
it('should handle MCP tools without customUserVars', async () => {
const customConfig = {
mcpServers: {
'test-server': {
// No customUserVars defined
},
},
};
const mockUserTools = {
[`tool1${Constants.mcp_delimiter}test-server`]: {
function: { name: 'tool1', description: 'Tool 1' },
},
};
mockCache.get.mockResolvedValue(null);
getCustomConfig.mockResolvedValue(customConfig);
getCachedTools.mockResolvedValueOnce(mockUserTools);
const mockPlugin = {
name: 'tool1',
pluginKey: `tool1${Constants.mcp_delimiter}test-server`,
description: 'Tool 1',
authenticated: true,
authConfig: [],
};
convertMCPToolsToPlugins.mockReturnValue([mockPlugin]);
filterUniquePlugins.mockImplementation((plugins) => plugins);
checkPluginAuth.mockReturnValue(true);
getCachedTools.mockResolvedValueOnce({
[`tool1${Constants.mcp_delimiter}test-server`]: true,
});
await getAvailableTools(mockReq, mockRes);
const responseData = mockRes.json.mock.calls[0][0];
expect(responseData[0].authenticated).toBe(true);
// The actual implementation doesn't set authConfig on tools without customUserVars
expect(responseData[0].authConfig).toEqual([]);
});
it('should handle req.app.locals with undefined filteredTools and includedTools', async () => {
mockReq.app = { locals: {} };
mockCache.get.mockResolvedValue(null);
filterUniquePlugins.mockReturnValue([]);
checkPluginAuth.mockReturnValue(false);
await getAvailablePluginsController(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(200);
expect(mockRes.json).toHaveBeenCalledWith([]);
});
it('should handle toolkit with undefined toolDefinitions keys', async () => {
const mockToolkit = {
name: 'Toolkit1',
pluginKey: 'toolkit1',
description: 'Toolkit 1',
toolkit: true,
};
mockCache.get.mockResolvedValue(null);
getCachedTools.mockResolvedValue({});
convertMCPToolsToPlugins.mockReturnValue([]);
filterUniquePlugins.mockReturnValue([mockToolkit]);
checkPluginAuth.mockReturnValue(false);
getToolkitKey.mockReturnValue(undefined);
getCustomConfig.mockResolvedValue(null);
// Mock getCachedTools second call to return null
getCachedTools.mockResolvedValueOnce({}).mockResolvedValueOnce(null);
await getAvailableTools(mockReq, mockRes);
// Should handle null toolDefinitions gracefully
expect(mockRes.status).toHaveBeenCalledWith(200);
});
});
});
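For reference, the mapping these tests assert — from a server's `customUserVars` entries to the plugin's `authConfig` array — can be sketched roughly as follows (a minimal sketch of the expected shape, not the controller's actual helper):

// Minimal sketch: derive authConfig entries from an MCP server's customUserVars,
// matching the shape asserted above ({ authField, label, description }).
function customUserVarsToAuthConfig(customUserVars = {}) {
  return Object.entries(customUserVars).map(([authField, meta]) => ({
    authField,
    label: meta.title,
    description: meta.description,
  }));
}

// customUserVarsToAuthConfig({ API_KEY: { title: 'API Key', description: 'Your API key' } })
// -> [{ authField: 'API_KEY', label: 'API Key', description: 'Your API key' }]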

View File

@@ -99,10 +99,36 @@ const confirm2FA = async (req, res) => {
/**
* Disable 2FA by clearing the stored secret and backup codes.
* Requires verification with either a TOTP token or a backup code if 2FA is fully enabled.
*/
const disable2FA = async (req, res) => {
try {
const userId = req.user.id;
const { token, backupCode } = req.body;
const user = await getUserById(userId);
if (!user || !user.totpSecret) {
return res.status(400).json({ message: '2FA is not setup for this user' });
}
if (user.twoFactorEnabled) {
const secret = await getTOTPSecret(user.totpSecret);
let isVerified = false;
if (token) {
isVerified = await verifyTOTP(secret, token);
} else if (backupCode) {
isVerified = await verifyBackupCode({ user, backupCode });
} else {
return res
.status(400)
.json({ message: 'Either token or backup code is required to disable 2FA' });
}
if (!isVerified) {
return res.status(401).json({ message: 'Invalid token or backup code' });
}
}
await updateUser(userId, { totpSecret: null, backupCodes: [], twoFactorEnabled: false });
return res.status(200).json();
} catch (err) {
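A minimal client-side sketch of the contract above: once 2FA is fully enabled, the request body must carry either `token` or `backupCode`. The `/api/auth/2fa/disable` path below is a placeholder, not taken from this diff:

// Placeholder endpoint path; illustrates the required body shape only.
async function disableTwoFactor({ token, backupCode } = {}) {
  const res = await fetch('/api/auth/2fa/disable', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(token ? { token } : { backupCode }),
  });
  if (res.status === 401) throw new Error('Invalid token or backup code');
  if (!res.ok) throw new Error('Failed to disable 2FA');
}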

View File

@@ -1,5 +1,5 @@
const { logger } = require('@librechat/data-schemas');
const { webSearchKeys, extractWebSearchEnvVars } = require('@librechat/api');
const { webSearchKeys, extractWebSearchEnvVars, normalizeHttpError } = require('@librechat/api');
const {
getFiles,
updateUser,
@@ -89,8 +89,8 @@ const updateUserPluginsController = async (req, res) => {
if (userPluginsService instanceof Error) {
logger.error('[userPluginsService]', userPluginsService);
const { status, message } = userPluginsService;
res.status(status).send({ message });
const { status, message } = normalizeHttpError(userPluginsService);
return res.status(status).send({ message });
}
}
@@ -137,7 +137,7 @@ const updateUserPluginsController = async (req, res) => {
authService = await updateUserPluginAuth(user.id, keys[i], pluginKey, values[i]);
if (authService instanceof Error) {
logger.error('[authService]', authService);
({ status, message } = authService);
({ status, message } = normalizeHttpError(authService));
}
}
} else if (action === 'uninstall') {
@@ -151,7 +151,7 @@ const updateUserPluginsController = async (req, res) => {
`[authService] Error deleting all auth for MCP tool ${pluginKey}:`,
authService,
);
({ status, message } = authService);
({ status, message } = normalizeHttpError(authService));
}
} else {
// This handles:
@@ -163,7 +163,7 @@ const updateUserPluginsController = async (req, res) => {
authService = await deleteUserPluginAuth(user.id, keys[i]); // Deletes by authField name
if (authService instanceof Error) {
logger.error('[authService] Error deleting specific auth key:', authService);
({ status, message } = authService);
({ status, message } = normalizeHttpError(authService));
}
}
}
@@ -193,7 +193,8 @@ const updateUserPluginsController = async (req, res) => {
return res.status(status).send();
}
res.status(status).send({ message });
const normalized = normalizeHttpError({ status, message });
return res.status(normalized.status).send({ message: normalized.message });
} catch (err) {
logger.error('[updateUserPluginsController]', err);
return res.status(500).json({ message: 'Something went wrong.' });
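Only the call sites of `normalizeHttpError` appear in this diff; a rough sketch of the behavior it is assumed to provide — a guaranteed numeric status and a non-empty message so `res.status(...)` never receives `undefined` (the real helper in `@librechat/api` may differ):

// Assumed behavior only: coerce an Error-like value into a safe { status, message } pair.
function normalizeHttpError(error, fallbackStatus = 500) {
  const status = Number.isInteger(error?.status) ? error.status : fallbackStatus;
  const message =
    typeof error?.message === 'string' && error.message.length > 0
      ? error.message
      : 'Something went wrong.';
  return { status, message };
}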

View File

@@ -226,6 +226,42 @@ class AgentClient extends BaseClient {
return files;
}
async addDocuments(message, attachments) {
const documentResult =
await require('~/server/services/Files/documents').encodeAndFormatDocuments(
this.options.req,
attachments,
this.options.agent.provider,
);
message.documents =
documentResult.documents && documentResult.documents.length
? documentResult.documents
: undefined;
return documentResult.files;
}
async processAttachments(message, attachments) {
const [imageFiles, documentFiles] = await Promise.all([
this.addImageURLs(message, attachments),
this.addDocuments(message, attachments),
]);
const allFiles = [...imageFiles, ...documentFiles];
const seenFileIds = new Set();
const uniqueFiles = [];
for (const file of allFiles) {
if (file.file_id && !seenFileIds.has(file.file_id)) {
seenFileIds.add(file.file_id);
uniqueFiles.push(file);
} else if (!file.file_id) {
uniqueFiles.push(file);
}
}
return uniqueFiles;
}
async buildMessages(
messages,
parentMessageId,
@@ -259,7 +295,7 @@ class AgentClient extends BaseClient {
};
}
const files = await this.addImageURLs(
const files = await this.processAttachments(
orderedMessages[orderedMessages.length - 1],
attachments,
);
@@ -282,6 +318,23 @@ class AgentClient extends BaseClient {
assistantName: this.options?.modelLabel,
});
if (
message.documents &&
message.documents.length > 0 &&
message.role === 'user' &&
this.options.agent.provider === EModelEndpoint.anthropic
) {
const contentParts = [];
contentParts.push(...message.documents);
if (message.image_urls && message.image_urls.length > 0) {
contentParts.push(...message.image_urls);
}
const textContent =
typeof formattedMessage.content === 'string' ? formattedMessage.content : '';
contentParts.push({ type: 'text', text: textContent });
formattedMessage.content = contentParts;
}
if (message.ocr && i !== orderedMessages.length - 1) {
if (typeof formattedMessage.content === 'string') {
formattedMessage.content = message.ocr + '\n' + formattedMessage.content;
@@ -402,6 +455,34 @@ class AgentClient extends BaseClient {
return result;
}
/**
* Awaits the memory promise and resolves with its result, or `undefined` if the promise is missing, rejects, or exceeds the timeout
* @param {Promise<(TAttachment | null)[] | undefined>} memoryPromise - The memory promise to await
* @param {number} timeoutMs - Timeout in milliseconds (default: 3000)
* @returns {Promise<(TAttachment | null)[] | undefined>}
*/
async awaitMemoryWithTimeout(memoryPromise, timeoutMs = 3000) {
if (!memoryPromise) {
return;
}
try {
const timeoutPromise = new Promise((_, reject) =>
setTimeout(() => reject(new Error('Memory processing timeout')), timeoutMs),
);
const attachments = await Promise.race([memoryPromise, timeoutPromise]);
return attachments;
} catch (error) {
if (error.message === 'Memory processing timeout') {
logger.warn(`[AgentClient] Memory processing timed out after ${timeoutMs}ms`);
} else {
logger.error('[AgentClient] Error processing memory:', error);
}
return;
}
}
/**
* @returns {Promise<string | undefined>}
*/
@@ -512,6 +593,39 @@ class AgentClient extends BaseClient {
return withoutKeys;
}
/**
* Filters out image URLs from message content
* @param {BaseMessage} message - The message to filter
* @returns {BaseMessage} - A new message with image URLs removed
*/
filterImageUrls(message) {
if (!message.content || typeof message.content === 'string') {
return message;
}
if (Array.isArray(message.content)) {
const filteredContent = message.content.filter(
(part) => part.type !== ContentTypes.IMAGE_URL,
);
if (filteredContent.length === 1 && filteredContent[0].type === ContentTypes.TEXT) {
const MessageClass = message.constructor;
return new MessageClass({
content: filteredContent[0].text,
additional_kwargs: message.additional_kwargs,
});
}
const MessageClass = message.constructor;
return new MessageClass({
content: filteredContent,
additional_kwargs: message.additional_kwargs,
});
}
return message;
}
/**
* @param {BaseMessage[]} messages
* @returns {Promise<void | (TAttachment | null)[]>}
@@ -540,7 +654,8 @@ class AgentClient extends BaseClient {
}
}
const bufferString = getBufferString(messagesToProcess);
const filteredMessages = messagesToProcess.map((msg) => this.filterImageUrls(msg));
const bufferString = getBufferString(filteredMessages);
const bufferMessage = new HumanMessage(`# Current Chat:\n\n${bufferString}`);
return await this.processMemory([bufferMessage]);
} catch (error) {
@@ -715,6 +830,51 @@ class AgentClient extends BaseClient {
};
const toolSet = new Set((this.options.agent.tools ?? []).map((tool) => tool && tool.name));
if (
this.options.agent.provider === EModelEndpoint.anthropic &&
payload &&
Array.isArray(payload)
) {
let userMessageWithDocs = null;
if (this.userMessage?.documents) {
userMessageWithDocs = this.userMessage;
} else if (this.currentMessages?.length > 0) {
const lastMessage = this.currentMessages[this.currentMessages.length - 1];
if (lastMessage.documents?.length > 0) {
userMessageWithDocs = lastMessage;
}
} else if (this.messages?.length > 0) {
const lastMessage = this.messages[this.messages.length - 1];
if (lastMessage.documents?.length > 0) {
userMessageWithDocs = lastMessage;
}
}
if (userMessageWithDocs) {
for (const payloadMessage of payload) {
if (
payloadMessage.role === 'user' &&
userMessageWithDocs.text === payloadMessage.content
) {
if (typeof payloadMessage.content === 'string') {
payloadMessage.content = [
...userMessageWithDocs.documents,
{ type: 'text', text: payloadMessage.content },
];
} else if (Array.isArray(payloadMessage.content)) {
payloadMessage.content = [
...userMessageWithDocs.documents,
...payloadMessage.content,
];
}
break;
}
}
}
}
let { messages: initialMessages, indexTokenCountMap } = formatAgentMessages(
payload,
this.indexTokenCountMap,
@@ -968,11 +1128,9 @@ class AgentClient extends BaseClient {
});
try {
if (memoryPromise) {
const attachments = await memoryPromise;
if (attachments && attachments.length > 0) {
this.artifactPromises.push(...attachments);
}
const attachments = await this.awaitMemoryWithTimeout(memoryPromise);
if (attachments && attachments.length > 0) {
this.artifactPromises.push(...attachments);
}
await this.recordCollectedUsage({ context: 'message' });
} catch (err) {
@@ -982,11 +1140,9 @@ class AgentClient extends BaseClient {
);
}
} catch (err) {
if (memoryPromise) {
const attachments = await memoryPromise;
if (attachments && attachments.length > 0) {
this.artifactPromises.push(...attachments);
}
const attachments = await this.awaitMemoryWithTimeout(memoryPromise);
if (attachments && attachments.length > 0) {
this.artifactPromises.push(...attachments);
}
logger.error(
'[api/server/controllers/agents/client.js #sendCompletion] Operation aborted',
@@ -1088,11 +1244,16 @@ class AgentClient extends BaseClient {
clientOptions.configuration = options.configOptions;
}
// Ensure maxTokens is set for models that accept it; strip it for o-series and GPT-5+ models
if (!/\b(o\d)\b/i.test(clientOptions.model) && !clientOptions.maxTokens) {
clientOptions.maxTokens = 75;
} else if (/\b(o\d)\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
const shouldRemoveMaxTokens = /\b(o\d|gpt-[5-9])\b/i.test(clientOptions.model);
if (shouldRemoveMaxTokens && clientOptions.maxTokens != null) {
delete clientOptions.maxTokens;
} else if (!shouldRemoveMaxTokens && !clientOptions.maxTokens) {
clientOptions.maxTokens = 75;
}
if (shouldRemoveMaxTokens && clientOptions?.modelKwargs?.max_completion_tokens != null) {
delete clientOptions.modelKwargs.max_completion_tokens;
} else if (shouldRemoveMaxTokens && clientOptions?.modelKwargs?.max_output_tokens != null) {
delete clientOptions.modelKwargs.max_output_tokens;
}
clientOptions = Object.assign(
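Distilled from the token-limit handling above, a standalone sketch of the transform (same regex and parameter names as the diff; not the exact method body):

// Sketch: drop maxTokens for o-series and GPT-5+ models and, when present,
// clear the matching modelKwargs override; otherwise fall back to a small default.
function normalizeMaxTokens(clientOptions) {
  const shouldRemoveMaxTokens = /\b(o\d|gpt-[5-9])\b/i.test(clientOptions.model);
  if (shouldRemoveMaxTokens && clientOptions.maxTokens != null) {
    delete clientOptions.maxTokens;
  } else if (!shouldRemoveMaxTokens && !clientOptions.maxTokens) {
    clientOptions.maxTokens = 75;
  }
  if (shouldRemoveMaxTokens && clientOptions?.modelKwargs?.max_completion_tokens != null) {
    delete clientOptions.modelKwargs.max_completion_tokens;
  } else if (shouldRemoveMaxTokens && clientOptions?.modelKwargs?.max_output_tokens != null) {
    delete clientOptions.modelKwargs.max_output_tokens;
  }
  return clientOptions;
}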

View File

@@ -727,4 +727,464 @@ describe('AgentClient - titleConvo', () => {
});
});
});
describe('getOptions method - GPT-5+ model handling', () => {
let mockReq;
let mockRes;
let mockAgent;
let mockOptions;
beforeEach(() => {
jest.clearAllMocks();
mockAgent = {
id: 'agent-123',
endpoint: EModelEndpoint.openAI,
provider: EModelEndpoint.openAI,
model_parameters: {
model: 'gpt-5',
},
};
mockReq = {
app: {
locals: {},
},
user: {
id: 'user-123',
},
};
mockRes = {};
mockOptions = {
req: mockReq,
res: mockRes,
agent: mockAgent,
};
client = new AgentClient(mockOptions);
});
it('should move maxTokens to modelKwargs.max_completion_tokens for GPT-5 models', () => {
const clientOptions = {
model: 'gpt-5',
maxTokens: 2048,
temperature: 0.7,
};
// Simulate the getOptions logic that handles GPT-5+ models
if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
delete clientOptions.maxTokens;
}
expect(clientOptions.maxTokens).toBeUndefined();
expect(clientOptions.modelKwargs).toBeDefined();
expect(clientOptions.modelKwargs.max_completion_tokens).toBe(2048);
expect(clientOptions.temperature).toBe(0.7); // Other options should remain
});
it('should move maxTokens to modelKwargs.max_output_tokens for GPT-5 models with useResponsesApi', () => {
const clientOptions = {
model: 'gpt-5',
maxTokens: 2048,
temperature: 0.7,
useResponsesApi: true,
};
if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
const paramName =
clientOptions.useResponsesApi === true ? 'max_output_tokens' : 'max_completion_tokens';
clientOptions.modelKwargs[paramName] = clientOptions.maxTokens;
delete clientOptions.maxTokens;
}
expect(clientOptions.maxTokens).toBeUndefined();
expect(clientOptions.modelKwargs).toBeDefined();
expect(clientOptions.modelKwargs.max_output_tokens).toBe(2048);
expect(clientOptions.temperature).toBe(0.7); // Other options should remain
});
it('should handle GPT-5+ models with existing modelKwargs', () => {
const clientOptions = {
model: 'gpt-6',
maxTokens: 1500,
temperature: 0.8,
modelKwargs: {
customParam: 'value',
},
};
// Simulate the getOptions logic
if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
delete clientOptions.maxTokens;
}
expect(clientOptions.maxTokens).toBeUndefined();
expect(clientOptions.modelKwargs).toEqual({
customParam: 'value',
max_completion_tokens: 1500,
});
});
it('should not modify maxTokens for non-GPT-5+ models', () => {
const clientOptions = {
model: 'gpt-4',
maxTokens: 2048,
temperature: 0.7,
};
// Simulate the getOptions logic
if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
delete clientOptions.maxTokens;
}
// Should not be modified since it's GPT-4
expect(clientOptions.maxTokens).toBe(2048);
expect(clientOptions.modelKwargs).toBeUndefined();
});
it('should handle various GPT-5+ model formats', () => {
const testCases = [
{ model: 'gpt-5', shouldTransform: true },
{ model: 'gpt-5-turbo', shouldTransform: true },
{ model: 'gpt-6', shouldTransform: true },
{ model: 'gpt-7-preview', shouldTransform: true },
{ model: 'gpt-8', shouldTransform: true },
{ model: 'gpt-9-mini', shouldTransform: true },
{ model: 'gpt-4', shouldTransform: false },
{ model: 'gpt-4o', shouldTransform: false },
{ model: 'gpt-3.5-turbo', shouldTransform: false },
{ model: 'claude-3', shouldTransform: false },
];
testCases.forEach(({ model, shouldTransform }) => {
const clientOptions = {
model,
maxTokens: 1000,
};
// Simulate the getOptions logic
if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
delete clientOptions.maxTokens;
}
if (shouldTransform) {
expect(clientOptions.maxTokens).toBeUndefined();
expect(clientOptions.modelKwargs?.max_completion_tokens).toBe(1000);
} else {
expect(clientOptions.maxTokens).toBe(1000);
expect(clientOptions.modelKwargs).toBeUndefined();
}
});
});
it('should not swap max token param for older models when using useResponsesApi', () => {
const testCases = [
{ model: 'gpt-5', shouldTransform: true },
{ model: 'gpt-5-turbo', shouldTransform: true },
{ model: 'gpt-6', shouldTransform: true },
{ model: 'gpt-7-preview', shouldTransform: true },
{ model: 'gpt-8', shouldTransform: true },
{ model: 'gpt-9-mini', shouldTransform: true },
{ model: 'gpt-4', shouldTransform: false },
{ model: 'gpt-4o', shouldTransform: false },
{ model: 'gpt-3.5-turbo', shouldTransform: false },
{ model: 'claude-3', shouldTransform: false },
];
testCases.forEach(({ model, shouldTransform }) => {
const clientOptions = {
model,
maxTokens: 1000,
useResponsesApi: true,
};
if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
const paramName =
clientOptions.useResponsesApi === true ? 'max_output_tokens' : 'max_completion_tokens';
clientOptions.modelKwargs[paramName] = clientOptions.maxTokens;
delete clientOptions.maxTokens;
}
if (shouldTransform) {
expect(clientOptions.maxTokens).toBeUndefined();
expect(clientOptions.modelKwargs?.max_output_tokens).toBe(1000);
} else {
expect(clientOptions.maxTokens).toBe(1000);
expect(clientOptions.modelKwargs).toBeUndefined();
}
});
});
it('should not transform if maxTokens is null or undefined', () => {
const testCases = [
{ model: 'gpt-5', maxTokens: null },
{ model: 'gpt-5', maxTokens: undefined },
{ model: 'gpt-6', maxTokens: 0 }, // Should transform even if 0
];
testCases.forEach(({ model, maxTokens }, index) => {
const clientOptions = {
model,
maxTokens,
temperature: 0.7,
};
// Simulate the getOptions logic
if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
delete clientOptions.maxTokens;
}
if (index < 2) {
// null or undefined cases
expect(clientOptions.maxTokens).toBe(maxTokens);
expect(clientOptions.modelKwargs).toBeUndefined();
} else {
// 0 case - should transform
expect(clientOptions.maxTokens).toBeUndefined();
expect(clientOptions.modelKwargs?.max_completion_tokens).toBe(0);
}
});
});
});
describe('runMemory method', () => {
let client;
let mockReq;
let mockRes;
let mockAgent;
let mockOptions;
let mockProcessMemory;
beforeEach(() => {
jest.clearAllMocks();
mockAgent = {
id: 'agent-123',
endpoint: EModelEndpoint.openAI,
provider: EModelEndpoint.openAI,
model_parameters: {
model: 'gpt-4',
},
};
mockReq = {
app: {
locals: {
memory: {
messageWindowSize: 3,
},
},
},
user: {
id: 'user-123',
personalization: {
memories: true,
},
},
};
mockRes = {};
mockOptions = {
req: mockReq,
res: mockRes,
agent: mockAgent,
};
mockProcessMemory = jest.fn().mockResolvedValue([]);
client = new AgentClient(mockOptions);
client.processMemory = mockProcessMemory;
client.conversationId = 'convo-123';
client.responseMessageId = 'response-123';
});
it('should filter out image URLs from message content', async () => {
const { HumanMessage, AIMessage } = require('@langchain/core/messages');
const messages = [
new HumanMessage({
content: [
{
type: 'text',
text: 'What is in this image?',
},
{
type: 'image_url',
image_url: {
url: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==',
detail: 'auto',
},
},
],
}),
new AIMessage('I can see a small red pixel in the image.'),
new HumanMessage({
content: [
{
type: 'text',
text: 'What about this one?',
},
{
type: 'image_url',
image_url: {
url: 'data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAYABgAAD/',
detail: 'high',
},
},
],
}),
];
await client.runMemory(messages);
expect(mockProcessMemory).toHaveBeenCalledTimes(1);
const processedMessage = mockProcessMemory.mock.calls[0][0][0];
// Verify the buffer message was created
expect(processedMessage.constructor.name).toBe('HumanMessage');
expect(processedMessage.content).toContain('# Current Chat:');
// Verify that image URLs are not in the buffer string
expect(processedMessage.content).not.toContain('image_url');
expect(processedMessage.content).not.toContain('data:image');
expect(processedMessage.content).not.toContain('base64');
// Verify text content is preserved
expect(processedMessage.content).toContain('What is in this image?');
expect(processedMessage.content).toContain('I can see a small red pixel in the image.');
expect(processedMessage.content).toContain('What about this one?');
});
it('should handle messages with only text content', async () => {
const { HumanMessage, AIMessage } = require('@langchain/core/messages');
const messages = [
new HumanMessage('Hello, how are you?'),
new AIMessage('I am doing well, thank you!'),
new HumanMessage('That is great to hear.'),
];
await client.runMemory(messages);
expect(mockProcessMemory).toHaveBeenCalledTimes(1);
const processedMessage = mockProcessMemory.mock.calls[0][0][0];
expect(processedMessage.content).toContain('Hello, how are you?');
expect(processedMessage.content).toContain('I am doing well, thank you!');
expect(processedMessage.content).toContain('That is great to hear.');
});
it('should handle mixed content types correctly', async () => {
const { HumanMessage } = require('@langchain/core/messages');
const { ContentTypes } = require('librechat-data-provider');
const messages = [
new HumanMessage({
content: [
{
type: 'text',
text: 'Here is some text',
},
{
type: ContentTypes.IMAGE_URL,
image_url: {
url: 'https://example.com/image.png',
},
},
{
type: 'text',
text: ' and more text',
},
],
}),
];
await client.runMemory(messages);
expect(mockProcessMemory).toHaveBeenCalledTimes(1);
const processedMessage = mockProcessMemory.mock.calls[0][0][0];
// Should contain text parts but not image URLs
expect(processedMessage.content).toContain('Here is some text');
expect(processedMessage.content).toContain('and more text');
expect(processedMessage.content).not.toContain('example.com/image.png');
expect(processedMessage.content).not.toContain('IMAGE_URL');
});
it('should preserve original messages without mutation', async () => {
const { HumanMessage } = require('@langchain/core/messages');
const originalContent = [
{
type: 'text',
text: 'Original text',
},
{
type: 'image_url',
image_url: {
url: 'data:image/png;base64,ABC123',
},
},
];
const messages = [
new HumanMessage({
content: [...originalContent],
}),
];
await client.runMemory(messages);
// Verify original message wasn't mutated
expect(messages[0].content).toHaveLength(2);
expect(messages[0].content[1].type).toBe('image_url');
expect(messages[0].content[1].image_url.url).toBe('data:image/png;base64,ABC123');
});
it('should handle message window size correctly', async () => {
const { HumanMessage, AIMessage } = require('@langchain/core/messages');
const messages = [
new HumanMessage('Message 1'),
new AIMessage('Response 1'),
new HumanMessage('Message 2'),
new AIMessage('Response 2'),
new HumanMessage('Message 3'),
new AIMessage('Response 3'),
];
// Window size is set to 3 in mockReq
await client.runMemory(messages);
expect(mockProcessMemory).toHaveBeenCalledTimes(1);
const processedMessage = mockProcessMemory.mock.calls[0][0][0];
// Should only include last 3 messages due to window size
expect(processedMessage.content).toContain('Message 3');
expect(processedMessage.content).toContain('Response 3');
expect(processedMessage.content).not.toContain('Message 1');
expect(processedMessage.content).not.toContain('Response 1');
});
it('should return early if processMemory is not set', async () => {
const { HumanMessage } = require('@langchain/core/messages');
client.processMemory = null;
const result = await client.runMemory([new HumanMessage('Test')]);
expect(result).toBeUndefined();
expect(mockProcessMemory).not.toHaveBeenCalled();
});
});
});
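The tests above lean on `getBufferString` (used in `runMemory` and, as far as this diff shows, exported by `@langchain/core/messages`) to flatten messages into a readable transcript. A quick sketch of the buffer built after image parts are filtered out, assuming the usual "Human:"/"AI:" prefixes:

const { HumanMessage, AIMessage, getBufferString } = require('@langchain/core/messages');

// Only text survives filtering, so the memory buffer contains no image data.
const filtered = [
  new HumanMessage('What is in this image?'),
  new AIMessage('I can see a small red pixel in the image.'),
];
const buffer = `# Current Chat:\n\n${getBufferString(filtered)}`;
// buffer resembles:
// # Current Chat:
//
// Human: What is in this image?
// AI: I can see a small red pixel in the image.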

View File

@@ -105,8 +105,6 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
return res.end();
}
await cache.delete(cacheKey);
// const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
// logger.debug(`[${originPath}] Cancelled run:`, cancelledRun);
} catch (error) {
logger.error(`[${originPath}] Error cancelling run`, error);
}
@@ -115,7 +113,6 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
let run;
try {
// run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
await recordUsage({
...run.usage,
model: run.model,
@@ -128,18 +125,9 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
let finalEvent;
try {
// const errorContentPart = {
// text: {
// value:
// error?.message ?? 'There was an error processing your request. Please try again later.',
// },
// type: ContentTypes.ERROR,
// };
finalEvent = {
final: true,
conversation: await getConvo(req.user.id, conversationId),
// runMessages,
};
} catch (error) {
logger.error(`[${originPath}] Error finalizing error process`, error);

View File

@@ -233,6 +233,26 @@ const AgentController = async (req, res, next, initializeClient, addTitle) => {
);
}
}
// Edge case: sendMessage completed but abort happened during sendCompletion
// We need to ensure a final event is sent
else if (!res.headersSent && !res.finished) {
logger.debug(
'[AgentController] Handling edge case: `sendMessage` completed but aborted during `sendCompletion`',
);
const finalResponse = { ...response };
finalResponse.error = true;
sendEvent(res, {
final: true,
conversation,
title: conversation.title,
requestMessage: userMessage,
responseMessage: finalResponse,
error: { message: 'Request was aborted during completion' },
});
res.end();
}
// Save user message if needed
if (!client.skipSaveUserMessage) {

View File

@@ -194,6 +194,9 @@ const updateAgentHandler = async (req, res) => {
});
}
// Add version count to the response
updatedAgent.version = updatedAgent.versions ? updatedAgent.versions.length : 0;
if (updatedAgent.author) {
updatedAgent.author = updatedAgent.author.toString();
}

View File

@@ -498,6 +498,28 @@ describe('Agent Controllers - Mass Assignment Protection', () => {
expect(mockRes.json).toHaveBeenCalledWith({ error: 'Agent not found' });
});
test('should include version field in update response', async () => {
mockReq.user.id = existingAgentAuthorId.toString();
mockReq.params.id = existingAgentId;
mockReq.body = {
name: 'Updated with Version Check',
};
await updateAgentHandler(mockReq, mockRes);
expect(mockRes.json).toHaveBeenCalled();
const updatedAgent = mockRes.json.mock.calls[0][0];
// Verify version field is included and is a number
expect(updatedAgent).toHaveProperty('version');
expect(typeof updatedAgent.version).toBe('number');
expect(updatedAgent.version).toBeGreaterThanOrEqual(1);
// Verify in database
const agentInDb = await Agent.findOne({ id: existingAgentId });
expect(updatedAgent.version).toBe(agentInDb.versions.length);
});
test('should handle validation errors properly', async () => {
mockReq.user.id = existingAgentAuthorId.toString();
mockReq.params.id = existingAgentId;

View File

@@ -152,7 +152,7 @@ const chatV1 = async (req, res) => {
return res.end();
}
await cache.delete(cacheKey);
const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
const cancelledRun = await openai.beta.threads.runs.cancel(run_id, { thread_id });
logger.debug('[/assistants/chat/] Cancelled run:', cancelledRun);
} catch (error) {
logger.error('[/assistants/chat/] Error cancelling run', error);
@@ -162,7 +162,7 @@ const chatV1 = async (req, res) => {
let run;
try {
run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
run = await openai.beta.threads.runs.retrieve(run_id, { thread_id });
await recordUsage({
...run.usage,
model: run.model,
@@ -623,7 +623,7 @@ const chatV1 = async (req, res) => {
if (!response.run.usage) {
await sleep(3000);
completedRun = await openai.beta.threads.runs.retrieve(thread_id, response.run.id);
completedRun = await openai.beta.threads.runs.retrieve(response.run.id, { thread_id });
if (completedRun.usage) {
await recordUsage({
...completedRun.usage,
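The retrieve/cancel calls above follow the newer openai Node SDK style used throughout this changeset: the run id comes first and the thread id is passed in a params object. A compact sketch of the updated call shapes:

// Sketch of the updated signatures (run id first, thread_id as a param), as used above.
async function inspectRun(openai, thread_id, run_id) {
  const run = await openai.beta.threads.runs.retrieve(run_id, { thread_id });
  return run.usage;
}

async function cancelRun(openai, thread_id, run_id) {
  return openai.beta.threads.runs.cancel(run_id, { thread_id });
}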

View File

@@ -467,7 +467,7 @@ const chatV2 = async (req, res) => {
if (!response.run.usage) {
await sleep(3000);
completedRun = await openai.beta.threads.runs.retrieve(thread_id, response.run.id);
completedRun = await openai.beta.threads.runs.retrieve(response.run.id, { thread_id });
if (completedRun.usage) {
await recordUsage({
...completedRun.usage,

View File

@@ -108,7 +108,7 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
return res.end();
}
await cache.delete(cacheKey);
const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
const cancelledRun = await openai.beta.threads.runs.cancel(run_id, { thread_id });
logger.debug(`[${originPath}] Cancelled run:`, cancelledRun);
} catch (error) {
logger.error(`[${originPath}] Error cancelling run`, error);
@@ -118,7 +118,7 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
let run;
try {
run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
run = await openai.beta.threads.runs.retrieve(run_id, { thread_id });
await recordUsage({
...run.usage,
model: run.model,

View File

@@ -173,6 +173,16 @@ const listAssistantsForAzure = async ({ req, res, version, azureConfig = {}, que
};
};
/**
* Initializes the OpenAI client.
* @param {object} params - The parameters object.
* @param {ServerRequest} params.req - The request object.
* @param {ServerResponse} params.res - The response object.
* @param {TEndpointOption} params.endpointOption - The endpoint options.
* @param {boolean} params.initAppClient - Whether to initialize the app client.
* @param {string} params.overrideEndpoint - The endpoint to override.
* @returns {Promise<{ openai: OpenAIClient, openAIApiKey: string, client: import('~/app/clients/OpenAIClient') }>} - The initialized OpenAI client.
*/
async function getOpenAIClient({ req, res, endpointOption, initAppClient, overrideEndpoint }) {
let endpoint = overrideEndpoint ?? req.body.endpoint ?? req.query.endpoint;
const version = await getCurrentVersion(req, endpoint);

View File

@@ -197,7 +197,7 @@ const deleteAssistant = async (req, res) => {
await validateAuthor({ req, openai });
const assistant_id = req.params.id;
const deletionStatus = await openai.beta.assistants.del(assistant_id);
const deletionStatus = await openai.beta.assistants.delete(assistant_id);
if (deletionStatus?.deleted) {
await deleteAssistantActions({ req, assistant_id });
}
@@ -365,7 +365,7 @@ const uploadAssistantAvatar = async (req, res) => {
try {
await fs.unlink(req.file.path);
logger.debug('[/:agent_id/avatar] Temp. image upload file deleted');
} catch (error) {
} catch {
logger.debug('[/:agent_id/avatar] Temp. image upload file already deleted');
}
}
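Two small patterns from this hunk, sketched briefly: the SDK call is now `assistants.delete(...)` rather than the older `del`, and the unused error variable is dropped via optional catch binding (ES2019). Names below (`openai`, `assistant_id`, `tempPath`) are placeholders for the route's real values:

const fs = require('fs').promises;

async function deleteAssistantAndTempFile(openai, assistant_id, tempPath) {
  // Newer SDK method name: `delete` instead of `del`.
  const deletionStatus = await openai.beta.assistants.delete(assistant_id);
  try {
    await fs.unlink(tempPath);
  } catch {
    // Optional catch binding: no error identifier is declared when it is not used.
  }
  return deletionStatus;
}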

View File

@@ -8,14 +8,12 @@ const express = require('express');
const passport = require('passport');
const compression = require('compression');
const cookieParser = require('cookie-parser');
const { isEnabled } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const mongoSanitize = require('express-mongo-sanitize');
const { isEnabled, ErrorController } = require('@librechat/api');
const { connectDb, indexSync } = require('~/db');
const validateImageRequest = require('./middleware/validateImageRequest');
const { jwtLogin, ldapLogin, passportLogin } = require('~/strategies');
const errorController = require('./controllers/ErrorController');
const initializeMCPs = require('./services/initializeMCPs');
const configureSocialLogins = require('./socialLogins');
const AppService = require('./services/AppService');
@@ -120,8 +118,7 @@ const startServer = async () => {
app.use('/api/tags', routes.tags);
app.use('/api/mcp', routes.mcp);
// Add the error controller one more time after all routes
app.use(errorController);
app.use(ErrorController);
app.use((req, res) => {
res.set({

View File

@@ -92,7 +92,7 @@ async function healthCheckPoll(app, retries = 0) {
if (response.status === 200) {
return; // App is healthy
}
} catch (error) {
} catch {
// Ignore connection errors during polling
}

View File

@@ -47,7 +47,7 @@ async function abortRun(req, res) {
try {
await cache.set(cacheKey, 'cancelled', three_minutes);
const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
const cancelledRun = await openai.beta.threads.runs.cancel(run_id, { thread_id });
logger.debug('[abortRun] Cancelled run:', cancelledRun);
} catch (error) {
logger.error('[abortRun] Error cancelling run', error);
@@ -60,7 +60,7 @@ async function abortRun(req, res) {
}
try {
const run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
const run = await openai.beta.threads.runs.retrieve(run_id, { thread_id });
await recordUsage({
...run.usage,
model: run.model,

View File

@@ -33,7 +33,7 @@ const validateModel = async (req, res, next) => {
return next();
}
const { ILLEGAL_MODEL_REQ_SCORE: score = 5 } = process.env ?? {};
const { ILLEGAL_MODEL_REQ_SCORE: score = 1 } = process.env ?? {};
const type = ViolationTypes.ILLEGAL_MODEL_REQUEST;
const errorMessage = {

File diff suppressed because it is too large

View File

@@ -111,7 +111,7 @@ router.delete('/', async (req, res) => {
/** @type {{ openai: OpenAI }} */
const { openai } = await assistantClients[endpoint].initializeClient({ req, res });
try {
const response = await openai.beta.threads.del(thread_id);
const response = await openai.beta.threads.delete(thread_id);
logger.debug('Deleted OpenAI thread:', response);
} catch (error) {
logger.error('Error deleting OpenAI thread:', error);

View File

@@ -413,13 +413,15 @@ router.post('/', async (req, res) => {
logger.error('[/files] Error deleting file:', error);
}
res.status(500).json({ message });
}
if (cleanup) {
try {
await fs.unlink(req.file.path);
} catch (error) {
logger.error('[/files] Error deleting file after file processing:', error);
} finally {
if (cleanup) {
try {
await fs.unlink(req.file.path);
} catch (error) {
logger.error('[/files] Error deleting file after file processing:', error);
}
} else {
logger.debug('[/files] File processing completed without cleanup');
}
}
});
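The interleaved lines above resolve into a try/finally shape: the temp upload is removed in `finally` only when `cleanup` is set. A condensed, self-contained sketch (names are placeholders, not the route's actual helpers):

const fs = require('fs').promises;

// `processUpload` stands in for the route's processing logic; `filepath` for req.file.path.
async function handleUpload(processUpload, filepath, cleanup) {
  try {
    await processUpload();
  } finally {
    if (cleanup) {
      try {
        await fs.unlink(filepath);
      } catch (error) {
        console.error('[/files] Error deleting file after file processing:', error);
      }
    } else {
      console.debug('[/files] File processing completed without cleanup');
    }
  }
}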

View File

@@ -337,9 +337,7 @@ router.post('/:serverName/reinitialize', requireJwtAuth, async (req, res) => {
for (const varName of Object.keys(serverConfig.customUserVars)) {
try {
const value = await getUserPluginAuthValue(user.id, varName, false);
if (value) {
customUserVars[varName] = value;
}
customUserVars[varName] = value;
} catch (err) {
logger.error(`[MCP Reinitialize] Error fetching ${varName} for user ${user.id}:`, err);
}
@@ -504,10 +502,6 @@ router.get('/connection/status/:serverName', requireJwtAuth, async (req, res) =>
return res.status(401).json({ error: 'User not authenticated' });
}
if (!serverName) {
return res.status(400).json({ error: 'Server name is required' });
}
const { mcpConfig, appConnections, userConnections, oauthServers } = await getMCPSetupData(
user.id,
);

View File

@@ -13,6 +13,8 @@ const { getRoleByName } = require('~/models/Role');
const router = express.Router();
const memoryPayloadLimit = express.json({ limit: '100kb' });
const checkMemoryRead = generateCheckAccess({
permissionType: PermissionTypes.MEMORIES,
permissions: [Permissions.USE, Permissions.READ],
@@ -60,6 +62,7 @@ router.get('/', checkMemoryRead, async (req, res) => {
const memoryConfig = req.app.locals?.memory;
const tokenLimit = memoryConfig?.tokenLimit;
const charLimit = memoryConfig?.charLimit || 10000;
let usagePercentage = null;
if (tokenLimit && tokenLimit > 0) {
@@ -70,6 +73,7 @@ router.get('/', checkMemoryRead, async (req, res) => {
memories: sortedMemories,
totalTokens,
tokenLimit: tokenLimit || null,
charLimit,
usagePercentage,
});
} catch (error) {
@@ -83,7 +87,7 @@ router.get('/', checkMemoryRead, async (req, res) => {
* Body: { key: string, value: string }
* Returns 201 and { created: true, memory: <createdDoc> } when successful.
*/
router.post('/', checkMemoryCreate, async (req, res) => {
router.post('/', memoryPayloadLimit, checkMemoryCreate, async (req, res) => {
const { key, value } = req.body;
if (typeof key !== 'string' || key.trim() === '') {
@@ -94,13 +98,25 @@ router.post('/', checkMemoryCreate, async (req, res) => {
return res.status(400).json({ error: 'Value is required and must be a non-empty string.' });
}
const memoryConfig = req.app.locals?.memory;
const charLimit = memoryConfig?.charLimit || 10000;
if (key.length > 1000) {
return res.status(400).json({
error: `Key exceeds maximum length of 1000 characters. Current length: ${key.length} characters.`,
});
}
if (value.length > charLimit) {
return res.status(400).json({
error: `Value exceeds maximum length of ${charLimit} characters. Current length: ${value.length} characters.`,
});
}
try {
const tokenCount = Tokenizer.getTokenCount(value, 'o200k_base');
const memories = await getAllUserMemories(req.user.id);
// Check token limit
const memoryConfig = req.app.locals?.memory;
const tokenLimit = memoryConfig?.tokenLimit;
if (tokenLimit) {
@@ -175,7 +191,7 @@ router.patch('/preferences', checkMemoryOptOut, async (req, res) => {
* Body: { key?: string, value: string }
* Returns 200 and { updated: true, memory: <updatedDoc> } when successful.
*/
router.patch('/:key', checkMemoryUpdate, async (req, res) => {
router.patch('/:key', memoryPayloadLimit, checkMemoryUpdate, async (req, res) => {
const { key: urlKey } = req.params;
const { key: bodyKey, value } = req.body || {};
@@ -183,9 +199,23 @@ router.patch('/:key', checkMemoryUpdate, async (req, res) => {
return res.status(400).json({ error: 'Value is required and must be a non-empty string.' });
}
// Use the key from the body if provided, otherwise use the key from the URL
const newKey = bodyKey || urlKey;
const memoryConfig = req.app.locals?.memory;
const charLimit = memoryConfig?.charLimit || 10000;
if (newKey.length > 1000) {
return res.status(400).json({
error: `Key exceeds maximum length of 1000 characters. Current length: ${newKey.length} characters.`,
});
}
if (value.length > charLimit) {
return res.status(400).json({
error: `Value exceeds maximum length of ${charLimit} characters. Current length: ${value.length} characters.`,
});
}
try {
const tokenCount = Tokenizer.getTokenCount(value, 'o200k_base');
@@ -196,7 +226,6 @@ router.patch('/:key', checkMemoryUpdate, async (req, res) => {
return res.status(404).json({ error: 'Memory not found.' });
}
// If the key is changing, we need to handle it specially
if (newKey !== urlKey) {
const keyExists = memories.find((m) => m.key === newKey);
if (keyExists) {
@@ -219,7 +248,6 @@ router.patch('/:key', checkMemoryUpdate, async (req, res) => {
return res.status(500).json({ error: 'Failed to delete old memory.' });
}
} else {
// Key is not changing, just update the value
const result = await setMemory({
userId: req.user.id,
key: newKey,
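Taken together, the new guards are: a 100kb JSON body cap via `memoryPayloadLimit`, a 1,000-character cap on keys, and a `charLimit` cap on values (10,000 by default). A hypothetical request sketch; the `/api/memories` mount path is an assumption, not shown in this diff:

// Hypothetical request exercising the value length guard.
(async () => {
  const res = await fetch('/api/memories', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ key: 'preferences', value: 'x'.repeat(20000) }),
  });
  console.log(res.status); // 400
  // { error: "Value exceeds maximum length of 10000 characters. Current length: 20000 characters." }
})();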

View File

@@ -1,7 +1,10 @@
// file deepcode ignore NoRateLimitingForLogin: Rate limiting is handled by the `loginLimiter` middleware
const express = require('express');
const passport = require('passport');
const { isEnabled } = require('@librechat/api');
const { randomState } = require('openid-client');
const { logger } = require('@librechat/data-schemas');
const { ErrorTypes } = require('librechat-data-provider');
const {
checkBan,
logHeaders,
@@ -10,8 +13,6 @@ const {
checkDomainAllowed,
} = require('~/server/middleware');
const { setAuthTokens, setOpenIDAuthTokens } = require('~/server/services/AuthService');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');
const router = express.Router();
@@ -46,13 +47,13 @@ const oauthHandler = async (req, res) => {
};
router.get('/error', (req, res) => {
// A single error message is pushed by passport when authentication fails.
/** A single error message is pushed by passport when authentication fails. */
const errorMessage = req.session?.messages?.pop() || 'Unknown error';
logger.error('Error in OAuth authentication:', {
message: req.session?.messages?.pop() || 'Unknown error',
message: errorMessage,
});
// Redirect to login page with auth_failed parameter to prevent infinite redirect loops
res.redirect(`${domains.client}/login?redirect=false`);
res.redirect(`${domains.client}/login?redirect=false&error=${ErrorTypes.AUTH_FAILED}`);
});
/**

View File

@@ -1,9 +1,8 @@
const { agentsConfigSetup, loadWebSearchConfig } = require('@librechat/api');
const { loadMemoryConfig, agentsConfigSetup, loadWebSearchConfig } = require('@librechat/api');
const {
FileSources,
loadOCRConfig,
EModelEndpoint,
loadMemoryConfig,
getConfigDefaults,
} = require('librechat-data-provider');
const {

View File

@@ -60,7 +60,14 @@ const replaceArtifactContent = (originalText, artifact, original, updated) => {
// Find boundaries between ARTIFACT_START and ARTIFACT_END
const contentStart = artifactContent.indexOf('\n', artifactContent.indexOf(ARTIFACT_START)) + 1;
const contentEnd = artifactContent.lastIndexOf(ARTIFACT_END);
let contentEnd = artifactContent.lastIndexOf(ARTIFACT_END);
// Special case: if contentEnd is 0, the only ':::' found is the one that opens ':::artifact',
// meaning the artifact has no closing ':::' and is incomplete.
// Confirm the match sits exactly at position 0 (the very start of artifactContent) before treating it that way.
if (contentEnd === 0 && artifactContent.indexOf(ARTIFACT_START) === 0) {
contentEnd = artifactContent.length;
}
if (contentStart === -1 || contentEnd === -1) {
return null;
@@ -72,12 +79,20 @@ const replaceArtifactContent = (originalText, artifact, original, updated) => {
// Determine where to look for the original content
let searchStart, searchEnd;
if (codeBlockStart !== -1 && codeBlockEnd !== -1) {
// If code blocks exist, search between them
if (codeBlockStart !== -1) {
// Code block starts
searchStart = codeBlockStart + 4; // after ```\n
searchEnd = codeBlockEnd;
if (codeBlockEnd !== -1 && codeBlockEnd > codeBlockStart) {
// Code block has proper ending
searchEnd = codeBlockEnd;
} else {
// No closing backticks found or they're before the opening (shouldn't happen)
// This might be an incomplete artifact - search to contentEnd
searchEnd = contentEnd;
}
} else {
// Otherwise search in the whole artifact content
// No code blocks at all
searchStart = contentStart;
searchEnd = contentEnd;
}
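The `contentEnd` adjustment above relies on `lastIndexOf` landing on index 0 when an artifact never closes; a tiny sketch of that condition, assuming the ':::artifact' / ':::' markers implied by these utilities:

// Sketch of the boundary check: for an incomplete artifact the only ':::'
// is the one opening ':::artifact', so lastIndexOf(':::') lands at index 0.
const ARTIFACT_START = ':::artifact';
const ARTIFACT_END = ':::';

const incomplete = ':::artifact{identifier="demo"}\n```\nconsole.log("hi");';
let contentEnd = incomplete.lastIndexOf(ARTIFACT_END); // 0
if (contentEnd === 0 && incomplete.indexOf(ARTIFACT_START) === 0) {
  contentEnd = incomplete.length; // treat everything after the header as content
}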

View File

@@ -89,9 +89,9 @@ describe('replaceArtifactContent', () => {
};
test('should replace content within artifact boundaries', () => {
const original = 'console.log(\'hello\')';
const original = "console.log('hello')";
const artifact = createTestArtifact(original);
const updated = 'console.log(\'updated\')';
const updated = "console.log('updated')";
const result = replaceArtifactContent(artifact.text, artifact, original, updated);
expect(result).toContain(updated);
@@ -317,4 +317,182 @@ console.log(greeting);`;
expect(result).not.toContain('\n\n```');
expect(result).not.toContain('```\n\n');
});
describe('incomplete artifacts', () => {
test('should handle incomplete artifacts (missing closing ::: and ```)', () => {
const original = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>Pomodoro</title>
<meta name="description" content="A single-file Pomodoro timer with logs, charts, sounds, and dark mode." />
<style>
:root{`;
const prefix = `Awesome idea! I'll deliver a complete single-file HTML app called "Pomodoro" with:
- Custom session/break durations
You can save this as pomodoro.html and open it directly in your browser.
`;
// This simulates the real incomplete artifact case - no closing ``` or :::
const incompleteArtifact = `${ARTIFACT_START}{identifier="pomodoro-single-file-app" type="text/html" title="Pomodoro — Single File App"}
\`\`\`
${original}`;
const fullText = prefix + incompleteArtifact;
const message = { text: fullText };
const artifacts = findAllArtifacts(message);
expect(artifacts).toHaveLength(1);
expect(artifacts[0].end).toBe(fullText.length);
const updated = original.replace('Pomodoro</title>', 'Pomodoro</title>UPDATED');
const result = replaceArtifactContent(fullText, artifacts[0], original, updated);
expect(result).not.toBeNull();
expect(result).toContain('UPDATED');
expect(result).toContain(prefix);
// Should not have added closing markers
expect(result).not.toMatch(/:::\s*$/);
});
test('should handle incomplete artifacts with only opening code block', () => {
const original = 'function hello() { console.log("world"); }';
const incompleteArtifact = `${ARTIFACT_START}{id="test"}\n\`\`\`\n${original}`;
const message = { text: incompleteArtifact };
const artifacts = findAllArtifacts(message);
expect(artifacts).toHaveLength(1);
const updated = 'function hello() { console.log("UPDATED"); }';
const result = replaceArtifactContent(incompleteArtifact, artifacts[0], original, updated);
expect(result).not.toBeNull();
expect(result).toContain('UPDATED');
});
test('should handle incomplete artifacts without code blocks', () => {
const original = 'Some plain text content';
const incompleteArtifact = `${ARTIFACT_START}{id="test"}\n${original}`;
const message = { text: incompleteArtifact };
const artifacts = findAllArtifacts(message);
expect(artifacts).toHaveLength(1);
const updated = 'Some UPDATED text content';
const result = replaceArtifactContent(incompleteArtifact, artifacts[0], original, updated);
expect(result).not.toBeNull();
expect(result).toContain('UPDATED');
});
});
describe('regression tests for edge cases', () => {
test('should still handle complete artifacts correctly', () => {
// Ensure we didn't break normal artifact handling
const original = 'console.log("test");';
const artifact = createArtifactText({ content: original });
const message = { text: artifact };
const artifacts = findAllArtifacts(message);
expect(artifacts).toHaveLength(1);
const updated = 'console.log("updated");';
const result = replaceArtifactContent(artifact, artifacts[0], original, updated);
expect(result).not.toBeNull();
expect(result).toContain(updated);
expect(result).toContain(ARTIFACT_END);
expect(result).toMatch(/```\nconsole\.log\("updated"\);\n```/);
});
test('should handle multiple complete artifacts', () => {
// Ensure multiple artifacts still work
const content1 = 'First artifact';
const content2 = 'Second artifact';
const text = `${createArtifactText({ content: content1 })}\n\n${createArtifactText({ content: content2 })}`;
const message = { text };
const artifacts = findAllArtifacts(message);
expect(artifacts).toHaveLength(2);
// Update first artifact
const result1 = replaceArtifactContent(text, artifacts[0], content1, 'First UPDATED');
expect(result1).not.toBeNull();
expect(result1).toContain('First UPDATED');
expect(result1).toContain(content2);
// Update second artifact
const result2 = replaceArtifactContent(text, artifacts[1], content2, 'Second UPDATED');
expect(result2).not.toBeNull();
expect(result2).toContain(content1);
expect(result2).toContain('Second UPDATED');
});
test('should not mistake ::: at position 0 for artifact end in complete artifacts', () => {
// This tests the specific fix - ensuring contentEnd=0 doesn't break complete artifacts
const original = 'test content';
// Create an artifact that will have ::: at position 0 when substring'd
const artifact = `${ARTIFACT_START}\n\`\`\`\n${original}\n\`\`\`\n${ARTIFACT_END}`;
const message = { text: artifact };
const artifacts = findAllArtifacts(message);
expect(artifacts).toHaveLength(1);
const updated = 'updated content';
const result = replaceArtifactContent(artifact, artifacts[0], original, updated);
expect(result).not.toBeNull();
expect(result).toContain(updated);
expect(result).toContain(ARTIFACT_END);
});
test('should handle empty artifacts', () => {
// Edge case: empty artifact
const artifact = `${ARTIFACT_START}\n${ARTIFACT_END}`;
const message = { text: artifact };
const artifacts = findAllArtifacts(message);
expect(artifacts).toHaveLength(1);
// Trying to replace non-existent content should return null
const result = replaceArtifactContent(artifact, artifacts[0], 'something', 'updated');
expect(result).toBeNull();
});
test('should preserve whitespace and formatting in complete artifacts', () => {
const original = ` function test() {
return {
value: 42
};
}`;
const artifact = createArtifactText({ content: original });
const message = { text: artifact };
const artifacts = findAllArtifacts(message);
const updated = ` function test() {
return {
value: 100
};
}`;
const result = replaceArtifactContent(artifact, artifacts[0], original, updated);
expect(result).not.toBeNull();
expect(result).toContain('value: 100');
// Should preserve exact formatting
expect(result).toMatch(
/```\n {2}function test\(\) \{\n {4}return \{\n {6}value: 100\n {4}\};\n {2}\}\n```/,
);
});
});
});

View File

@@ -281,7 +281,7 @@ function createInProgressHandler(openai, thread_id, messages) {
openai.seenCompletedMessages.add(message_id);
const message = await openai.beta.threads.messages.retrieve(thread_id, message_id);
const message = await openai.beta.threads.messages.retrieve(message_id, { thread_id });
if (!message?.content?.length) {
return;
}
@@ -435,9 +435,11 @@ async function runAssistant({
};
});
const outputs = await processRequiredActions(openai, actions);
const toolRun = await openai.beta.threads.runs.submitToolOutputs(run.thread_id, run.id, outputs);
const tool_outputs = await processRequiredActions(openai, actions);
const toolRun = await openai.beta.threads.runs.submitToolOutputs(run.id, {
thread_id: run.thread_id,
tool_outputs,
});
// Recursive call with accumulated steps and messages
return await runAssistant({

View File

@@ -1,12 +1,11 @@
const { logger } = require('@librechat/data-schemas');
const { EModelEndpoint } = require('librechat-data-provider');
const { useAzurePlugins } = require('~/server/services/Config/EndpointService').config;
const {
getAnthropicModels,
getBedrockModels,
getOpenAIModels,
getGoogleModels,
getBedrockModels,
getAnthropicModels,
} = require('~/server/services/ModelService');
const { logger } = require('~/config');
/**
* Loads the default models for the application.
@@ -16,58 +15,42 @@ const { logger } = require('~/config');
*/
async function loadDefaultModels(req) {
try {
const [
openAI,
anthropic,
azureOpenAI,
gptPlugins,
assistants,
azureAssistants,
google,
bedrock,
] = await Promise.all([
getOpenAIModels({ user: req.user.id }).catch((error) => {
logger.error('Error fetching OpenAI models:', error);
return [];
}),
getAnthropicModels({ user: req.user.id }).catch((error) => {
logger.error('Error fetching Anthropic models:', error);
return [];
}),
getOpenAIModels({ user: req.user.id, azure: true }).catch((error) => {
logger.error('Error fetching Azure OpenAI models:', error);
return [];
}),
getOpenAIModels({ user: req.user.id, azure: useAzurePlugins, plugins: true }).catch(
(error) => {
logger.error('Error fetching Plugin models:', error);
const [openAI, anthropic, azureOpenAI, assistants, azureAssistants, google, bedrock] =
await Promise.all([
getOpenAIModels({ user: req.user.id }).catch((error) => {
logger.error('Error fetching OpenAI models:', error);
return [];
},
),
getOpenAIModels({ assistants: true }).catch((error) => {
logger.error('Error fetching OpenAI Assistants API models:', error);
return [];
}),
getOpenAIModels({ azureAssistants: true }).catch((error) => {
logger.error('Error fetching Azure OpenAI Assistants API models:', error);
return [];
}),
Promise.resolve(getGoogleModels()).catch((error) => {
logger.error('Error getting Google models:', error);
return [];
}),
Promise.resolve(getBedrockModels()).catch((error) => {
logger.error('Error getting Bedrock models:', error);
return [];
}),
]);
}),
getAnthropicModels({ user: req.user.id }).catch((error) => {
logger.error('Error fetching Anthropic models:', error);
return [];
}),
getOpenAIModels({ user: req.user.id, azure: true }).catch((error) => {
logger.error('Error fetching Azure OpenAI models:', error);
return [];
}),
getOpenAIModels({ assistants: true }).catch((error) => {
logger.error('Error fetching OpenAI Assistants API models:', error);
return [];
}),
getOpenAIModels({ azureAssistants: true }).catch((error) => {
logger.error('Error fetching Azure OpenAI Assistants API models:', error);
return [];
}),
Promise.resolve(getGoogleModels()).catch((error) => {
logger.error('Error getting Google models:', error);
return [];
}),
Promise.resolve(getBedrockModels()).catch((error) => {
logger.error('Error getting Bedrock models:', error);
return [];
}),
]);
return {
[EModelEndpoint.openAI]: openAI,
[EModelEndpoint.agents]: openAI,
[EModelEndpoint.google]: google,
[EModelEndpoint.anthropic]: anthropic,
[EModelEndpoint.gptPlugins]: gptPlugins,
[EModelEndpoint.azureOpenAI]: azureOpenAI,
[EModelEndpoint.assistants]: assistants,
[EModelEndpoint.azureAssistants]: azureAssistants,

View File

@@ -1,6 +1,6 @@
const { logger } = require('@librechat/data-schemas');
const { isAgentsEndpoint, removeNullishValues, Constants } = require('librechat-data-provider');
const { loadAgent } = require('~/models/Agent');
const { logger } = require('~/config');
const buildOptions = (req, endpoint, parsedBody, endpointType) => {
const { spec, iconURL, agent_id, instructions, ...model_parameters } = parsedBody;

View File

@@ -1,4 +1,5 @@
const { logger } = require('@librechat/data-schemas');
const { validateAgentModel } = require('@librechat/api');
const { createContentAggregator } = require('@librechat/agents');
const {
Constants,
@@ -11,10 +12,12 @@ const {
getDefaultHandlers,
} = require('~/server/controllers/agents/callbacks');
const { initializeAgent } = require('~/server/services/Endpoints/agents/agent');
const { getModelsConfig } = require('~/server/controllers/ModelController');
const { getCustomEndpointConfig } = require('~/server/services/Config');
const { loadAgentTools } = require('~/server/services/ToolService');
const AgentClient = require('~/server/controllers/agents/client');
const { getAgent } = require('~/models/Agent');
const { logViolation } = require('~/cache');
function createToolLoader() {
/**
@@ -72,6 +75,19 @@ const initializeClient = async ({ req, res, endpointOption }) => {
throw new Error('Agent not found');
}
const modelsConfig = await getModelsConfig(req);
const validationResult = await validateAgentModel({
req,
res,
modelsConfig,
logViolation,
agent: primaryAgent,
});
if (!validationResult.isValid) {
throw new Error(validationResult.error?.message);
}
const agentConfigs = new Map();
/** @type {Set<string>} */
const allowedProviders = new Set(req?.app?.locals?.[EModelEndpoint.agents]?.allowedProviders);
@@ -101,6 +117,19 @@ const initializeClient = async ({ req, res, endpointOption }) => {
if (!agent) {
throw new Error(`Agent ${agentId} not found`);
}
const validationResult = await validateAgentModel({
req,
res,
agent,
modelsConfig,
logViolation,
});
if (!validationResult.isValid) {
throw new Error(validationResult.error?.message);
}
const config = await initializeAgent({
req,
res,

View File

@@ -6,7 +6,7 @@ const {
getUserKeyExpiry,
checkUserKeyExpiry,
} = require('~/server/services/UserService');
const OpenAIClient = require('~/app/clients/OpenAIClient');
const OAIClient = require('~/app/clients/OpenAIClient');
const { isUserProvided } = require('~/server/utils');
const initializeClient = async ({ req, res, endpointOption, version, initAppClient = false }) => {
@@ -79,7 +79,7 @@ const initializeClient = async ({ req, res, endpointOption, version, initAppClie
openai.res = res;
if (endpointOption && initAppClient) {
const client = new OpenAIClient(apiKey, clientOptions);
const client = new OAIClient(apiKey, clientOptions);
return {
client,
openai,

View File

@@ -3,11 +3,11 @@ const { ProxyAgent } = require('undici');
const { constructAzureURL, isUserProvided, resolveHeaders } = require('@librechat/api');
const { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } = require('librechat-data-provider');
const {
checkUserKeyExpiry,
getUserKeyValues,
getUserKeyExpiry,
checkUserKeyExpiry,
} = require('~/server/services/UserService');
const OpenAIClient = require('~/app/clients/OpenAIClient');
const OAIClient = require('~/app/clients/OpenAIClient');
class Files {
constructor(client) {
@@ -184,7 +184,7 @@ const initializeClient = async ({ req, res, version, endpointOption, initAppClie
}
if (endpointOption && initAppClient) {
const client = new OpenAIClient(apiKey, clientOptions);
const client = new OAIClient(apiKey, clientOptions);
return {
client,
openai,

View File

@@ -0,0 +1,166 @@
const { EModelEndpoint } = require('librechat-data-provider');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { validateAnthropicPdf } = require('../validation/pdfValidator');
/**
* Converts a readable stream to a buffer.
*
* @param {NodeJS.ReadableStream} stream - The readable stream to convert.
* @returns {Promise<Buffer>} - Promise resolving to the buffer.
*/
async function streamToBuffer(stream) {
return new Promise((resolve, reject) => {
const chunks = [];
stream.on('data', (chunk) => {
chunks.push(chunk);
});
stream.on('end', () => {
try {
const buffer = Buffer.concat(chunks);
chunks.length = 0; // Clear the array
resolve(buffer);
} catch (err) {
reject(err);
}
});
stream.on('error', (error) => {
chunks.length = 0;
reject(error);
});
}).finally(() => {
// Clean up the stream if required
if (stream.destroy && typeof stream.destroy === 'function') {
stream.destroy();
}
});
}
/**
* Processes and encodes document files for various endpoints
*
* @param {Express.Request} req - Express request object
* @param {MongoFile[]} files - Array of file objects to process
* @param {string} endpoint - The endpoint identifier (e.g., EModelEndpoint.anthropic)
* @returns {Promise<{documents: MessageContentDocument[], files: MongoFile[]}>}
*/
async function encodeAndFormatDocuments(req, files, endpoint) {
const promises = [];
/** @type {Record<FileSources, Pick<ReturnType<typeof getStrategyFunctions>, 'prepareDocumentPayload' | 'getDownloadStream'>>} */
const encodingMethods = {};
/** @type {{ documents: MessageContentDocument[]; files: MongoFile[] }} */
const result = {
documents: [],
files: [],
};
if (!files || !files.length) {
return result;
}
// Filter for document files only
const documentFiles = files.filter(
(file) => file.type === 'application/pdf' || file.type?.startsWith('application/'), // Future: support for other document types
);
if (!documentFiles.length) {
return result;
}
for (let file of documentFiles) {
/** @type {FileSources} */
const source = file.source ?? 'local';
// Only process PDFs for Anthropic for now
if (file.type !== 'application/pdf' || endpoint !== EModelEndpoint.anthropic) {
continue;
}
if (!encodingMethods[source]) {
encodingMethods[source] = getStrategyFunctions(source);
}
// Prepare file metadata
const fileMetadata = {
file_id: file.file_id || file._id,
temp_file_id: file.temp_file_id,
filepath: file.filepath,
source: file.source,
filename: file.filename,
type: file.type,
};
promises.push([file, fileMetadata]);
}
const results = await Promise.allSettled(
promises.map(async ([file, fileMetadata]) => {
if (!file || !fileMetadata) {
return { file: null, content: null, metadata: fileMetadata };
}
try {
const source = file.source ?? 'local';
const { getDownloadStream } = encodingMethods[source];
const stream = await getDownloadStream(req, file.filepath);
const buffer = await streamToBuffer(stream);
const documentContent = buffer.toString('base64');
return {
file,
content: documentContent,
metadata: fileMetadata,
};
} catch (error) {
console.error(`Error processing document ${file.filename}:`, error);
return { file, content: null, metadata: fileMetadata };
}
}),
);
for (const settledResult of results) {
if (settledResult.status === 'rejected') {
console.error('Document processing failed:', settledResult.reason);
continue;
}
const { file, content, metadata } = settledResult.value;
if (!content || !file) {
if (metadata) {
result.files.push(metadata);
}
continue;
}
if (file.type === 'application/pdf' && endpoint === EModelEndpoint.anthropic) {
const pdfBuffer = Buffer.from(content, 'base64');
const validation = await validateAnthropicPdf(pdfBuffer, pdfBuffer.length);
if (!validation.isValid) {
throw new Error(`PDF validation failed: ${validation.error}`);
}
const documentPart = {
type: 'document',
source: {
type: 'base64',
media_type: 'application/pdf',
data: content,
},
};
result.documents.push(documentPart);
result.files.push(metadata);
}
}
return result;
}
module.exports = {
encodeAndFormatDocuments,
};
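A hedged usage sketch for encodeAndFormatDocuments, assuming the caller already has the Express request and an array of MongoFile attachments; the require path, function name, and surrounding handler are illustrative, not taken from the repository:

// Illustrative only: the require path and `attachments` variable are assumptions.
const { EModelEndpoint } = require('librechat-data-provider');
const { encodeAndFormatDocuments } = require('~/server/services/Files/documents');

async function buildAnthropicDocumentParts(req, attachments) {
  const { documents, files } = await encodeAndFormatDocuments(req, attachments, EModelEndpoint.anthropic);
  // `documents` entries look like { type: 'document', source: { type: 'base64', media_type: 'application/pdf', data } }
  // `files` carries the metadata of each processed attachment for the response payload
  return { documents, files };
}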

View File

@@ -0,0 +1,5 @@
const { encodeAndFormatDocuments } = require('./encode');
module.exports = {
encodeAndFormatDocuments,
};

View File

@@ -391,7 +391,17 @@ const processFileUpload = async ({ req, res, metadata }) => {
const isAssistantUpload = isAssistantsEndpoint(metadata.endpoint);
const assistantSource =
metadata.endpoint === EModelEndpoint.azureAssistants ? FileSources.azure : FileSources.openai;
const source = isAssistantUpload ? assistantSource : FileSources.vectordb;
// Use local storage for Anthropic native PDF support, vectordb for others
const isAnthropicUpload = metadata.endpoint === EModelEndpoint.anthropic;
let source;
if (isAssistantUpload) {
source = assistantSource;
} else if (isAnthropicUpload) {
source = FileSources.local;
} else {
source = FileSources.vectordb;
}
const { handleFileUpload } = getStrategyFunctions(source);
const { file_id, temp_file_id } = metadata;

View File

@@ -0,0 +1,77 @@
const { logger } = require('~/config');
const { anthropicPdfSizeLimit } = require('librechat-data-provider');
/**
* Validates if a PDF meets Anthropic's requirements
* @param {Buffer} pdfBuffer - The PDF file as a buffer
* @param {number} fileSize - The file size in bytes
* @returns {Promise<{isValid: boolean, error?: string}>}
*/
async function validateAnthropicPdf(pdfBuffer, fileSize) {
try {
// Check file size (32MB limit)
if (fileSize > anthropicPdfSizeLimit) {
return {
isValid: false,
error: `PDF file size (${Math.round(fileSize / (1024 * 1024))}MB) exceeds Anthropic's 32MB limit`,
};
}
// Basic PDF header validation
if (!pdfBuffer || pdfBuffer.length < 5) {
return {
isValid: false,
error: 'Invalid PDF file: too small or corrupted',
};
}
// Check PDF magic bytes
const pdfHeader = pdfBuffer.subarray(0, 5).toString();
if (!pdfHeader.startsWith('%PDF-')) {
return {
isValid: false,
error: 'Invalid PDF file: missing PDF header',
};
}
// Check for password protection/encryption
const pdfContent = pdfBuffer.toString('binary');
if (
pdfContent.includes('/Encrypt ') ||
pdfContent.includes('/U (') ||
pdfContent.includes('/O (')
) {
return {
isValid: false,
error: 'PDF is password-protected or encrypted. Anthropic requires unencrypted PDFs.',
};
}
// Estimate page count (this is a rough estimation)
const pageMatches = pdfContent.match(/\/Type[\s]*\/Page[^s]/g);
const estimatedPages = pageMatches ? pageMatches.length : 1;
if (estimatedPages > 100) {
return {
isValid: false,
error: `PDF has approximately ${estimatedPages} pages, exceeding Anthropic's 100-page limit`,
};
}
logger.debug(
`PDF validation passed: ${Math.round(fileSize / 1024)}KB, ~${estimatedPages} pages`,
);
return { isValid: true };
} catch (error) {
logger.error('PDF validation error:', error);
return {
isValid: false,
error: 'Failed to validate PDF file',
};
}
}
module.exports = {
validateAnthropicPdf,
};
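A hedged usage sketch for validateAnthropicPdf, assuming a PDF already on disk; the file path and relative require location are placeholders:

// Illustrative only: the path and require location are assumptions.
const fs = require('fs/promises');
const { validateAnthropicPdf } = require('./pdfValidator');

async function assertValidPdf(filePath) {
  const pdfBuffer = await fs.readFile(filePath);
  const { isValid, error } = await validateAnthropicPdf(pdfBuffer, pdfBuffer.length);
  if (!isValid) {
    throw new Error(`PDF rejected: ${error}`);
  }
  return pdfBuffer;
}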

View File

@@ -91,11 +91,10 @@ class RunManager {
* @param {boolean} [params.final] - The end of the run polling loop, due to `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or `expired` statuses.
*/
async fetchRunSteps({ openai, thread_id, run_id, runStatus, final = false }) {
// const { data: steps, first_id, last_id, has_more } = await openai.beta.threads.runs.steps.list(thread_id, run_id);
// const { data: steps, first_id, last_id, has_more } = await openai.beta.threads.runs.steps.list(run_id, { thread_id });
const { data: _steps } = await openai.beta.threads.runs.steps.list(
thread_id,
run_id,
{},
{ thread_id },
{
timeout: 3000,
maxRetries: 5,

View File

@@ -573,9 +573,9 @@ class StreamRunManager {
let toolRun;
try {
toolRun = this.openai.beta.threads.runs.submitToolOutputsStream(
run.thread_id,
run.id,
{
thread_id: run.thread_id,
tool_outputs,
stream: true,
},

View File

@@ -33,7 +33,7 @@ async function withTimeout(promise, timeoutMs, timeoutMessage) {
* @param {string} [params.body.model] - Optional. The ID of the model to be used for this run.
* @param {string} [params.body.instructions] - Optional. Override the default system message of the assistant.
* @param {string} [params.body.additional_instructions] - Optional. Appends additional instructions
* at theend of the instructions for the run. This is useful for modifying
* at the end of the instructions for the run. This is useful for modifying
* the behavior on a per-run basis without overriding other instructions.
* @param {Object[]} [params.body.tools] - Optional. Override the tools the assistant can use for this run.
* @param {string[]} [params.body.file_ids] - Optional.
@@ -179,7 +179,7 @@ async function waitForRun({
* @return {Promise<RunStep[]>} A promise that resolves to an array of RunStep objects.
*/
async function _retrieveRunSteps({ openai, thread_id, run_id }) {
const runSteps = await openai.beta.threads.runs.steps.list(thread_id, run_id);
const runSteps = await openai.beta.threads.runs.steps.list(run_id, { thread_id });
return runSteps;
}

View File

@@ -192,7 +192,8 @@ async function addThreadMetadata({ openai, thread_id, messageId, messages }) {
const promises = [];
for (const message of messages) {
promises.push(
openai.beta.threads.messages.update(thread_id, message.id, {
openai.beta.threads.messages.update(message.id, {
thread_id,
metadata: {
messageId,
},
@@ -263,7 +264,8 @@ async function syncMessages({
}
modifyPromises.push(
openai.beta.threads.messages.update(thread_id, apiMessage.id, {
openai.beta.threads.messages.update(apiMessage.id, {
thread_id,
metadata: {
messageId: dbMessage.messageId,
},
@@ -413,7 +415,7 @@ async function checkMessageGaps({
}) {
const promises = [];
promises.push(openai.beta.threads.messages.list(thread_id, defaultOrderQuery));
promises.push(openai.beta.threads.runs.steps.list(thread_id, run_id));
promises.push(openai.beta.threads.runs.steps.list(run_id, { thread_id }));
/** @type {[{ data: ThreadMessage[] }, { data: RunStep[] }]} */
const [response, stepsResponse] = await Promise.all(promises);

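These hunks follow the newer openai SDK call shape, where the run ID is the positional argument and thread_id moves into the params object. A hedged sketch of that form, mirroring the calls shown above; the client setup and IDs are placeholders:

// Sketch of the call shape only: apiKey, threadId, and runId are placeholders.
const OpenAI = require('openai');
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

async function listRunSteps(threadId, runId) {
  // run_id is the positional argument; thread_id now travels in the params object
  const { data: steps } = await openai.beta.threads.runs.steps.list(runId, { thread_id: threadId });
  return steps;
}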
View File

@@ -1,6 +1,7 @@
const fs = require('fs');
const path = require('path');
const { sleep } = require('@librechat/agents');
const { getToolkitKey } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { zodToJsonSchema } = require('zod-to-json-schema');
const { Calculator } = require('@langchain/community/tools/calculator');
@@ -11,7 +12,6 @@ const {
ErrorTypes,
ContentTypes,
imageGenTools,
EToolResources,
EModelEndpoint,
actionDelimiter,
ImageVisionTool,
@@ -40,30 +40,6 @@ const { recordUsage } = require('~/server/services/Threads');
const { loadTools } = require('~/app/clients/tools/util');
const { redactMessage } = require('~/config/parsers');
/**
* @param {string} toolName
* @returns {string | undefined} toolKey
*/
function getToolkitKey(toolName) {
/** @type {string|undefined} */
let toolkitKey;
for (const toolkit of toolkits) {
if (toolName.startsWith(EToolResources.image_edit)) {
const splitMatches = toolkit.pluginKey.split('_');
const suffix = splitMatches[splitMatches.length - 1];
if (toolName.endsWith(suffix)) {
toolkitKey = toolkit.pluginKey;
break;
}
}
if (toolName.startsWith(toolkit.pluginKey)) {
toolkitKey = toolkit.pluginKey;
break;
}
}
return toolkitKey;
}
/**
* Loads and formats tools from the specified tool directory.
*
@@ -145,7 +121,7 @@ function loadAndFormatTools({ directory, adminFilter = [], adminIncluded = [] })
for (const toolInstance of basicToolInstances) {
const formattedTool = formatToOpenAIAssistantTool(toolInstance);
let toolName = formattedTool[Tools.function].name;
toolName = getToolkitKey(toolName) ?? toolName;
toolName = getToolkitKey({ toolkits, toolName }) ?? toolName;
if (filter.has(toolName) && included.size === 0) {
continue;
}

View File

@@ -2,11 +2,11 @@ const {
SystemRoles,
Permissions,
PermissionTypes,
isMemoryEnabled,
removeNullishValues,
} = require('librechat-data-provider');
const { logger } = require('@librechat/data-schemas');
const { isMemoryEnabled } = require('@librechat/api');
const { updateAccessPermissions } = require('~/models/Role');
const { logger } = require('~/config');
/**
* Loads the default interface object.

View File

@@ -1,10 +1,10 @@
const fs = require('fs');
const { isEnabled } = require('@librechat/api');
const LdapStrategy = require('passport-ldapauth');
const { SystemRoles } = require('librechat-data-provider');
const { logger } = require('@librechat/data-schemas');
const { SystemRoles, ErrorTypes } = require('librechat-data-provider');
const { createUser, findUser, updateUser, countUsers } = require('~/models');
const { getBalanceConfig } = require('~/server/services/Config');
const { isEnabled } = require('~/server/utils');
const {
LDAP_URL,
@@ -90,6 +90,14 @@ const ldapLogin = new LdapStrategy(ldapOptions, async (userinfo, done) => {
(LDAP_ID && userinfo[LDAP_ID]) || userinfo.uid || userinfo.sAMAccountName || userinfo.mail;
let user = await findUser({ ldapId });
if (user && user.provider !== 'ldap') {
logger.info(
`[ldapStrategy] User ${user.email} already exists with provider ${user.provider}`,
);
return done(null, false, {
message: ErrorTypes.AUTH_FAILED,
});
}
const fullNameAttributes = LDAP_FULL_NAME && LDAP_FULL_NAME.split(',');
const fullName =

View File

@@ -3,9 +3,9 @@ const fetch = require('node-fetch');
const passport = require('passport');
const client = require('openid-client');
const jwtDecode = require('jsonwebtoken/decode');
const { CacheKeys } = require('librechat-data-provider');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { hashToken, logger } = require('@librechat/data-schemas');
const { CacheKeys, ErrorTypes } = require('librechat-data-provider');
const { Strategy: OpenIDStrategy } = require('openid-client/passport');
const { isEnabled, safeStringify, logHeaders } = require('@librechat/api');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
@@ -104,6 +104,14 @@ class CustomOpenIDStrategy extends OpenIDStrategy {
if (options?.state && !params.has('state')) {
params.set('state', options.state);
}
if (process.env.OPENID_AUDIENCE) {
params.set('audience', process.env.OPENID_AUDIENCE);
logger.debug(
`[openidStrategy] Adding audience to authorization request: ${process.env.OPENID_AUDIENCE}`,
);
}
return params;
}
}
@@ -312,6 +320,14 @@ async function setupOpenId() {
} for openidId: ${claims.sub}`,
);
}
if (user != null && user.provider !== 'openid') {
logger.info(
`[openidStrategy] Attempted OpenID login by user ${user.email}, was registered with "${user.provider}" provider`,
);
return done(null, false, {
message: ErrorTypes.AUTH_FAILED,
});
}
const userinfo = {
...claims,
...(await getUserInfo(openidConfig, tokenset.access_token, claims.sub)),
@@ -353,7 +369,7 @@ async function setupOpenId() {
username = userinfo[process.env.OPENID_USERNAME_CLAIM];
} else {
username = convertToUsername(
userinfo.username || userinfo.given_name || userinfo.email,
userinfo.preferred_username || userinfo.username || userinfo.email,
);
}

View File

@@ -1,7 +1,8 @@
const fetch = require('node-fetch');
const jwtDecode = require('jsonwebtoken/decode');
const { setupOpenId } = require('./openidStrategy');
const { ErrorTypes } = require('librechat-data-provider');
const { findUser, createUser, updateUser } = require('~/models');
const { setupOpenId } = require('./openidStrategy');
// --- Mocks ---
jest.mock('node-fetch');
@@ -50,11 +51,9 @@ jest.mock('openid-client', () => {
issuer: 'https://fake-issuer.com',
// Add any other properties needed by the implementation
}),
fetchUserInfo: jest.fn().mockImplementation((config, accessToken, sub) => {
fetchUserInfo: jest.fn().mockImplementation(() => {
// Only return additional properties, but don't override any claims
return Promise.resolve({
preferred_username: 'preferred_username',
});
return Promise.resolve({});
}),
customFetch: Symbol('customFetch'),
};
@@ -104,6 +103,7 @@ describe('setupOpenId', () => {
given_name: 'First',
family_name: 'Last',
name: 'My Full',
preferred_username: 'testusername',
username: 'flast',
picture: 'https://example.com/avatar.png',
}),
@@ -156,20 +156,20 @@ describe('setupOpenId', () => {
verifyCallback = require('openid-client/passport').__getVerifyCallback();
});
it('should create a new user with correct username when username claim exists', async () => {
// Arrange our userinfo already has username 'flast'
it('should create a new user with correct username when preferred_username claim exists', async () => {
// Arrange our userinfo already has preferred_username 'testusername'
const userinfo = tokenset.claims();
// Act
const { user } = await validate(tokenset);
// Assert
expect(user.username).toBe(userinfo.username);
expect(user.username).toBe(userinfo.preferred_username);
expect(createUser).toHaveBeenCalledWith(
expect.objectContaining({
provider: 'openid',
openidId: userinfo.sub,
username: userinfo.username,
username: userinfo.preferred_username,
email: userinfo.email,
name: `${userinfo.given_name} ${userinfo.family_name}`,
}),
@@ -179,12 +179,12 @@ describe('setupOpenId', () => {
);
});
it('should use given_name as username when username claim is missing', async () => {
// Arrange remove username from userinfo
it('should use username as username when preferred_username claim is missing', async () => {
// Arrange remove preferred_username from userinfo
const userinfo = { ...tokenset.claims() };
delete userinfo.username;
// Expect the username to be the given name (unchanged case)
const expectUsername = userinfo.given_name;
delete userinfo.preferred_username;
// Expect the username to be the "username"
const expectUsername = userinfo.username;
// Act
const { user } = await validate({ ...tokenset, claims: () => userinfo });
@@ -199,11 +199,11 @@ describe('setupOpenId', () => {
);
});
it('should use email as username when username and given_name are missing', async () => {
// Arrange remove username and given_name
it('should use email as username when username and preferred_username are missing', async () => {
// Arrange remove username and preferred_username
const userinfo = { ...tokenset.claims() };
delete userinfo.username;
delete userinfo.given_name;
delete userinfo.preferred_username;
const expectUsername = userinfo.email;
// Act
@@ -262,17 +262,20 @@ describe('setupOpenId', () => {
});
it('should update an existing user on login', async () => {
// Arrange simulate that a user already exists
// Arrange simulate that a user already exists with openid provider
const existingUser = {
_id: 'existingUserId',
provider: 'local',
provider: 'openid',
email: tokenset.claims().email,
openidId: '',
username: '',
name: '',
};
findUser.mockImplementation(async (query) => {
if (query.openidId === tokenset.claims().sub || query.email === tokenset.claims().email) {
if (
query.openidId === tokenset.claims().sub ||
(query.email === tokenset.claims().email && query.provider === 'openid')
) {
return existingUser;
}
return null;
@@ -289,18 +292,44 @@ describe('setupOpenId', () => {
expect.objectContaining({
provider: 'openid',
openidId: userinfo.sub,
username: userinfo.username,
username: userinfo.preferred_username,
name: `${userinfo.given_name} ${userinfo.family_name}`,
}),
);
});
it('should block login when email exists with different provider', async () => {
// Arrange simulate that a user exists with same email but different provider
const existingUser = {
_id: 'existingUserId',
provider: 'google',
email: tokenset.claims().email,
googleId: 'some-google-id',
username: 'existinguser',
name: 'Existing User',
};
findUser.mockImplementation(async (query) => {
if (query.email === tokenset.claims().email && !query.provider) {
return existingUser;
}
return null;
});
// Act
const result = await validate(tokenset);
// Assert verify that the strategy rejects login
expect(result.user).toBe(false);
expect(result.details.message).toBe(ErrorTypes.AUTH_FAILED);
expect(createUser).not.toHaveBeenCalled();
expect(updateUser).not.toHaveBeenCalled();
});
it('should enforce the required role and reject login if missing', async () => {
// Arrange simulate a token without the required role.
jwtDecode.mockReturnValue({
roles: ['SomeOtherRole'],
});
const userinfo = tokenset.claims();
// Act
const { user, details } = await validate(tokenset);
@@ -311,9 +340,6 @@ describe('setupOpenId', () => {
});
it('should attempt to download and save the avatar if picture is provided', async () => {
// Arrange ensure userinfo contains a picture URL
const userinfo = tokenset.claims();
// Act
const { user } = await validate(tokenset);

View File

@@ -22,9 +22,12 @@ const handleExistingUser = async (oldUser, avatarUrl) => {
const isLocal = fileStrategy === FileSources.local;
let updatedAvatar = false;
if (isLocal && (oldUser.avatar === null || !oldUser.avatar.includes('?manual=true'))) {
const hasManualFlag =
typeof oldUser?.avatar === 'string' && oldUser.avatar.includes('?manual=true');
if (isLocal && (!oldUser?.avatar || !hasManualFlag)) {
updatedAvatar = avatarUrl;
} else if (!isLocal && (oldUser.avatar === null || !oldUser.avatar.includes('?manual=true'))) {
} else if (!isLocal && (!oldUser?.avatar || !hasManualFlag)) {
const userId = oldUser._id;
const resizedBuffer = await resizeAvatar({
userId,

View File

@@ -0,0 +1,164 @@
const { FileSources } = require('librechat-data-provider');
const { handleExistingUser } = require('./process');
jest.mock('~/server/services/Files/strategies', () => ({
getStrategyFunctions: jest.fn(),
}));
jest.mock('~/server/services/Files/images/avatar', () => ({
resizeAvatar: jest.fn(),
}));
jest.mock('~/models', () => ({
updateUser: jest.fn(),
createUser: jest.fn(),
getUserById: jest.fn(),
}));
jest.mock('~/server/services/Config', () => ({
getBalanceConfig: jest.fn(),
}));
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { resizeAvatar } = require('~/server/services/Files/images/avatar');
const { updateUser } = require('~/models');
describe('handleExistingUser', () => {
beforeEach(() => {
jest.clearAllMocks();
process.env.CDN_PROVIDER = FileSources.local;
});
it('should handle null avatar without throwing error', async () => {
const oldUser = {
_id: 'user123',
avatar: null,
};
const avatarUrl = 'https://example.com/avatar.png';
await handleExistingUser(oldUser, avatarUrl);
expect(updateUser).toHaveBeenCalledWith('user123', { avatar: avatarUrl });
});
it('should handle undefined avatar without throwing error', async () => {
const oldUser = {
_id: 'user123',
// avatar is undefined
};
const avatarUrl = 'https://example.com/avatar.png';
await handleExistingUser(oldUser, avatarUrl);
expect(updateUser).toHaveBeenCalledWith('user123', { avatar: avatarUrl });
});
it('should not update avatar if it has manual=true flag', async () => {
const oldUser = {
_id: 'user123',
avatar: 'https://example.com/avatar.png?manual=true',
};
const avatarUrl = 'https://example.com/new-avatar.png';
await handleExistingUser(oldUser, avatarUrl);
expect(updateUser).not.toHaveBeenCalled();
});
it('should update avatar for local storage when avatar has no manual flag', async () => {
const oldUser = {
_id: 'user123',
avatar: 'https://example.com/old-avatar.png',
};
const avatarUrl = 'https://example.com/new-avatar.png';
await handleExistingUser(oldUser, avatarUrl);
expect(updateUser).toHaveBeenCalledWith('user123', { avatar: avatarUrl });
});
it('should process avatar for non-local storage', async () => {
process.env.CDN_PROVIDER = 's3';
const mockProcessAvatar = jest.fn().mockResolvedValue('processed-avatar-url');
getStrategyFunctions.mockReturnValue({ processAvatar: mockProcessAvatar });
resizeAvatar.mockResolvedValue(Buffer.from('resized-image'));
const oldUser = {
_id: 'user123',
avatar: null,
};
const avatarUrl = 'https://example.com/avatar.png';
await handleExistingUser(oldUser, avatarUrl);
expect(resizeAvatar).toHaveBeenCalledWith({
userId: 'user123',
input: avatarUrl,
});
expect(mockProcessAvatar).toHaveBeenCalledWith({
buffer: Buffer.from('resized-image'),
userId: 'user123',
manual: 'false',
});
expect(updateUser).toHaveBeenCalledWith('user123', { avatar: 'processed-avatar-url' });
});
it('should not update if avatar already has manual flag in non-local storage', async () => {
process.env.CDN_PROVIDER = 's3';
const oldUser = {
_id: 'user123',
avatar: 'https://cdn.example.com/avatar.png?manual=true',
};
const avatarUrl = 'https://example.com/new-avatar.png';
await handleExistingUser(oldUser, avatarUrl);
expect(resizeAvatar).not.toHaveBeenCalled();
expect(updateUser).not.toHaveBeenCalled();
});
it('should handle avatar with query parameters but without manual flag', async () => {
const oldUser = {
_id: 'user123',
avatar: 'https://example.com/avatar.png?size=large&format=webp',
};
const avatarUrl = 'https://example.com/new-avatar.png';
await handleExistingUser(oldUser, avatarUrl);
expect(updateUser).toHaveBeenCalledWith('user123', { avatar: avatarUrl });
});
it('should handle empty string avatar', async () => {
const oldUser = {
_id: 'user123',
avatar: '',
};
const avatarUrl = 'https://example.com/avatar.png';
await handleExistingUser(oldUser, avatarUrl);
expect(updateUser).toHaveBeenCalledWith('user123', { avatar: avatarUrl });
});
it('should handle avatar with manual=false parameter', async () => {
const oldUser = {
_id: 'user123',
avatar: 'https://example.com/avatar.png?manual=false',
};
const avatarUrl = 'https://example.com/new-avatar.png';
await handleExistingUser(oldUser, avatarUrl);
expect(updateUser).toHaveBeenCalledWith('user123', { avatar: avatarUrl });
});
it('should handle oldUser being null gracefully', async () => {
const avatarUrl = 'https://example.com/avatar.png';
// This should throw an error when trying to access oldUser._id
await expect(handleExistingUser(null, avatarUrl)).rejects.toThrow();
});
});

View File

@@ -2,6 +2,7 @@ const fs = require('fs');
const path = require('path');
const fetch = require('node-fetch');
const passport = require('passport');
const { ErrorTypes } = require('librechat-data-provider');
const { hashToken, logger } = require('@librechat/data-schemas');
const { Strategy: SamlStrategy } = require('@node-saml/passport-saml');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
@@ -203,6 +204,15 @@ async function setupSaml() {
);
}
if (user && user.provider !== 'saml') {
logger.info(
`[samlStrategy] User ${user.email} already exists with provider ${user.provider}`,
);
return done(null, false, {
message: ErrorTypes.AUTH_FAILED,
});
}
const fullName = getFullName(profile);
const username = convertToUsername(

View File

@@ -378,11 +378,11 @@ u7wlOSk+oFzDIO/UILIA
});
it('should update an existing user on login', async () => {
// Set up findUser to return an existing user
// Set up findUser to return an existing user with saml provider
const { findUser } = require('~/models');
const existingUser = {
_id: 'existing-user-id',
provider: 'local',
provider: 'saml',
email: baseProfile.email,
samlId: '',
username: 'oldusername',
@@ -400,6 +400,26 @@ u7wlOSk+oFzDIO/UILIA
expect(user.email).toBe(baseProfile.email);
});
it('should block login when email exists with different provider', async () => {
// Set up findUser to return a user with different provider
const { findUser } = require('~/models');
const existingUser = {
_id: 'existing-user-id',
provider: 'google',
email: baseProfile.email,
googleId: 'some-google-id',
username: 'existinguser',
name: 'Existing User',
};
findUser.mockResolvedValue(existingUser);
const profile = { ...baseProfile };
const result = await validate(profile);
expect(result.user).toBe(false);
expect(result.details.message).toBe(require('librechat-data-provider').ErrorTypes.AUTH_FAILED);
});
it('should attempt to download and save the avatar if picture is provided', async () => {
const profile = { ...baseProfile };

View File

@@ -1,6 +1,7 @@
const { isEnabled } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { ErrorTypes } = require('librechat-data-provider');
const { createSocialUser, handleExistingUser } = require('./process');
const { isEnabled } = require('~/server/utils');
const { findUser } = require('~/models');
const socialLogin =
@@ -11,12 +12,20 @@ const socialLogin =
profile,
});
const oldUser = await findUser({ email: email.trim() });
const existingUser = await findUser({ email: email.trim() });
const ALLOW_SOCIAL_REGISTRATION = isEnabled(process.env.ALLOW_SOCIAL_REGISTRATION);
if (oldUser) {
await handleExistingUser(oldUser, avatarUrl);
return cb(null, oldUser);
if (existingUser?.provider === provider) {
await handleExistingUser(existingUser, avatarUrl);
return cb(null, existingUser);
} else if (existingUser) {
logger.info(
`[${provider}Login] User ${email} already exists with provider ${existingUser.provider}`,
);
const error = new Error(ErrorTypes.AUTH_FAILED);
error.code = ErrorTypes.AUTH_FAILED;
error.provider = existingUser.provider;
return cb(error);
}
if (ALLOW_SOCIAL_REGISTRATION) {

View File

@@ -1370,7 +1370,7 @@
* @property {string} [model] - The model that the assistant used for this run.
* @property {string} [instructions] - The instructions that the assistant used for this run.
* @property {string} [additional_instructions] - Optional. Appends additional instructions
* at theend of the instructions for the run. This is useful for modifying
* at the end of the instructions for the run. This is useful for modifying
* @property {Tool[]} [tools] - The list of tools used for this run.
* @property {string[]} [file_ids] - The list of File IDs used for this run.
* @property {Object} [metadata] - Metadata associated with this run.

View File

@@ -19,6 +19,9 @@ const openAIModels = {
'gpt-4.1': 1047576,
'gpt-4.1-mini': 1047576,
'gpt-4.1-nano': 1047576,
'gpt-5': 400000,
'gpt-5-mini': 400000,
'gpt-5-nano': 400000,
'gpt-4o': 127500, // -500 from max
'gpt-4o-mini': 127500, // -500 from max
'gpt-4o-2024-05-13': 127500, // -500 from max
@@ -234,6 +237,9 @@ const aggregateModels = {
...xAIModels,
// misc.
kimi: 131000,
// GPT-OSS
'gpt-oss-20b': 131000,
'gpt-oss-120b': 131000,
};
const maxTokensMap = {
@@ -250,6 +256,11 @@ const modelMaxOutputs = {
o1: 32268, // -500 from max: 32,768
'o1-mini': 65136, // -500 from max: 65,536
'o1-preview': 32268, // -500 from max: 32,768
'gpt-5': 128000,
'gpt-5-mini': 128000,
'gpt-5-nano': 128000,
'gpt-oss-20b': 131000,
'gpt-oss-120b': 131000,
system_default: 1024,
};
@@ -468,10 +479,11 @@ const tiktokenModels = new Set([
]);
module.exports = {
tiktokenModels,
maxTokensMap,
inputSchema,
modelSchema,
maxTokensMap,
tiktokenModels,
maxOutputTokensMap,
matchModelName,
processModelData,
getModelMaxTokens,

View File

@@ -1,5 +1,11 @@
const { EModelEndpoint } = require('librechat-data-provider');
const { getModelMaxTokens, processModelData, matchModelName, maxTokensMap } = require('./tokens');
const {
maxOutputTokensMap,
getModelMaxTokens,
processModelData,
matchModelName,
maxTokensMap,
} = require('./tokens');
describe('getModelMaxTokens', () => {
test('should return correct tokens for exact match', () => {
@@ -150,6 +156,35 @@ describe('getModelMaxTokens', () => {
);
});
test('should return correct tokens for gpt-5 matches', () => {
expect(getModelMaxTokens('gpt-5')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5']);
expect(getModelMaxTokens('gpt-5-preview')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5']);
expect(getModelMaxTokens('openai/gpt-5')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5']);
expect(getModelMaxTokens('gpt-5-2025-01-30')).toBe(
maxTokensMap[EModelEndpoint.openAI]['gpt-5'],
);
});
test('should return correct tokens for gpt-5-mini matches', () => {
expect(getModelMaxTokens('gpt-5-mini')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5-mini']);
expect(getModelMaxTokens('gpt-5-mini-preview')).toBe(
maxTokensMap[EModelEndpoint.openAI]['gpt-5-mini'],
);
expect(getModelMaxTokens('openai/gpt-5-mini')).toBe(
maxTokensMap[EModelEndpoint.openAI]['gpt-5-mini'],
);
});
test('should return correct tokens for gpt-5-nano matches', () => {
expect(getModelMaxTokens('gpt-5-nano')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5-nano']);
expect(getModelMaxTokens('gpt-5-nano-preview')).toBe(
maxTokensMap[EModelEndpoint.openAI]['gpt-5-nano'],
);
expect(getModelMaxTokens('openai/gpt-5-nano')).toBe(
maxTokensMap[EModelEndpoint.openAI]['gpt-5-nano'],
);
});
test('should return correct tokens for Anthropic models', () => {
const models = [
'claude-2.1',
@@ -349,6 +384,39 @@ describe('getModelMaxTokens', () => {
expect(getModelMaxTokens('o3')).toBe(o3Tokens);
expect(getModelMaxTokens('openai/o3')).toBe(o3Tokens);
});
test('should return correct tokens for GPT-OSS models', () => {
const expected = maxTokensMap[EModelEndpoint.openAI]['gpt-oss-20b'];
['gpt-oss-20b', 'gpt-oss-120b', 'openai/gpt-oss-20b', 'openai/gpt-oss-120b'].forEach((name) => {
expect(getModelMaxTokens(name)).toBe(expected);
});
});
test('should return correct max output tokens for GPT-5 models', () => {
const { getModelMaxOutputTokens } = require('./tokens');
['gpt-5', 'gpt-5-mini', 'gpt-5-nano'].forEach((model) => {
expect(getModelMaxOutputTokens(model)).toBe(maxOutputTokensMap[EModelEndpoint.openAI][model]);
expect(getModelMaxOutputTokens(model, EModelEndpoint.openAI)).toBe(
maxOutputTokensMap[EModelEndpoint.openAI][model],
);
expect(getModelMaxOutputTokens(model, EModelEndpoint.azureOpenAI)).toBe(
maxOutputTokensMap[EModelEndpoint.azureOpenAI][model],
);
});
});
test('should return correct max output tokens for GPT-OSS models', () => {
const { getModelMaxOutputTokens } = require('./tokens');
['gpt-oss-20b', 'gpt-oss-120b'].forEach((model) => {
expect(getModelMaxOutputTokens(model)).toBe(maxOutputTokensMap[EModelEndpoint.openAI][model]);
expect(getModelMaxOutputTokens(model, EModelEndpoint.openAI)).toBe(
maxOutputTokensMap[EModelEndpoint.openAI][model],
);
expect(getModelMaxOutputTokens(model, EModelEndpoint.azureOpenAI)).toBe(
maxOutputTokensMap[EModelEndpoint.azureOpenAI][model],
);
});
});
});
describe('matchModelName', () => {
@@ -420,6 +488,25 @@ describe('matchModelName', () => {
expect(matchModelName('gpt-4.1-nano-2024-08-06')).toBe('gpt-4.1-nano');
});
it('should return the closest matching key for gpt-5 matches', () => {
expect(matchModelName('openai/gpt-5')).toBe('gpt-5');
expect(matchModelName('gpt-5-preview')).toBe('gpt-5');
expect(matchModelName('gpt-5-2025-01-30')).toBe('gpt-5');
expect(matchModelName('gpt-5-2025-01-30-0130')).toBe('gpt-5');
});
it('should return the closest matching key for gpt-5-mini matches', () => {
expect(matchModelName('openai/gpt-5-mini')).toBe('gpt-5-mini');
expect(matchModelName('gpt-5-mini-preview')).toBe('gpt-5-mini');
expect(matchModelName('gpt-5-mini-2025-01-30')).toBe('gpt-5-mini');
});
it('should return the closest matching key for gpt-5-nano matches', () => {
expect(matchModelName('openai/gpt-5-nano')).toBe('gpt-5-nano');
expect(matchModelName('gpt-5-nano-preview')).toBe('gpt-5-nano');
expect(matchModelName('gpt-5-nano-2025-01-30')).toBe('gpt-5-nano');
});
// Tests for Google models
it('should return the exact model name if it exists in maxTokensMap - Google models', () => {
expect(matchModelName('text-bison-32k', EModelEndpoint.google)).toBe('text-bison-32k');

View File

@@ -1,6 +1,6 @@
{
"name": "@librechat/frontend",
"version": "v0.7.9",
"version": "v0.8.0-rc2",
"description": "",
"type": "module",
"scripts": {

View File

@@ -0,0 +1,46 @@
import React, { createContext, useContext, useMemo } from 'react';
import type { TMessage } from 'librechat-data-provider';
import { useChatContext } from './ChatContext';
import { getLatestText } from '~/utils';
interface ArtifactsContextValue {
isSubmitting: boolean;
latestMessageId: string | null;
latestMessageText: string;
conversationId: string | null;
}
const ArtifactsContext = createContext<ArtifactsContextValue | undefined>(undefined);
export function ArtifactsProvider({ children }: { children: React.ReactNode }) {
const { isSubmitting, latestMessage, conversation } = useChatContext();
const latestMessageText = useMemo(() => {
return getLatestText({
messageId: latestMessage?.messageId ?? null,
text: latestMessage?.text ?? null,
content: latestMessage?.content ?? null,
} as TMessage);
}, [latestMessage?.messageId, latestMessage?.text, latestMessage?.content]);
/** Context value only created when relevant values change */
const contextValue = useMemo<ArtifactsContextValue>(
() => ({
isSubmitting,
latestMessageText,
latestMessageId: latestMessage?.messageId ?? null,
conversationId: conversation?.conversationId ?? null,
}),
[isSubmitting, latestMessage?.messageId, latestMessageText, conversation?.conversationId],
);
return <ArtifactsContext.Provider value={contextValue}>{children}</ArtifactsContext.Provider>;
}
export function useArtifactsContext() {
const context = useContext(ArtifactsContext);
if (!context) {
throw new Error('useArtifactsContext must be used within ArtifactsProvider');
}
return context;
}

View File

@@ -23,4 +23,5 @@ export * from './SetConvoContext';
export * from './SearchContext';
export * from './BadgeRowContext';
export * from './SidePanelContext';
export * from './ArtifactsContext';
export { default as BadgeRowProvider } from './BadgeRowContext';

View File

@@ -1,5 +1,5 @@
import debounce from 'lodash/debounce';
import React, { memo, useEffect, useMemo, useCallback } from 'react';
import React, { useMemo, useState, useEffect, useCallback } from 'react';
import {
useSandpack,
SandpackCodeEditor,
@@ -10,8 +10,8 @@ import type { SandpackBundlerFile } from '@codesandbox/sandpack-client';
import type { CodeEditorRef } from '@codesandbox/sandpack-react';
import type { ArtifactFiles, Artifact } from '~/common';
import { useEditArtifact, useGetStartupConfig } from '~/data-provider';
import { useEditorContext, useArtifactsContext } from '~/Providers';
import { sharedFiles, sharedOptions } from '~/utils/artifacts';
import { useEditorContext } from '~/Providers';
const createDebouncedMutation = (
callback: (params: {
@@ -29,18 +29,21 @@ const CodeEditor = ({
editorRef,
}: {
fileKey: string;
readOnly: boolean;
readOnly?: boolean;
artifact: Artifact;
editorRef: React.MutableRefObject<CodeEditorRef>;
}) => {
const { sandpack } = useSandpack();
const [currentUpdate, setCurrentUpdate] = useState<string | null>(null);
const { isMutating, setIsMutating, setCurrentCode } = useEditorContext();
const editArtifact = useEditArtifact({
onMutate: () => {
onMutate: (vars) => {
setIsMutating(true);
setCurrentUpdate(vars.updated);
},
onSuccess: () => {
setIsMutating(false);
setCurrentUpdate(null);
},
onError: () => {
setIsMutating(false);
@@ -71,8 +74,14 @@ const CodeEditor = ({
}
const currentCode = (sandpack.files['/' + fileKey] as SandpackBundlerFile | undefined)?.code;
const isNotOriginal =
currentCode && artifact.content != null && currentCode.trim() !== artifact.content.trim();
const isNotRepeated =
currentUpdate == null
? true
: currentCode != null && currentCode.trim() !== currentUpdate.trim();
if (currentCode && artifact.content != null && currentCode.trim() !== artifact.content.trim()) {
if (artifact.content && isNotOriginal && isNotRepeated) {
setCurrentCode(currentCode);
debouncedMutation({
index: artifact.index,
@@ -92,8 +101,9 @@ const CodeEditor = ({
artifact.messageId,
readOnly,
isMutating,
sandpack.files,
currentUpdate,
setIsMutating,
sandpack.files,
setCurrentCode,
debouncedMutation,
]);
@@ -102,33 +112,32 @@ const CodeEditor = ({
<SandpackCodeEditor
ref={editorRef}
showTabs={false}
readOnly={readOnly}
showRunButton={false}
showLineNumbers={true}
showInlineErrors={true}
readOnly={readOnly === true}
className="hljs language-javascript bg-black"
/>
);
};
export const ArtifactCodeEditor = memo(function ({
export const ArtifactCodeEditor = function ({
files,
fileKey,
template,
artifact,
editorRef,
sharedProps,
isSubmitting,
}: {
fileKey: string;
artifact: Artifact;
files: ArtifactFiles;
isSubmitting: boolean;
template: SandpackProviderProps['template'];
sharedProps: Partial<SandpackProviderProps>;
editorRef: React.MutableRefObject<CodeEditorRef>;
}) {
const { data: config } = useGetStartupConfig();
const { isSubmitting } = useArtifactsContext();
const options: typeof sharedOptions = useMemo(() => {
if (!config) {
return sharedOptions;
@@ -138,6 +147,10 @@ export const ArtifactCodeEditor = memo(function ({
bundlerURL: template === 'static' ? config.staticBundlerURL : config.bundlerURL,
};
}, [config, template]);
const [readOnly, setReadOnly] = useState(isSubmitting ?? false);
useEffect(() => {
setReadOnly(isSubmitting ?? false);
}, [isSubmitting]);
if (Object.keys(files).length === 0) {
return null;
@@ -154,12 +167,7 @@ export const ArtifactCodeEditor = memo(function ({
{...sharedProps}
template={template}
>
<CodeEditor
editorRef={editorRef}
fileKey={fileKey}
readOnly={isSubmitting}
artifact={artifact}
/>
<CodeEditor fileKey={fileKey} artifact={artifact} editorRef={editorRef} readOnly={readOnly} />
</StyledProvider>
);
});
};

View File

@@ -2,12 +2,12 @@ import { useRef, useEffect } from 'react';
import * as Tabs from '@radix-ui/react-tabs';
import type { SandpackPreviewRef, CodeEditorRef } from '@codesandbox/sandpack-react';
import type { Artifact } from '~/common';
import { useEditorContext, useArtifactsContext } from '~/Providers';
import useArtifactProps from '~/hooks/Artifacts/useArtifactProps';
import { useAutoScroll } from '~/hooks/Artifacts/useAutoScroll';
import { ArtifactCodeEditor } from './ArtifactCodeEditor';
import { useGetStartupConfig } from '~/data-provider';
import { ArtifactPreview } from './ArtifactPreview';
import { useEditorContext } from '~/Providers';
import { cn } from '~/utils';
export default function ArtifactTabs({
@@ -15,14 +15,13 @@ export default function ArtifactTabs({
isMermaid,
editorRef,
previewRef,
isSubmitting,
}: {
artifact: Artifact;
isMermaid: boolean;
isSubmitting: boolean;
editorRef: React.MutableRefObject<CodeEditorRef>;
previewRef: React.MutableRefObject<SandpackPreviewRef>;
}) {
const { isSubmitting } = useArtifactsContext();
const { currentCode, setCurrentCode } = useEditorContext();
const { data: startupConfig } = useGetStartupConfig();
const lastIdRef = useRef<string | null>(null);
@@ -52,7 +51,6 @@ export default function ArtifactTabs({
artifact={artifact}
editorRef={editorRef}
sharedProps={sharedProps}
isSubmitting={isSubmitting}
/>
</Tabs.Content>
<Tabs.Content

View File

@@ -29,7 +29,6 @@ export default function Artifacts() {
isMermaid,
setActiveTab,
currentIndex,
isSubmitting,
cycleArtifact,
currentArtifact,
orderedArtifactIds,
@@ -116,7 +115,6 @@ export default function Artifacts() {
<ArtifactTabs
isMermaid={isMermaid}
artifact={currentArtifact}
isSubmitting={isSubmitting}
editorRef={editorRef as React.MutableRefObject<CodeEditorRef>}
previewRef={previewRef as React.MutableRefObject<SandpackPreviewRef>}
/>

View File

@@ -1,6 +1,7 @@
import { useOutletContext, useSearchParams } from 'react-router-dom';
import { useEffect, useState } from 'react';
import { OpenIDIcon } from '@librechat/client';
import { ErrorTypes } from 'librechat-data-provider';
import { OpenIDIcon, useToastContext } from '@librechat/client';
import { useOutletContext, useSearchParams } from 'react-router-dom';
import type { TLoginLayoutContext } from '~/common';
import { ErrorMessage } from '~/components/Auth/ErrorMessage';
import SocialButton from '~/components/Auth/SocialButton';
@@ -11,6 +12,7 @@ import LoginForm from './LoginForm';
function Login() {
const localize = useLocalize();
const { showToast } = useToastContext();
const { error, setError, login } = useAuthContext();
const { startupConfig } = useOutletContext<TLoginLayoutContext>();
@@ -21,6 +23,19 @@ function Login() {
// Persist the disable flag locally so that once detected, auto-redirect stays disabled.
const [isAutoRedirectDisabled, setIsAutoRedirectDisabled] = useState(disableAutoRedirect);
useEffect(() => {
const oauthError = searchParams?.get('error');
if (oauthError && oauthError === ErrorTypes.AUTH_FAILED) {
showToast({
message: localize('com_auth_error_oauth_failed'),
status: 'error',
});
const newParams = new URLSearchParams(searchParams);
newParams.delete('error');
setSearchParams(newParams, { replace: true });
}
}, [searchParams, setSearchParams, showToast, localize]);
// Once the disable flag is detected, update local state and remove the parameter from the URL.
useEffect(() => {
if (disableAutoRedirect) {

View File

@@ -36,6 +36,7 @@ function AttachFileChat({ disableInputs }: { disableInputs: boolean }) {
disabled={disableInputs}
conversationId={conversationId}
endpointFileConfig={endpointFileConfig}
endpoint={endpoint}
/>
);
}

View File

@@ -1,7 +1,7 @@
import React, { useRef, useState, useMemo } from 'react';
import * as Ariakit from '@ariakit/react';
import { useSetRecoilState } from 'recoil';
import { FileSearch, ImageUpIcon, TerminalSquareIcon, FileType2Icon } from 'lucide-react';
import { FileSearch, ImageUpIcon, TerminalSquareIcon, FileType2Icon, FileText } from 'lucide-react';
import { FileUpload, TooltipAnchor, DropdownPopup, AttachmentIcon } from '@librechat/client';
import { EToolResources, EModelEndpoint, defaultAgentCapabilities } from 'librechat-data-provider';
import type { EndpointFileConfig } from 'librechat-data-provider';
@@ -13,9 +13,15 @@ interface AttachFileMenuProps {
conversationId: string;
disabled?: boolean | null;
endpointFileConfig?: EndpointFileConfig;
endpoint?: string | null;
}
const AttachFileMenu = ({ disabled, conversationId, endpointFileConfig }: AttachFileMenuProps) => {
const AttachFileMenu = ({
disabled,
conversationId,
endpointFileConfig,
endpoint,
}: AttachFileMenuProps) => {
const localize = useLocalize();
const isUploadDisabled = disabled ?? false;
const inputRef = useRef<HTMLInputElement>(null);
@@ -23,7 +29,7 @@ const AttachFileMenu = ({ disabled, conversationId, endpointFileConfig }: Attach
const setEphemeralAgent = useSetRecoilState(ephemeralAgentByConvoId(conversationId));
const [toolResource, setToolResource] = useState<EToolResources | undefined>();
const { handleFileChange } = useFileHandling({
overrideEndpoint: EModelEndpoint.agents,
overrideEndpoint: endpoint === EModelEndpoint.anthropic ? undefined : EModelEndpoint.agents,
overrideEndpointFileConfig: endpointFileConfig,
});
@@ -34,12 +40,18 @@ const AttachFileMenu = ({ disabled, conversationId, endpointFileConfig }: Attach
* */
const capabilities = useAgentCapabilities(agentsConfig?.capabilities ?? defaultAgentCapabilities);
const handleUploadClick = (isImage?: boolean) => {
const handleUploadClick = (fileType?: 'image' | 'document') => {
if (!inputRef.current) {
return;
}
inputRef.current.value = '';
inputRef.current.accept = isImage === true ? 'image/*' : '';
if (fileType === 'image') {
inputRef.current.accept = 'image/*';
} else if (fileType === 'document') {
inputRef.current.accept = '.pdf,application/pdf';
} else {
inputRef.current.accept = '';
}
inputRef.current.click();
inputRef.current.accept = '';
};
@@ -50,12 +62,24 @@ const AttachFileMenu = ({ disabled, conversationId, endpointFileConfig }: Attach
label: localize('com_ui_upload_image_input'),
onClick: () => {
setToolResource(undefined);
handleUploadClick(true);
handleUploadClick('image');
},
icon: <ImageUpIcon className="icon-md" />,
},
];
// Add document upload option for Anthropic endpoints
if (endpoint === EModelEndpoint.anthropic) {
items.push({
label: 'Upload to Provider',
onClick: () => {
setToolResource(undefined);
handleUploadClick('document');
},
icon: <FileText className="icon-md" />,
});
}
if (capabilities.ocrEnabled) {
items.push({
label: localize('com_ui_upload_ocr_text'),
@@ -95,7 +119,7 @@ const AttachFileMenu = ({ disabled, conversationId, endpointFileConfig }: Attach
}
return items;
}, [capabilities, localize, setToolResource, setEphemeralAgent]);
}, [capabilities, localize, setToolResource, setEphemeralAgent, endpoint]);
const menuTrigger = (
<TooltipAnchor

View File

@@ -4,7 +4,7 @@ import { FileSources, LocalStorageKeys } from 'librechat-data-provider';
import type { ExtendedFile } from '~/common';
import { useDeleteFilesMutation } from '~/data-provider';
import DragDropWrapper from '~/components/Chat/Input/Files/DragDropWrapper';
import { EditorProvider, SidePanelProvider } from '~/Providers';
import { EditorProvider, SidePanelProvider, ArtifactsProvider } from '~/Providers';
import Artifacts from '~/components/Artifacts/Artifacts';
import { SidePanelGroup } from '~/components/SidePanel';
import { useSetFilesToDelete } from '~/hooks';
@@ -66,9 +66,11 @@ export default function Presentation({ children }: { children: React.ReactNode }
defaultCollapsed={defaultCollapsed}
artifacts={
artifactsVisibility === true && Object.keys(artifacts ?? {}).length > 0 ? (
<EditorProvider>
<Artifacts />
</EditorProvider>
<ArtifactsProvider>
<EditorProvider>
<Artifacts />
</EditorProvider>
</ArtifactsProvider>
) : null
}
>

View File

@@ -25,7 +25,7 @@ type EndpointIcon = {
function getOpenAIColor(_model: string | null | undefined) {
const model = _model?.toLowerCase() ?? '';
if (model && /\b(o\d)\b/i.test(model)) {
if (model && (/\b(o\d)\b/i.test(model) || /\bgpt-[5-9]\b/i.test(model))) {
return '#000000';
}
return model.includes('gpt-4') ? '#AB68FF' : '#19C37D';

View File

@@ -1,6 +1,6 @@
import React, { useMemo } from 'react';
import { useForm, Controller } from 'react-hook-form';
import { Input, Label, Button } from '@librechat/client';
import { Input, Label, Button, TooltipAnchor, CircleHelpIcon } from '@librechat/client';
import { useMCPAuthValuesQuery } from '~/data-provider/Tools/queries';
import { useLocalize } from '~/hooks';
@@ -31,16 +31,25 @@ function AuthField({ name, config, hasValue, control, errors }: AuthFieldProps)
return (
<div className="space-y-2">
<div className="flex items-center justify-between">
<Label htmlFor={name} className="text-sm font-medium">
{config.title}
</Label>
<TooltipAnchor
enableHTML={true}
description={config.description || ''}
render={
<div className="flex items-center gap-2">
<Label htmlFor={name} className="text-sm font-medium">
{config.title}
</Label>
<CircleHelpIcon className="h-6 w-6 cursor-help text-text-secondary transition-colors hover:text-text-primary" />
</div>
}
/>
{hasValue ? (
<div className="flex min-w-fit items-center gap-2 whitespace-nowrap rounded-full border border-border-medium px-2 py-0.5 text-xs font-medium text-text-secondary">
<div className="flex min-w-fit items-center gap-2 whitespace-nowrap rounded-full border border-border-light px-2 py-0.5 text-xs font-medium text-text-secondary">
<div className="h-1.5 w-1.5 rounded-full bg-green-500" />
<span>{localize('com_ui_set')}</span>
</div>
) : (
<div className="flex min-w-fit items-center gap-2 whitespace-nowrap rounded-full border border-border-medium px-2 py-0.5 text-xs font-medium text-text-secondary">
<div className="flex min-w-fit items-center gap-2 whitespace-nowrap rounded-full border border-border-light px-2 py-0.5 text-xs font-medium text-text-secondary">
<div className="h-1.5 w-1.5 rounded-full border border-border-medium" />
<span>{localize('com_ui_unset')}</span>
</div>
@@ -60,16 +69,10 @@ function AuthField({ name, config, hasValue, control, errors }: AuthFieldProps)
? localize('com_ui_mcp_update_var', { 0: config.title })
: localize('com_ui_mcp_enter_var', { 0: config.title })
}
className="w-full rounded-md border-gray-300 shadow-sm focus:border-indigo-500 focus:ring-indigo-500 dark:border-gray-600 dark:bg-gray-700 dark:text-white sm:text-sm"
className="w-full shadow-sm sm:text-sm"
/>
)}
/>
{config.description && (
<p
className="text-xs text-text-secondary [&_a]:text-blue-500 [&_a]:hover:text-blue-600 dark:[&_a]:text-blue-400 dark:[&_a]:hover:text-blue-300"
dangerouslySetInnerHTML={{ __html: config.description }}
/>
)}
{errors[name] && <p className="text-xs text-red-500">{errors[name]?.message}</p>}
</div>
);
@@ -114,15 +117,11 @@ export default function CustomUserVarsSection({
};
if (!fields || Object.keys(fields).length === 0) {
return (
<div className="p-4 text-center text-sm text-gray-500">
{localize('com_sidepanel_mcp_no_custom_vars', { '0': serverName })}
</div>
);
return null;
}
return (
<div className="space-y-4">
<div className="flex-1 space-y-4">
<form onSubmit={handleSubmit(onFormSubmit)} className="space-y-4">
{Object.entries(fields).map(([key, config]) => {
const hasValue = authValuesData?.authValueFlags?.[key] || false;
@@ -140,21 +139,11 @@ export default function CustomUserVarsSection({
})}
</form>
<div className="flex justify-end gap-2 pt-2">
<Button
onClick={handleRevokeClick}
className="bg-red-600 text-white hover:bg-red-700 dark:hover:bg-red-800"
disabled={isSubmitting}
size="sm"
>
<div className="flex justify-end gap-2">
<Button onClick={handleRevokeClick} variant="destructive" disabled={isSubmitting}>
{localize('com_ui_revoke')}
</Button>
<Button
onClick={handleSubmit(onFormSubmit)}
className="bg-green-500 text-white hover:bg-green-600"
disabled={isSubmitting}
size="sm"
>
<Button onClick={handleSubmit(onFormSubmit)} variant="submit" disabled={isSubmitting}>
{isSubmitting ? localize('com_ui_saving') : localize('com_ui_save')}
</Button>
</div>

View File

@@ -1,11 +1,11 @@
import React from 'react';
import { Loader2, KeyRound, PlugZap, AlertTriangle } from 'lucide-react';
import { KeyRound, PlugZap, AlertTriangle } from 'lucide-react';
import {
Spinner,
OGDialog,
OGDialogTitle,
OGDialogHeader,
OGDialogContent,
OGDialogDescription,
} from '@librechat/client';
import type { MCPServerStatus } from 'librechat-data-provider';
import ServerInitializationSection from './ServerInitializationSection';
@@ -45,9 +45,6 @@ export default function MCPConfigDialog({
const dialogTitle = hasFields
? localize('com_ui_configure_mcp_variables_for', { 0: serverName })
: `${serverName} MCP Server`;
const dialogDescription = hasFields
? localize('com_ui_mcp_dialog_desc')
: `Manage connection and settings for the ${serverName} MCP server.`;
// Helper function to render status badge based on connection state
const renderStatusBadge = () => {
@@ -60,7 +57,7 @@ export default function MCPConfigDialog({
if (connectionState === 'connecting') {
return (
<div className="flex items-center gap-2 rounded-full bg-blue-50 px-2 py-0.5 text-xs font-medium text-blue-600 dark:bg-blue-950 dark:text-blue-400">
<Loader2 className="h-3 w-3 animate-spin" />
<Spinner className="h-3 w-3" />
<span>{localize('com_ui_connecting')}</span>
</div>
);
@@ -107,26 +104,24 @@ export default function MCPConfigDialog({
return (
<OGDialog open={isOpen} onOpenChange={onOpenChange}>
<OGDialogContent className="flex max-h-[90vh] w-full max-w-md flex-col">
<OGDialogContent className="flex max-h-screen w-11/12 max-w-lg flex-col space-y-2">
<OGDialogHeader>
<div className="flex items-center gap-3">
<OGDialogTitle>{dialogTitle}</OGDialogTitle>
<OGDialogTitle className="text-xl">
{dialogTitle.charAt(0).toUpperCase() + dialogTitle.slice(1)}
</OGDialogTitle>
{renderStatusBadge()}
</div>
<OGDialogDescription>{dialogDescription}</OGDialogDescription>
</OGDialogHeader>
{/* Content */}
<div className="flex-1 overflow-y-auto p-6">
{/* Custom User Variables Section */}
<CustomUserVarsSection
serverName={serverName}
fields={fieldsSchema}
onSave={onSave}
onRevoke={onRevoke || (() => {})}
isSubmitting={isSubmitting}
/>
</div>
{/* Custom User Variables Section */}
<CustomUserVarsSection
serverName={serverName}
fields={fieldsSchema}
onSave={onSave}
onRevoke={onRevoke || (() => {})}
isSubmitting={isSubmitting}
/>
{/* Server Initialization Section */}
<ServerInitializationSection

View File

@@ -1,5 +1,6 @@
import React from 'react';
import { SettingsIcon, AlertTriangle, Loader2, KeyRound, PlugZap, X } from 'lucide-react';
import { Spinner } from '@librechat/client';
import { SettingsIcon, AlertTriangle, KeyRound, PlugZap, X } from 'lucide-react';
import type { MCPServerStatus, TPlugin } from 'librechat-data-provider';
import { useLocalize } from '~/hooks';
@@ -96,12 +97,12 @@ function InitializingStatusIcon({ serverName, onCancel, canCancel }: Initializin
<button
type="button"
onClick={onCancel}
className="flex h-6 w-6 items-center justify-center rounded p-1 hover:bg-red-100 dark:hover:bg-red-900/20"
className="group flex h-6 w-6 items-center justify-center rounded p-1 hover:bg-red-100 dark:hover:bg-red-900/20"
aria-label={localize('com_ui_cancel')}
title={localize('com_ui_cancel')}
>
<div className="group relative h-4 w-4">
<Loader2 className="h-4 w-4 animate-spin text-blue-500 group-hover:opacity-0" />
<div className="relative h-4 w-4">
<Spinner className="h-4 w-4 group-hover:opacity-0" />
<X className="absolute inset-0 h-4 w-4 text-red-500 opacity-0 group-hover:opacity-100" />
</div>
</button>
@@ -110,8 +111,8 @@ function InitializingStatusIcon({ serverName, onCancel, canCancel }: Initializin
return (
<div className="flex h-6 w-6 items-center justify-center rounded p-1">
<Loader2
className="h-4 w-4 animate-spin text-blue-500"
<Spinner
className="h-4 w-4"
aria-label={localize('com_nav_mcp_status_connecting', { 0: serverName })}
/>
</div>
@@ -121,8 +122,8 @@ function InitializingStatusIcon({ serverName, onCancel, canCancel }: Initializin
function ConnectingStatusIcon({ serverName }: StatusIconProps) {
return (
<div className="flex h-6 w-6 items-center justify-center rounded p-1">
<Loader2
className="h-4 w-4 animate-spin text-blue-500"
<Spinner
className="h-4 w-4"
aria-label={localize('com_nav_mcp_status_connecting', { 0: serverName })}
/>
</div>

View File

@@ -1,23 +1,24 @@
import React, { useCallback } from 'react';
import { Button } from '@librechat/client';
import { RefreshCw, Link } from 'lucide-react';
import React from 'react';
import { RefreshCw } from 'lucide-react';
import { Button, Spinner } from '@librechat/client';
import { useMCPServerManager } from '~/hooks/MCP/useMCPServerManager';
import { useLocalize } from '~/hooks';
interface ServerInitializationSectionProps {
sidePanel?: boolean;
serverName: string;
requiresOAuth: boolean;
hasCustomUserVars?: boolean;
}
export default function ServerInitializationSection({
sidePanel = false,
serverName,
requiresOAuth,
hasCustomUserVars = false,
}: ServerInitializationSectionProps) {
const localize = useLocalize();
// Use the centralized server manager instead of the old initialization hook so we can handle multiple OAuth flows at once
const {
initializeServer,
connectionStatus,
@@ -33,99 +34,66 @@ export default function ServerInitializationSection({
const isServerInitializing = isInitializing(serverName);
const serverOAuthUrl = getOAuthUrl(serverName);
const handleInitializeClick = useCallback(() => {
initializeServer(serverName, false);
}, [initializeServer, serverName]);
const shouldShowReinit = isConnected && (requiresOAuth || hasCustomUserVars);
const shouldShowInit = !isConnected && !serverOAuthUrl;
const handleCancelClick = useCallback(() => {
cancelOAuthFlow(serverName);
}, [cancelOAuthFlow, serverName]);
if (isConnected && (requiresOAuth || hasCustomUserVars)) {
return (
<div className="flex justify-start">
<button
onClick={handleInitializeClick}
disabled={isServerInitializing}
className="flex items-center gap-1 text-xs text-gray-400 hover:text-gray-600 disabled:opacity-50 dark:text-gray-500 dark:hover:text-gray-400"
>
<RefreshCw className={`h-3 w-3 ${isServerInitializing ? 'animate-spin' : ''}`} />
{isServerInitializing ? localize('com_ui_loading') : localize('com_ui_reinitialize')}
</button>
</div>
);
}
if (isConnected) {
if (!shouldShowReinit && !shouldShowInit && !serverOAuthUrl) {
return null;
}
return (
<div className="rounded-lg border border-amber-200 bg-amber-50 p-4 dark:border-amber-700 dark:bg-amber-900/20">
<div className="flex items-center justify-between">
if (serverOAuthUrl) {
return (
<>
<div className="flex items-center gap-2">
<span className="text-sm font-medium text-amber-800 dark:text-amber-200">
{requiresOAuth
? localize('com_ui_mcp_not_authenticated', { 0: serverName })
: localize('com_ui_mcp_not_initialized', { 0: serverName })}
</span>
</div>
{/* Only show authenticate button when OAuth URL is not present */}
{!serverOAuthUrl && (
<Button
onClick={handleInitializeClick}
disabled={isServerInitializing}
className="btn btn-primary focus:shadow-outline flex w-full items-center justify-center px-4 py-2 font-semibold text-white hover:bg-green-600 focus:border-green-500"
onClick={() => cancelOAuthFlow(serverName)}
disabled={!canCancel}
variant="outline"
title={!canCancel ? 'disabled' : undefined}
>
{isServerInitializing ? (
<>
<RefreshCw className="h-4 w-4 animate-spin" />
{localize('com_ui_loading')}
</>
) : (
<>
<RefreshCw className="h-4 w-4" />
{requiresOAuth
? localize('com_ui_authenticate')
: localize('com_ui_mcp_initialize')}
</>
)}
{localize('com_ui_cancel')}
</Button>
<Button
variant="submit"
onClick={() => window.open(serverOAuthUrl, '_blank', 'noopener,noreferrer')}
className="flex-1"
>
{localize('com_ui_continue_oauth')}
</Button>
)}
</div>
{/* OAuth URL display */}
{serverOAuthUrl && (
<div className="mt-4 rounded-lg border border-blue-200 bg-blue-50 p-3 dark:border-blue-700 dark:bg-blue-900/20">
<div className="mb-2 flex items-center gap-2">
<div className="flex h-4 w-4 items-center justify-center rounded-full bg-blue-500">
<Link className="h-2.5 w-2.5 text-white" />
</div>
<span className="text-sm font-medium text-blue-700 dark:text-blue-300">
{localize('com_ui_auth_url')}
</span>
</div>
<div className="flex items-center gap-2">
<Button
onClick={() => window.open(serverOAuthUrl, '_blank', 'noopener,noreferrer')}
className="flex-1 bg-green-600 text-white hover:bg-green-700 dark:hover:bg-green-800"
>
{localize('com_ui_continue_oauth')}
</Button>
<Button
onClick={handleCancelClick}
disabled={!canCancel}
className="bg-gray-200 text-gray-700 hover:bg-gray-300 disabled:cursor-not-allowed disabled:opacity-50 dark:bg-gray-700 dark:text-gray-200 dark:hover:bg-gray-600"
title={!canCancel ? 'disabled' : undefined}
>
{localize('com_ui_cancel')}
</Button>
</div>
<p className="mt-2 text-xs text-blue-600 dark:text-blue-400">
{localize('com_ui_oauth_flow_desc')}
</p>
</div>
)}
</>
);
}
// Unified button rendering
const isReinit = shouldShowReinit;
const outerClass = isReinit ? 'flex justify-start' : 'flex justify-end';
const buttonVariant = isReinit ? undefined : 'default';
const buttonText = isServerInitializing
? localize('com_ui_loading')
: isReinit
? localize('com_ui_reinitialize')
: requiresOAuth
? localize('com_ui_authenticate')
: localize('com_ui_mcp_initialize');
const icon = isServerInitializing ? (
<Spinner className="h-4 w-4" />
) : (
<RefreshCw className="h-4 w-4" />
);
return (
<div className={outerClass}>
<Button
variant={buttonVariant}
onClick={() => initializeServer(serverName, false)}
disabled={isServerInitializing}
size={sidePanel ? 'sm' : 'default'}
className="w-full"
>
{icon}
{buttonText}
</Button>
</div>
);
}
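Note: the hunk above collapses the separate reinitialize, authenticate, and OAuth branches into flag-driven rendering. As a rough paraphrase only, the decision logic can be read as a pure helper like the sketch below; the type and function names are illustrative and do not appear in the change itself.

```ts
// Paraphrase of the branching shown in the hunk, as a standalone helper.
// Names (InitAction, resolveInitAction) are illustrative only.
type InitAction =
  | { kind: 'hidden' }
  | { kind: 'oauth-pending'; oauthUrl: string }
  | { kind: 'button'; label: 'reinitialize' | 'authenticate' | 'initialize' };

function resolveInitAction(opts: {
  isConnected: boolean;
  requiresOAuth: boolean;
  hasCustomUserVars: boolean;
  oauthUrl?: string;
}): InitAction {
  const { isConnected, requiresOAuth, hasCustomUserVars, oauthUrl } = opts;
  const shouldShowReinit = isConnected && (requiresOAuth || hasCustomUserVars);
  const shouldShowInit = !isConnected && !oauthUrl;

  if (oauthUrl) return { kind: 'oauth-pending', oauthUrl }; // cancel + continue buttons
  if (shouldShowReinit) return { kind: 'button', label: 'reinitialize' };
  if (shouldShowInit) {
    return { kind: 'button', label: requiresOAuth ? 'authenticate' : 'initialize' };
  }
  return { kind: 'hidden' }; // component returns null
}
```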

View File

@@ -1,6 +1,5 @@
// file deepcode ignore HardcodedNonCryptoSecret: No hardcoded secrets
import { ViolationTypes, ErrorTypes, alternateName } from 'librechat-data-provider';
import type { TOpenAIMessage } from 'librechat-data-provider';
import type { LocalizeFunction } from '~/common';
import { formatJSON, extractJson, isJson } from '~/utils/json';
import { useLocalize } from '~/hooks';
@@ -25,7 +24,7 @@ type TTokenBalance = {
prev_count: number;
violation_count: number;
date: Date;
generations?: TOpenAIMessage[];
generations?: unknown[];
};
type TExpiredKey = {
@@ -44,6 +43,17 @@ const errorMessages = {
[ErrorTypes.NO_BASE_URL]: 'com_error_no_base_url',
[ErrorTypes.INVALID_ACTION]: `com_error_${ErrorTypes.INVALID_ACTION}`,
[ErrorTypes.INVALID_REQUEST]: `com_error_${ErrorTypes.INVALID_REQUEST}`,
[ErrorTypes.MISSING_MODEL]: (json: TGenericError, localize: LocalizeFunction) => {
const { info: endpoint } = json;
const provider = (alternateName[endpoint ?? ''] as string | undefined) ?? endpoint ?? 'unknown';
return localize('com_error_missing_model', { 0: provider });
},
[ErrorTypes.MODELS_NOT_LOADED]: 'com_error_models_not_loaded',
[ErrorTypes.ENDPOINT_MODELS_NOT_LOADED]: (json: TGenericError, localize: LocalizeFunction) => {
const { info: endpoint } = json;
const provider = (alternateName[endpoint ?? ''] as string | undefined) ?? endpoint ?? 'unknown';
return localize('com_error_endpoint_models_not_loaded', { 0: provider });
},
[ErrorTypes.NO_SYSTEM_MESSAGES]: `com_error_${ErrorTypes.NO_SYSTEM_MESSAGES}`,
[ErrorTypes.EXPIRED_USER_KEY]: (json: TExpiredKey, localize: LocalizeFunction) => {
const { expiredAt, endpoint } = json;
@@ -65,6 +75,12 @@ const errorMessages = {
[ErrorTypes.GOOGLE_TOOL_CONFLICT]: 'com_error_google_tool_conflict',
[ViolationTypes.BAN]:
'Your account has been temporarily banned due to violations of our service.',
[ViolationTypes.ILLEGAL_MODEL_REQUEST]: (json: TGenericError, localize: LocalizeFunction) => {
const { info } = json;
const [endpoint, model = 'unknown'] = info?.split('|') ?? [];
const provider = (alternateName[endpoint ?? ''] as string | undefined) ?? endpoint ?? 'unknown';
return localize('com_error_illegal_model_request', { 0: model, 1: provider });
},
invalid_api_key:
'Invalid API key. Please check your API key and try again. You can do this by clicking on the model logo in the left corner of the textbox and selecting "Set Token" for the current selected endpoint. Thank you for your understanding.',
insufficient_quota:
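The new entries above (MISSING_MODEL, ENDPOINT_MODELS_NOT_LOADED, ILLEGAL_MODEL_REQUEST) are function-valued: they receive the parsed error JSON plus a localize function and build a provider-specific message via alternateName. The consumer of the map is outside this hunk, so the dispatch below is an assumption, with the real types reduced to minimal local stand-ins.

```ts
// Sketch only: how a function-valued errorMessages entry might be invoked.
type LocalizeFn = (key: string, vars?: Record<string, string>) => string;
type GenericError = { info?: string };
type ErrorEntry = string | ((json: GenericError, localize: LocalizeFn) => string);

function renderKnownError(entry: ErrorEntry, json: GenericError, localize: LocalizeFn): string {
  if (typeof entry === 'function') {
    // e.g. MISSING_MODEL maps json.info through alternateName and localizes
    // 'com_error_missing_model' with the resolved provider name
    return entry(json, localize);
  }
  // plain entries are either localization keys or literal strings; returned as-is here
  return entry;
}
```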

View File

@@ -72,7 +72,7 @@ export default function ExportModal({
const { exportConversation } = useExportConversation({
conversation,
filename,
filename: filenamify(filename),
type,
includeOptions,
exportBranches,
@@ -95,7 +95,7 @@ export default function ExportModal({
<Input
id="filename"
value={filename}
onChange={(e) => setFileName(filenamify(e.target.value || ''))}
onChange={(e) => setFileName(e.target.value || '')}
placeholder={localize('com_nav_export_filename_placeholder')}
/>
</div>
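This change moves filename sanitization from the input's onChange to the point of export, so the user can type freely and the name is only normalized when the file is actually written. A minimal sketch of that idea, assuming the filenamify package's documented behavior:

```ts
import filenamify from 'filenamify';

// The typed value is stored untouched in state...
const typed = 'report: 2025/08?';

// ...and only sanitized when the export payload is built.
// Characters that are unsafe in filenames get replaced
// (the replacement character is configurable via options).
const safe = filenamify(typed);
```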

View File

@@ -7,7 +7,7 @@ import BackupCodesItem from './BackupCodesItem';
import { useAuthContext } from '~/hooks';
function Account() {
const user = useAuthContext();
const { user } = useAuthContext();
return (
<div className="flex flex-col gap-3 p-1 text-sm text-text-primary">
@@ -17,12 +17,12 @@ function Account() {
<div className="pb-3">
<Avatar />
</div>
{user?.user?.provider === 'local' && (
{user?.provider === 'local' && (
<>
<div className="pb-3">
<EnableTwoFactorItem />
</div>
{user?.user?.twoFactorEnabled && (
{user?.twoFactorEnabled && (
<div className="pb-3">
<BackupCodesItem />
</div>

View File

@@ -39,8 +39,8 @@ const TwoFactorAuthentication: React.FC = () => {
const [secret, setSecret] = useState<string>('');
const [otpauthUrl, setOtpauthUrl] = useState<string>('');
const [downloaded, setDownloaded] = useState<boolean>(false);
const [disableToken, setDisableToken] = useState<string>('');
const [backupCodes, setBackupCodes] = useState<string[]>([]);
const [_disableToken, setDisableToken] = useState<string>('');
const [isDialogOpen, setDialogOpen] = useState<boolean>(false);
const [verificationToken, setVerificationToken] = useState<string>('');
const [phase, setPhase] = useState<Phase>(user?.twoFactorEnabled ? 'disable' : 'setup');
@@ -166,32 +166,26 @@ const TwoFactorAuthentication: React.FC = () => {
payload.token = token.trim();
}
verify2FAMutate(payload, {
disable2FAMutate(payload, {
onSuccess: () => {
disable2FAMutate(undefined, {
onSuccess: () => {
showToast({ message: localize('com_ui_2fa_disabled') });
setDialogOpen(false);
setUser(
(prev) =>
({
...prev,
totpSecret: '',
backupCodes: [],
twoFactorEnabled: false,
}) as TUser,
);
setPhase('setup');
setOtpauthUrl('');
},
onError: () =>
showToast({ message: localize('com_ui_2fa_disable_error'), status: 'error' }),
});
showToast({ message: localize('com_ui_2fa_disabled') });
setDialogOpen(false);
setUser(
(prev) =>
({
...prev,
totpSecret: '',
backupCodes: [],
twoFactorEnabled: false,
}) as TUser,
);
setPhase('setup');
setOtpauthUrl('');
},
onError: () => showToast({ message: localize('com_ui_2fa_invalid'), status: 'error' }),
});
},
[verify2FAMutate, disable2FAMutate, showToast, localize, setUser],
[disable2FAMutate, showToast, localize, setUser],
);
return (

View File

@@ -105,6 +105,8 @@ export const LangSelector = ({
{ value: 'nl-NL', label: localize('com_nav_lang_dutch') },
{ value: 'id-ID', label: localize('com_nav_lang_indonesia') },
{ value: 'fi-FI', label: localize('com_nav_lang_finnish') },
{ value: 'bo', label: localize('com_nav_lang_tibetan') },
{ value: 'uk-UA', label: localize('com_nav_lang_ukrainian') },
];
return (

View File

@@ -0,0 +1,380 @@
/**
* @jest-environment jsdom
*/
import * as React from 'react';
import { describe, it, expect, beforeEach, jest } from '@jest/globals';
import { render, waitFor, fireEvent } from '@testing-library/react';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import type { Agent } from 'librechat-data-provider';
// Mock toast context - define this after all mocks
let mockShowToast: jest.Mock;
// Mock notification severity enum before other imports
jest.mock('~/common/types', () => ({
NotificationSeverity: {
SUCCESS: 'success',
ERROR: 'error',
INFO: 'info',
WARNING: 'warning',
},
}));
// Mock store to prevent import errors
jest.mock('~/store/toast', () => ({
default: () => ({
showToast: jest.fn(),
}),
}));
jest.mock('~/store', () => {});
// Mock the data service to control network responses
jest.mock('librechat-data-provider', () => {
const actualModule = jest.requireActual('librechat-data-provider') as any;
return {
...actualModule,
dataService: {
updateAgent: jest.fn(),
},
Tools: {
execute_code: 'execute_code',
file_search: 'file_search',
web_search: 'web_search',
},
Constants: {
EPHEMERAL_AGENT_ID: 'ephemeral',
},
SystemRoles: {
ADMIN: 'ADMIN',
},
EModelEndpoint: {
agents: 'agents',
chatGPTBrowser: 'chatGPTBrowser',
gptPlugins: 'gptPlugins',
},
isAssistantsEndpoint: jest.fn(() => false),
};
});
jest.mock('@librechat/client', () => ({
Button: ({ children, onClick, ...props }: any) => (
<button onClick={onClick} {...props}>
{children}
</button>
),
useToastContext: () => ({
get showToast() {
return mockShowToast || jest.fn();
},
}),
}));
// Mock other dependencies
jest.mock('librechat-data-provider/react-query', () => ({
useGetModelsQuery: () => ({ data: {} }),
}));
jest.mock('~/utils', () => ({
createProviderOption: jest.fn((provider: string) => ({ value: provider, label: provider })),
getDefaultAgentFormValues: jest.fn(() => ({
id: '',
name: '',
description: '',
model: '',
provider: '',
})),
}));
jest.mock('~/hooks', () => ({
useSelectAgent: () => ({ onSelect: jest.fn() }),
useLocalize: () => (key: string) => key,
useAuthContext: () => ({ user: { id: 'user-123', role: 'USER' } }),
}));
jest.mock('~/Providers/AgentPanelContext', () => ({
useAgentPanelContext: () => ({
activePanel: 'builder',
agentsConfig: { allowedProviders: [] },
setActivePanel: jest.fn(),
endpointsConfig: {},
setCurrentAgentId: jest.fn(),
agent_id: 'agent-123',
}),
}));
jest.mock('~/common', () => ({
Panel: {
model: 'model',
builder: 'builder',
advanced: 'advanced',
},
}));
// Mock child components to simplify testing
jest.mock('./AgentPanelSkeleton', () => ({
__esModule: true,
default: () => <div>{`Loading...`}</div>,
}));
jest.mock('./Advanced/AdvancedPanel', () => ({
__esModule: true,
default: () => <div>{`Advanced Panel`}</div>,
}));
jest.mock('./AgentConfig', () => ({
__esModule: true,
default: () => <div>{`Agent Config`}</div>,
}));
jest.mock('./AgentSelect', () => ({
__esModule: true,
default: () => <div>{`Agent Select`}</div>,
}));
jest.mock('./ModelPanel', () => ({
__esModule: true,
default: () => <div>{`Model Panel`}</div>,
}));
// Mock AgentFooter to provide a save button
jest.mock('./AgentFooter', () => ({
__esModule: true,
default: () => (
<button type="submit" data-testid="save-agent-button">
{`Save Agent`}
</button>
),
}));
// Mock react-hook-form to capture form submission
let mockFormSubmitHandler: (() => void) | null = null;
jest.mock('react-hook-form', () => {
const actual = jest.requireActual('react-hook-form') as any;
return {
...actual,
useForm: () => {
const methods = actual.useForm({
defaultValues: {
id: 'agent-123',
name: 'Test Agent',
description: 'Test description',
model: 'gpt-4',
provider: 'openai',
tools: [],
execute_code: false,
file_search: false,
web_search: false,
},
});
return {
...methods,
handleSubmit: (onSubmit: any) => (e?: any) => {
e?.preventDefault?.();
mockFormSubmitHandler = () => onSubmit(methods.getValues());
return mockFormSubmitHandler;
},
};
},
FormProvider: ({ children }: any) => children,
useWatch: () => 'agent-123',
};
});
// Import after mocks
import { dataService } from 'librechat-data-provider';
import { useGetAgentByIdQuery } from '~/data-provider';
import AgentPanel from './AgentPanel';
// Mock useGetAgentByIdQuery
jest.mock('~/data-provider', () => {
const actual = jest.requireActual('~/data-provider') as any;
return {
...actual,
useGetAgentByIdQuery: jest.fn(),
useUpdateAgentMutation: actual.useUpdateAgentMutation,
};
});
// Test wrapper with QueryClient
const createWrapper = () => {
const queryClient = new QueryClient({
defaultOptions: {
queries: { retry: false },
mutations: { retry: false },
},
});
return ({ children }: { children: React.ReactNode }) => (
<QueryClientProvider client={queryClient}>{children}</QueryClientProvider>
);
};
// Test helpers
const setupMocks = () => {
const mockUseGetAgentByIdQuery = useGetAgentByIdQuery as jest.MockedFunction<
typeof useGetAgentByIdQuery
>;
const mockUpdateAgent = dataService.updateAgent as jest.MockedFunction<
typeof dataService.updateAgent
>;
return { mockUseGetAgentByIdQuery, mockUpdateAgent };
};
const mockAgentQuery = (
mockUseGetAgentByIdQuery: jest.MockedFunction<typeof useGetAgentByIdQuery>,
agent: Partial<Agent>,
) => {
mockUseGetAgentByIdQuery.mockReturnValue({
data: {
id: 'agent-123',
author: 'user-123',
isCollaborative: false,
...agent,
} as Agent,
isInitialLoading: false,
} as any);
};
const createMockAgent = (overrides: Partial<Agent> = {}): Agent =>
({
id: 'agent-123',
provider: 'openai',
model: 'gpt-4',
...overrides,
}) as Agent;
const renderAndSubmitForm = async () => {
const Wrapper = createWrapper();
const { container, rerender } = render(<AgentPanel />, { wrapper: Wrapper });
const form = container.querySelector('form');
expect(form).toBeTruthy();
fireEvent.submit(form!);
if (mockFormSubmitHandler) {
mockFormSubmitHandler();
}
return { container, rerender, form };
};
describe('AgentPanel - Update Agent Toast Messages', () => {
beforeEach(() => {
jest.clearAllMocks();
mockShowToast = jest.fn();
mockFormSubmitHandler = null;
});
describe('AgentPanel', () => {
it('should show "no changes" toast when version does not change', async () => {
const { mockUseGetAgentByIdQuery, mockUpdateAgent } = setupMocks();
// Mock the agent query with version 2
mockAgentQuery(mockUseGetAgentByIdQuery, {
name: 'Test Agent',
version: 2,
});
// Mock network response - same version
mockUpdateAgent.mockResolvedValue(createMockAgent({ name: 'Test Agent', version: 2 }));
await renderAndSubmitForm();
// Wait for the toast to be shown
await waitFor(() => {
expect(mockShowToast).toHaveBeenCalledWith({
message: 'com_ui_no_changes',
status: 'info',
});
});
});
it('should show "update success" toast when version changes', async () => {
const { mockUseGetAgentByIdQuery, mockUpdateAgent } = setupMocks();
// Mock the agent query with version 2
mockAgentQuery(mockUseGetAgentByIdQuery, {
name: 'Test Agent',
version: 2,
});
// Mock network response - different version
mockUpdateAgent.mockResolvedValue(createMockAgent({ name: 'Test Agent', version: 3 }));
await renderAndSubmitForm();
await waitFor(() => {
expect(mockShowToast).toHaveBeenCalledWith({
message: 'com_assistants_update_success Test Agent',
});
});
});
it('should show "update success" with default name when agent has no name', async () => {
const { mockUseGetAgentByIdQuery, mockUpdateAgent } = setupMocks();
// Mock the agent query without name
mockAgentQuery(mockUseGetAgentByIdQuery, {
version: 1,
});
// Mock network response - no name
mockUpdateAgent.mockResolvedValue(createMockAgent({ version: 2 }));
await renderAndSubmitForm();
await waitFor(() => {
expect(mockShowToast).toHaveBeenCalledWith({
message: 'com_assistants_update_success com_ui_agent',
});
});
});
it('should show "update success" when agent query has no version (undefined)', async () => {
const { mockUseGetAgentByIdQuery, mockUpdateAgent } = setupMocks();
// Mock the agent query with no version data
mockAgentQuery(mockUseGetAgentByIdQuery, {
name: 'Test Agent',
// No version property
});
mockUpdateAgent.mockResolvedValue(createMockAgent({ name: 'Test Agent', version: 1 }));
await renderAndSubmitForm();
await waitFor(() => {
expect(mockShowToast).toHaveBeenCalledWith({
message: 'com_assistants_update_success Test Agent',
});
});
});
it('should show error toast on update failure', async () => {
const { mockUseGetAgentByIdQuery, mockUpdateAgent } = setupMocks();
// Mock the agent query
mockAgentQuery(mockUseGetAgentByIdQuery, {
name: 'Test Agent',
version: 1,
});
// Mock network error
mockUpdateAgent.mockRejectedValue(new Error('Update failed'));
await renderAndSubmitForm();
await waitFor(() => {
expect(mockShowToast).toHaveBeenCalledWith({
message: 'com_agents_update_error com_ui_error: Update failed',
status: 'error',
});
});
});
});
});

View File

@@ -1,5 +1,5 @@
import { Plus } from 'lucide-react';
import React, { useMemo, useCallback } from 'react';
import React, { useMemo, useCallback, useRef } from 'react';
import { Button, useToastContext } from '@librechat/client';
import { useWatch, useForm, FormProvider } from 'react-hook-form';
import { useGetModelsQuery } from 'librechat-data-provider/react-query';
@@ -54,6 +54,7 @@ export default function AgentPanel() {
const { control, handleSubmit, reset } = methods;
const agent_id = useWatch({ control, name: 'id' });
const previousVersionRef = useRef<number | undefined>();
const allowedProviders = useMemo(
() => new Set(agentsConfig?.allowedProviders),
@@ -77,50 +78,29 @@ export default function AgentPanel() {
/* Mutations */
const update = useUpdateAgentMutation({
onMutate: () => {
// Store the current version before mutation
previousVersionRef.current = agentQuery.data?.version;
},
onSuccess: (data) => {
showToast({
message: `${localize('com_assistants_update_success')} ${
data.name ?? localize('com_ui_agent')
}`,
});
// Check if agent version is the same (no changes were made)
if (previousVersionRef.current !== undefined && data.version === previousVersionRef.current) {
showToast({
message: localize('com_ui_no_changes'),
status: 'info',
});
} else {
showToast({
message: `${localize('com_assistants_update_success')} ${
data.name ?? localize('com_ui_agent')
}`,
});
}
// Clear the ref after use
previousVersionRef.current = undefined;
},
onError: (err) => {
const error = err as Error & {
statusCode?: number;
details?: { duplicateVersion?: any; versionIndex?: number };
response?: { status?: number; data?: any };
};
const isDuplicateVersionError =
(error.statusCode === 409 && error.details?.duplicateVersion) ||
(error.response?.status === 409 && error.response?.data?.details?.duplicateVersion);
if (isDuplicateVersionError) {
let versionIndex: number | undefined = undefined;
if (error.details?.versionIndex !== undefined) {
versionIndex = error.details.versionIndex;
} else if (error.response?.data?.details?.versionIndex !== undefined) {
versionIndex = error.response.data.details.versionIndex;
}
if (versionIndex === undefined || versionIndex < 0) {
showToast({
message: localize('com_agents_update_error'),
status: 'error',
duration: 5000,
});
} else {
showToast({
message: localize('com_ui_agent_version_duplicate', { versionIndex: versionIndex + 1 }),
status: 'error',
duration: 10000,
});
}
return;
}
const error = err as Error;
showToast({
message: `${localize('com_agents_update_error')}${
error.message ? ` ${localize('com_ui_error')}: ${error.message}` : ''
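The onMutate/onSuccess pair above snapshots the agent's version before the update and compares it afterwards to decide between a "no changes" toast and a success toast. A stripped-down sketch of that pattern follows; the mutation wiring and toast API are simplified placeholders, not the component's actual code.

```ts
// Minimal sketch of the version-comparison pattern from the hunk above.
const previousVersionRef: { current: number | undefined } = { current: undefined };

const notify = (message: string) => console.log(message); // stands in for showToast

function onMutate(currentVersion?: number) {
  // snapshot the version before the update request goes out
  previousVersionRef.current = currentVersion;
}

function onSuccess(updated: { name?: string; version?: number }) {
  const unchanged =
    previousVersionRef.current !== undefined && updated.version === previousVersionRef.current;
  notify(unchanged ? 'no changes' : `updated ${updated.name ?? 'agent'}`);
  previousVersionRef.current = undefined; // clear the snapshot after use
}
```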

View File

@@ -6,8 +6,8 @@ import { Constants, QueryKeys } from 'librechat-data-provider';
import { useUpdateUserPluginsMutation } from 'librechat-data-provider/react-query';
import type { TUpdateUserPlugins } from 'librechat-data-provider';
import ServerInitializationSection from '~/components/MCP/ServerInitializationSection';
import CustomUserVarsSection from '~/components/MCP/CustomUserVarsSection';
import { useMCPConnectionStatusQuery } from '~/data-provider/Tools/queries';
import CustomUserVarsSection from '~/components/MCP/CustomUserVarsSection';
import BadgeRowProvider from '~/Providers/BadgeRowContext';
import { useGetStartupConfig } from '~/data-provider';
import MCPPanelSkeleton from './MCPPanelSkeleton';
@@ -127,20 +127,12 @@ function MCPPanelContent() {
const serverStatus = connectionStatus[selectedServerNameForEditing];
return (
<div className="h-auto max-w-full overflow-x-hidden p-3">
<Button
variant="outline"
onClick={handleGoBackToList}
className="mb-3 flex items-center px-3 py-2 text-sm"
>
<div className="h-auto max-w-full space-y-4 overflow-x-hidden py-2">
<Button variant="outline" onClick={handleGoBackToList} size="sm">
<ChevronLeft className="mr-1 h-4 w-4" />
{localize('com_ui_back')}
</Button>
<h3 className="mb-3 text-lg font-medium">
{localize('com_sidepanel_mcp_variables_for', { '0': serverBeingEdited.serverName })}
</h3>
<div className="mb-4">
<CustomUserVarsSection
serverName={selectedServerNameForEditing}
@@ -160,6 +152,7 @@ function MCPPanelContent() {
</div>
<ServerInitializationSection
sidePanel={true}
serverName={selectedServerNameForEditing}
requiresOAuth={serverStatus?.requiresOAuth || false}
hasCustomUserVars={
@@ -172,7 +165,7 @@ function MCPPanelContent() {
} else {
// Server List View
return (
<div className="h-auto max-w-full overflow-x-hidden p-3">
<div className="h-auto max-w-full overflow-x-hidden py-2">
<div className="space-y-2">
{mcpServerDefinitions.map((server) => {
const serverStatus = connectionStatus[server.serverName];
@@ -189,7 +182,7 @@ function MCPPanelContent() {
<span>{server.serverName}</span>
{serverStatus && (
<span
className={`rounded px-2 py-0.5 text-xs ${
className={`rounded-xl px-2 py-0.5 text-xs ${
isConnected
? 'bg-green-100 text-green-700 dark:bg-green-900 dark:text-green-300'
: 'bg-gray-100 text-gray-700 dark:bg-gray-800 dark:text-gray-300'

View File

@@ -43,11 +43,7 @@ export const useCreateAgentMutation = (
*/
export const useUpdateAgentMutation = (
options?: t.UpdateAgentMutationOptions,
): UseMutationResult<
t.Agent,
t.DuplicateVersionError,
{ agent_id: string; data: t.AgentUpdateParams }
> => {
): UseMutationResult<t.Agent, Error, { agent_id: string; data: t.AgentUpdateParams }> => {
const queryClient = useQueryClient();
return useMutation(
({ agent_id, data }: { agent_id: string; data: t.AgentUpdateParams }) => {
@@ -59,8 +55,7 @@ export const useUpdateAgentMutation = (
{
onMutate: (variables) => options?.onMutate?.(variables),
onError: (error, variables, context) => {
const typedError = error as t.DuplicateVersionError;
return options?.onError?.(typedError, variables, context);
return options?.onError?.(error, variables, context);
},
onSuccess: (updatedAgent, variables, context) => {
const listRes = queryClient.getQueryData<t.AgentListResponse>([

View File

@@ -134,12 +134,12 @@ export const useConfirmTwoFactorMutation = (): UseMutationResult<
export const useDisableTwoFactorMutation = (): UseMutationResult<
t.TDisable2FAResponse,
unknown,
void,
t.TDisable2FARequest | undefined,
unknown
> => {
const queryClient = useQueryClient();
return useMutation(() => dataService.disableTwoFactor(), {
onSuccess: (data) => {
return useMutation((payload?: t.TDisable2FARequest) => dataService.disableTwoFactor(payload), {
onSuccess: () => {
queryClient.setQueryData([QueryKeys.user, '2fa'], null);
},
});
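With the widened signature above, the disable mutation now accepts an optional TDisable2FARequest payload, which is what lets the 2FA settings component pass the OTP or backup code straight to disableTwoFactor instead of verifying first. A hedged usage sketch; the import path and payload shape are assumptions based on the surrounding hunks.

```ts
// Usage sketch, intended to run inside a React component.
import { useDisableTwoFactorMutation } from '~/data-provider'; // path assumed

function useDisable2FAExample() {
  const { mutate: disable2FAMutate } = useDisableTwoFactorMutation();

  return (token: string) =>
    disable2FAMutate(
      { token: token.trim() }, // payload shape assumed to carry the OTP or backup code
      {
        onSuccess: () => {
          // clear local 2FA state and show the "disabled" toast
        },
        onError: () => {
          // show the "invalid code" toast
        },
      },
    );
}
```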

View File

@@ -55,9 +55,10 @@ export const useUpdateConversationMutation = (
return useMutation(
(payload: t.TUpdateConversationRequest) => dataService.updateConversation(payload),
{
onSuccess: (updatedConvo) => {
queryClient.setQueryData([QueryKeys.conversation, id], updatedConvo);
updateConvoInAllQueries(queryClient, id, () => updatedConvo);
onSuccess: (updatedConvo, payload) => {
const targetId = payload.conversationId || id;
queryClient.setQueryData([QueryKeys.conversation, targetId], updatedConvo);
updateConvoInAllQueries(queryClient, targetId, () => updatedConvo);
},
},
);
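The onSuccess change keys the cache write on the conversation that was actually updated (payload.conversationId) rather than the hook's bound id, so updating a conversation other than the one currently in view no longer touches the wrong cache entry. A compact sketch of the fallback, with hypothetical ids:

```ts
// Sketch of the cache-key fallback; the ids are hypothetical.
const hookId = 'convo-open-in-view';
const payload = { conversationId: 'convo-being-renamed', title: 'New title' };

const targetId = payload.conversationId || hookId;
// queryClient.setQueryData([QueryKeys.conversation, targetId], updatedConvo)
// now writes under 'convo-being-renamed', not the conversation bound to the hook.
```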

View File

@@ -1,14 +1,15 @@
import { useMemo, useState, useEffect, useRef } from 'react';
import { Constants } from 'librechat-data-provider';
import { useRecoilState, useRecoilValue, useResetRecoilState } from 'recoil';
import { getLatestText, logger } from '~/utils';
import { useChatContext } from '~/Providers';
import { logger } from '~/utils';
import { useArtifactsContext } from '~/Providers';
import { getKey } from '~/utils/artifacts';
import store from '~/store';
export default function useArtifacts() {
const [activeTab, setActiveTab] = useState('preview');
const { isSubmitting, latestMessage, conversation } = useChatContext();
const { isSubmitting, latestMessageId, latestMessageText, conversationId } =
useArtifactsContext();
const artifacts = useRecoilValue(store.artifactsState);
const resetArtifacts = useResetRecoilState(store.artifactsState);
@@ -31,26 +32,23 @@ export default function useArtifacts() {
const resetState = () => {
resetArtifacts();
resetCurrentArtifactId();
prevConversationIdRef.current = conversation?.conversationId ?? null;
prevConversationIdRef.current = conversationId;
lastRunMessageIdRef.current = null;
lastContentRef.current = null;
hasEnclosedArtifactRef.current = false;
};
if (
conversation?.conversationId !== prevConversationIdRef.current &&
prevConversationIdRef.current != null
) {
if (conversationId !== prevConversationIdRef.current && prevConversationIdRef.current != null) {
resetState();
} else if (conversation?.conversationId === Constants.NEW_CONVO) {
} else if (conversationId === Constants.NEW_CONVO) {
resetState();
}
prevConversationIdRef.current = conversation?.conversationId ?? null;
prevConversationIdRef.current = conversationId;
/** Resets artifacts when unmounting */
return () => {
logger.log('artifacts_visibility', 'Unmounting artifacts');
resetState();
};
}, [conversation?.conversationId, resetArtifacts, resetCurrentArtifactId]);
}, [conversationId, resetArtifacts, resetCurrentArtifactId]);
useEffect(() => {
if (orderedArtifactIds.length > 0) {
@@ -66,7 +64,7 @@ export default function useArtifacts() {
if (orderedArtifactIds.length === 0) {
return;
}
if (latestMessage == null) {
if (latestMessageId == null) {
return;
}
const latestArtifactId = orderedArtifactIds[orderedArtifactIds.length - 1];
@@ -78,7 +76,6 @@ export default function useArtifacts() {
setCurrentArtifactId(latestArtifactId);
lastContentRef.current = latestArtifact?.content ?? null;
const latestMessageText = getLatestText(latestMessage);
const hasEnclosedArtifact =
/:::artifact(?:\{[^}]*\})?(?:\s|\n)*(?:```[\s\S]*?```(?:\s|\n)*)?:::/m.test(
latestMessageText.trim(),
@@ -95,15 +92,22 @@ export default function useArtifacts() {
hasAutoSwitchedToCodeRef.current = true;
}
}
}, [setCurrentArtifactId, isSubmitting, orderedArtifactIds, artifacts, latestMessage]);
}, [
artifacts,
isSubmitting,
latestMessageId,
latestMessageText,
orderedArtifactIds,
setCurrentArtifactId,
]);
useEffect(() => {
if (latestMessage?.messageId !== lastRunMessageIdRef.current) {
lastRunMessageIdRef.current = latestMessage?.messageId ?? null;
if (latestMessageId !== lastRunMessageIdRef.current) {
lastRunMessageIdRef.current = latestMessageId;
hasEnclosedArtifactRef.current = false;
hasAutoSwitchedToCodeRef.current = false;
}
}, [latestMessage]);
}, [latestMessageId]);
const currentArtifact = currentArtifactId != null ? artifacts?.[currentArtifactId] : null;
@@ -131,7 +135,6 @@ export default function useArtifacts() {
isMermaid,
setActiveTab,
currentIndex,
isSubmitting,
cycleArtifact,
currentArtifact,
orderedArtifactIds,

Some files were not shown because too many files have changed in this diff.