Compare commits


1 Commit

Author: Dustin Healy
SHA1: 66dc48c8a0
Date: 2025-07-26 18:14:02 -07:00
Message: 🔧 WIP: Enhance Bedrock endpoint configuration with user-provided credentials. (user_provided bearer token support still needs to be implemented, but the UI for it is already in place.)
- Added support for user-provided AWS credentials (Access Key ID, Secret Access Key, Session Token, Bearer Token) in the Bedrock endpoint configuration.
- Localized new strings for Bedrock configuration in translation files.
772 changed files with 4499 additions and 18599 deletions
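
The commit message names four credential fields. A minimal sketch of what a user-provided Bedrock credential object and its fallback resolution could look like; the field names, the resolver, and the BEDROCK_AWS_* environment variables are assumptions for illustration, not confirmed by the diff itself:

    // Hypothetical shape of the user-provided credentials described above.
    const bedrockUserCredentials = {
      accessKeyId: 'AKIA...',   // AWS Access Key ID
      secretAccessKey: '...',   // AWS Secret Access Key
      sessionToken: '...',      // optional, for temporary credentials
      bearerToken: '...',       // UI exists; backend support still WIP per the message
    };

    // A resolver might prefer user-provided values and fall back to env vars
    // (env var names are assumed here):
    function resolveBedrockCredentials(userProvided = {}) {
      return {
        accessKeyId: userProvided.accessKeyId ?? process.env.BEDROCK_AWS_ACCESS_KEY_ID,
        secretAccessKey: userProvided.secretAccessKey ?? process.env.BEDROCK_AWS_SECRET_ACCESS_KEY,
        sessionToken: userProvided.sessionToken ?? process.env.BEDROCK_AWS_SESSION_TOKEN,
      };
    }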

View File

@@ -442,8 +442,6 @@ OPENID_REQUIRED_ROLE_PARAMETER_PATH=
OPENID_USERNAME_CLAIM=
# Set to determine which user info property returned from OpenID Provider to store as the User's name
OPENID_NAME_CLAIM=
# Optional audience parameter for OpenID authorization requests
OPENID_AUDIENCE=
OPENID_BUTTON_LABEL=
OPENID_IMAGE_URL=

View File

@@ -1,11 +1,6 @@
name: Publish `@librechat/client` to NPM
on:
push:
branches:
- main
paths:
- 'packages/client/package.json'
workflow_dispatch:
inputs:
reason:
@@ -22,37 +17,16 @@ jobs:
- name: Use Node.js
uses: actions/setup-node@v4
with:
node-version: '20.x'
- name: Install client dependencies
run: cd packages/client && npm ci
- name: Build client
run: cd packages/client && npm run build
- name: Set up npm authentication
run: echo "//registry.npmjs.org/:_authToken=${{ secrets.PUBLISH_NPM_TOKEN }}" > ~/.npmrc
- name: Check version change
id: check
working-directory: packages/client
node-version: '18.x'
- name: Check if client package exists
run: |
PACKAGE_VERSION=$(node -p "require('./package.json').version")
PUBLISHED_VERSION=$(npm view @librechat/client version 2>/dev/null || echo "0.0.0")
if [ "$PACKAGE_VERSION" = "$PUBLISHED_VERSION" ]; then
echo "No version change, skipping publish"
echo "skip=true" >> $GITHUB_OUTPUT
if [ -d "packages/client" ]; then
echo "Client package directory found"
else
echo "Version changed, proceeding with publish"
echo "skip=false" >> $GITHUB_OUTPUT
echo "Client package directory not found - workflow ready for future use"
exit 0
fi
- name: Pack package
if: steps.check.outputs.skip != 'true'
working-directory: packages/client
run: npm pack
- name: Publish
if: steps.check.outputs.skip != 'true'
working-directory: packages/client
run: npm publish *.tgz --access public
- name: Placeholder for future publishing
run: echo "Client package publishing workflow is ready"
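
The removed steps gate publishing on comparing the local package.json version with the latest version on the npm registry, skipping the publish when they match. A minimal Node sketch of that same check, assuming the script sits at the repository root and the npm CLI is available:

    // Version gate equivalent to the workflow's shell check.
    const { execSync } = require('child_process');

    const local = require('./packages/client/package.json').version;
    let published = '0.0.0';
    try {
      published = execSync('npm view @librechat/client version', { encoding: 'utf8' }).trim();
    } catch {
      // Package not published yet; keep the "0.0.0" fallback, as the workflow does.
    }

    console.log(
      local === published
        ? 'No version change, skipping publish'
        : 'Version changed, proceeding with publish',
    );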

View File

@@ -22,7 +22,7 @@ jobs:
- name: Use Node.js
uses: actions/setup-node@v4
with:
node-version: '20.x'
node-version: '18.x'
- name: Install dependencies
run: cd packages/data-schemas && npm ci

View File

@@ -4,13 +4,12 @@ name: Build Helm Charts on Tag
on:
push:
tags:
- "chart-*"
- "*"
jobs:
release:
permissions:
contents: write
packages: write
runs-on: ubuntu-latest
steps:
- name: Checkout
@@ -27,49 +26,15 @@ jobs:
uses: azure/setup-helm@v4
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
- name: Build Subchart Deps
run: |
cd helm/librechat
helm dependency build
cd ../librechat-rag-api
helm dependency build
cd helm/librechat-rag-api
helm dependency build
- name: Get Chart Version
id: chart-version
run: |
CHART_VERSION=$(echo "${{ github.ref_name }}" | cut -d'-' -f2)
echo "CHART_VERSION=${CHART_VERSION}" >> "$GITHUB_OUTPUT"
# Log in to GitHub Container Registry
- name: Log in to GitHub Container Registry
uses: docker/login-action@v3
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.6.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
# Run Helm OCI Charts Releaser
# This is for the librechat chart
- name: Release Helm OCI Charts for librechat
uses: appany/helm-oci-chart-releaser@v0.4.2
with:
name: librechat
repository: ${{ github.actor }}/librechat-chart
tag: ${{ steps.chart-version.outputs.CHART_VERSION }}
path: helm/librechat
registry: ghcr.io
registry_username: ${{ github.actor }}
registry_password: ${{ secrets.GITHUB_TOKEN }}
# this is for the librechat-rag-api chart
- name: Release Helm OCI Charts for librechat-rag-api
uses: appany/helm-oci-chart-releaser@v0.4.2
with:
name: librechat-rag-api
repository: ${{ github.actor }}/librechat-chart
tag: ${{ steps.chart-version.outputs.CHART_VERSION }}
path: helm/librechat-rag-api
registry: ghcr.io
registry_username: ${{ github.actor }}
registry_password: ${{ secrets.GITHUB_TOKEN }}
charts_dir: helm
skip_existing: true
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"

View File

@@ -6,7 +6,6 @@ on:
- "client/src/**"
- "api/**"
- "packages/data-provider/src/**"
- "packages/client/**"
jobs:
detect-unused-i18n-keys:
@@ -24,7 +23,7 @@ jobs:
# Define paths
I18N_FILE="client/src/locales/en/translation.json"
SOURCE_DIRS=("client/src" "api" "packages/data-provider/src" "packages/client")
SOURCE_DIRS=("client/src" "api" "packages/data-provider/src")
# Check if translation file exists
if [[ ! -f "$I18N_FILE" ]]; then

View File

@@ -48,7 +48,7 @@ jobs:
# 2. Download translation files from locize.
- name: Download Translations from locize
uses: locize/download@v2
uses: locize/download@v1
with:
project-id: ${{ secrets.LOCIZE_PROJECT_ID }}
path: "client/src/locales"

View File

@@ -7,7 +7,6 @@ on:
- 'package-lock.json'
- 'client/**'
- 'api/**'
- 'packages/client/**'
jobs:
detect-unused-packages:
@@ -29,7 +28,7 @@ jobs:
- name: Validate JSON files
run: |
for FILE in package.json client/package.json api/package.json packages/client/package.json; do
for FILE in package.json client/package.json api/package.json; do
if [[ -f "$FILE" ]]; then
jq empty "$FILE" || (echo "::error title=Invalid JSON::$FILE is invalid" && exit 1)
fi
@@ -64,31 +63,12 @@ jobs:
local folder=$1
local output_file=$2
if [[ -d "$folder" ]]; then
# Extract require() statements
grep -rEho "require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
grep -rEho "require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)" "$folder" --include=\*.{js,ts,mjs,cjs} | \
sed -E "s/require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)/\1/" > "$output_file"
# Extract ES6 imports - various patterns
# import x from 'module'
grep -rEho "import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
grep -rEho "import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,mjs,cjs} | \
sed -E "s/import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
# import 'module' (side-effect imports)
grep -rEho "import ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
sed -E "s/import ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
# export { x } from 'module' or export * from 'module'
grep -rEho "export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
sed -E "s/export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
# import type { x } from 'module' (TypeScript)
grep -rEho "import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{ts,tsx} | \
sed -E "s/import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
# Remove subpath imports but keep the base package
# e.g., '@tanstack/react-query/devtools' becomes '@tanstack/react-query'
sed -i -E 's|^(@?[a-zA-Z0-9-]+(/[a-zA-Z0-9-]+)?)/.*|\1|' "$output_file"
sort -u "$output_file" -o "$output_file"
else
touch "$output_file"
@@ -98,33 +78,6 @@ jobs:
extract_deps_from_code "." root_used_code.txt
extract_deps_from_code "client" client_used_code.txt
extract_deps_from_code "api" api_used_code.txt
# Extract dependencies used by @librechat/client package
extract_deps_from_code "packages/client" packages_client_used_code.txt
- name: Get @librechat/client dependencies
id: get-librechat-client-deps
run: |
if [[ -f "packages/client/package.json" ]]; then
# Get all dependencies from @librechat/client (dependencies, devDependencies, and peerDependencies)
DEPS=$(jq -r '.dependencies // {} | keys[]' packages/client/package.json 2>/dev/null || echo "")
DEV_DEPS=$(jq -r '.devDependencies // {} | keys[]' packages/client/package.json 2>/dev/null || echo "")
PEER_DEPS=$(jq -r '.peerDependencies // {} | keys[]' packages/client/package.json 2>/dev/null || echo "")
# Combine all dependencies
echo "$DEPS" > librechat_client_deps.txt
echo "$DEV_DEPS" >> librechat_client_deps.txt
echo "$PEER_DEPS" >> librechat_client_deps.txt
# Also include dependencies that are imported in packages/client
cat packages_client_used_code.txt >> librechat_client_deps.txt
# Remove empty lines and sort
grep -v '^$' librechat_client_deps.txt | sort -u > temp_deps.txt
mv temp_deps.txt librechat_client_deps.txt
else
touch librechat_client_deps.txt
fi
- name: Extract Workspace Dependencies
id: extract-workspace-deps

.gitignore (vendored, 3 changes)
View File

@@ -13,9 +13,6 @@ pids
*.seed
.git
# CI/CD data
test-image*
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

View File

@@ -1,4 +1,4 @@
# v0.8.0-rc2
# v0.7.9
# Base node image
FROM node:20-alpine AS node

View File

@@ -1,5 +1,5 @@
# Dockerfile.multi
# v0.8.0-rc2
# v0.7.9
# Base for all builds
FROM node:20-alpine AS base-min
@@ -16,7 +16,6 @@ COPY package*.json ./
COPY packages/data-provider/package*.json ./packages/data-provider/
COPY packages/api/package*.json ./packages/api/
COPY packages/data-schemas/package*.json ./packages/data-schemas/
COPY packages/client/package*.json ./packages/client/
COPY client/package*.json ./client/
COPY api/package*.json ./api/
@@ -46,19 +45,11 @@ COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/d
COPY --from=data-schemas-build /app/packages/data-schemas/dist /app/packages/data-schemas/dist
RUN npm run build
# Build `client` package
FROM base AS client-package-build
WORKDIR /app/packages/client
COPY packages/client ./
RUN npm run build
# Client build
FROM base AS client-build
WORKDIR /app/client
COPY client ./
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
COPY --from=client-package-build /app/packages/client/dist /app/packages/client/dist
COPY --from=client-package-build /app/packages/client/src /app/packages/client/src
ENV NODE_OPTIONS="--max-old-space-size=2048"
RUN npm run build

View File

@@ -27,7 +27,6 @@ const {
const { getModelMaxTokens, getModelMaxOutputTokens, matchModelName } = require('~/utils');
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { encodeAndFormatDocuments } = require('~/server/services/Files/documents');
const { sleep } = require('~/server/utils');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
@@ -313,33 +312,6 @@ class AnthropicClient extends BaseClient {
return files;
}
async addDocuments(message, attachments) {
// Only process documents
const documentResult = await encodeAndFormatDocuments(
this.options.req,
attachments,
EModelEndpoint.anthropic,
);
message.documents =
documentResult.documents && documentResult.documents.length
? documentResult.documents
: undefined;
return documentResult.files;
}
async processAttachments(message, attachments) {
// Process both images and documents
const [imageFiles, documentFiles] = await Promise.all([
this.addImageURLs(message, attachments),
this.addDocuments(message, attachments),
]);
// Combine files from both processors
return [...imageFiles, ...documentFiles];
}
/**
* @param {object} params
* @param {number} params.promptTokens
@@ -410,7 +382,7 @@ class AnthropicClient extends BaseClient {
};
}
const files = await this.processAttachments(latestMessage, attachments);
const files = await this.addImageURLs(latestMessage, attachments);
this.options.attachments = files;
}
@@ -969,7 +941,7 @@ class AnthropicClient extends BaseClient {
const content = `<conversation_context>
${convo}
</conversation_context>
Please generate a title for this conversation.`;
const titleMessage = { role: 'user', content };

View File

@@ -1233,7 +1233,7 @@ class BaseClient {
{},
);
await this.processAttachments(message, files, this.visionMode);
await this.addImageURLs(message, files, this.visionMode);
this.message_file_map[message.messageId] = files;
return message;

View File

@@ -268,7 +268,7 @@ class GoogleClient extends BaseClient {
const formattedMessages = [];
const attachments = await this.options.attachments;
const latestMessage = { ...messages[messages.length - 1] };
const files = await this.processAttachments(latestMessage, attachments, VisionModes.generative);
const files = await this.addImageURLs(latestMessage, attachments, VisionModes.generative);
this.options.attachments = files;
messages[messages.length - 1] = latestMessage;
@@ -312,20 +312,6 @@ class GoogleClient extends BaseClient {
return files;
}
// eslint-disable-next-line no-unused-vars
async addDocuments(message, attachments) {
// GoogleClient doesn't support document processing yet
// Return empty results for consistency
return [];
}
async processAttachments(message, attachments, mode = '') {
// For GoogleClient, only process images
const imageFiles = await this.addImageURLs(message, attachments, mode);
const documentFiles = await this.addDocuments(message, attachments);
return [...imageFiles, ...documentFiles];
}
/**
* Builds the augmented prompt for attachments
* TODO: Add File API Support
@@ -359,7 +345,7 @@ class GoogleClient extends BaseClient {
const { prompt } = await this.buildMessagesPrompt(messages, parentMessageId);
const files = await this.processAttachments(latestMessage, attachments);
const files = await this.addImageURLs(latestMessage, attachments);
this.options.attachments = files;

View File

@@ -372,19 +372,6 @@ class OpenAIClient extends BaseClient {
return files;
}
async addDocuments(message, attachments) {
// OpenAI doesn't support native document processing yet
// Return empty results for consistency
return [];
}
async processAttachments(message, attachments) {
// For OpenAI, only process images
const imageFiles = await this.addImageURLs(message, attachments);
const documentFiles = await this.addDocuments(message, attachments);
return [...imageFiles, ...documentFiles];
}
async buildMessages(messages, parentMessageId, { promptPrefix = null }, opts) {
let orderedMessages = this.constructor.getMessagesForConversation({
messages,
@@ -413,7 +400,7 @@ class OpenAIClient extends BaseClient {
};
}
const files = await this.processAttachments(
const files = await this.addImageURLs(
orderedMessages[orderedMessages.length - 1],
attachments,
);
@@ -1235,9 +1222,7 @@ ${convo}
}
if (this.isOmni === true && modelOptions.max_tokens != null) {
const paramName =
modelOptions.useResponsesApi === true ? 'max_output_tokens' : 'max_completion_tokens';
modelOptions[paramName] = modelOptions.max_tokens;
modelOptions.max_completion_tokens = modelOptions.max_tokens;
delete modelOptions.max_tokens;
}
if (this.isOmni === true && modelOptions.temperature != null) {

View File

@@ -3,61 +3,24 @@ const { EModelEndpoint, ContentTypes } = require('librechat-data-provider');
const { HumanMessage, AIMessage, SystemMessage } = require('@langchain/core/messages');
/**
* Formats a message with document attachments for specific endpoints.
* Formats a message to OpenAI Vision API payload format.
*
* @param {Object} params - The parameters for formatting.
* @param {Object} params.message - The message object to format.
* @param {Array<Object>} [params.documents] - The document attachments for the message.
* @param {string} [params.message.role] - The role of the message sender (must be 'user').
* @param {string} [params.message.content] - The text content of the message.
* @param {EModelEndpoint} [params.endpoint] - Identifier for specific endpoint handling
* @returns {(Object)} - The formatted message.
*/
const formatDocumentMessage = ({ message, documents, endpoint }) => {
const contentParts = [];
// Add documents first (for Anthropic PDFs)
if (documents && documents.length > 0) {
contentParts.push(...documents);
}
// Add text content
contentParts.push({ type: ContentTypes.TEXT, text: message.content });
if (endpoint === EModelEndpoint.anthropic) {
message.content = contentParts;
return message;
}
// For other endpoints, might need different handling
message.content = contentParts;
return message;
};
/**
* Formats a message with vision capabilities (image_urls) for specific endpoints.
*
* @param {Object} params - The parameters for formatting.
* @param {Object} params.message - The message object to format.
* @param {Array<string>} [params.image_urls] - The image_urls to attach to the message.
* @param {EModelEndpoint} [params.endpoint] - Identifier for specific endpoint handling
* @returns {(Object)} - The formatted message.
*/
const formatVisionMessage = ({ message, image_urls, endpoint }) => {
const contentParts = [];
// Add images
if (image_urls && image_urls.length > 0) {
contentParts.push(...image_urls);
}
// Add text content
contentParts.push({ type: ContentTypes.TEXT, text: message.content });
if (endpoint === EModelEndpoint.anthropic) {
message.content = contentParts;
message.content = [...image_urls, { type: ContentTypes.TEXT, text: message.content }];
return message;
}
message.content = [{ type: ContentTypes.TEXT, text: message.content }, ...image_urls];
return message;
};
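
Illustrative sketch, not part of the diff: the content ordering the rewritten formatVisionMessage produces for a single image attachment.

    const message = { role: 'user', content: 'Describe this image.' };
    const image_urls = [
      { type: 'image_url', image_url: { url: 'https://example.com/cat.png' } },
    ];
    // Anthropic endpoint: image parts first, then the text part:
    //   [{ type: 'image_url', ... }, { type: 'text', text: 'Describe this image.' }]
    // All other endpoints: text part first, then the image parts:
    //   [{ type: 'text', text: 'Describe this image.' }, { type: 'image_url', ... }]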
@@ -95,18 +58,7 @@ const formatMessage = ({ message, userName, assistantName, endpoint, langChain =
content,
};
const { image_urls, documents } = message;
// Handle documents
if (Array.isArray(documents) && documents.length > 0 && role === 'user') {
return formatDocumentMessage({
message: formattedMessage,
documents: message.documents,
endpoint,
});
}
// Handle images
const { image_urls } = message;
if (Array.isArray(image_urls) && image_urls.length > 0 && role === 'user') {
return formatVisionMessage({
message: formattedMessage,
@@ -194,21 +146,7 @@ const formatAgentMessages = (payload) => {
message.content = [{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: message.content }];
}
if (message.role !== 'assistant') {
// Check if message has documents and preserve array structure
const hasDocuments =
Array.isArray(message.content) &&
message.content.some((part) => part && part.type === 'document');
if (hasDocuments && message.role === 'user') {
// For user messages with documents, create HumanMessage directly with array content
messages.push(new HumanMessage({ content: message.content }));
} else if (hasDocuments && message.role === 'system') {
// For system messages with documents, create SystemMessage directly with array content
messages.push(new SystemMessage({ content: message.content }));
} else {
// Use regular formatting for messages without documents
messages.push(formatMessage({ message, langChain: true }));
}
messages.push(formatMessage({ message, langChain: true }));
continue;
}
@@ -301,8 +239,6 @@ const formatAgentMessages = (payload) => {
module.exports = {
formatMessage,
formatDocumentMessage,
formatVisionMessage,
formatFromLangChain,
formatAgentMessages,
formatLangChainMessages,

View File

@@ -3,8 +3,8 @@ const path = require('path');
const OpenAI = require('openai');
const fetch = require('node-fetch');
const { v4: uuidv4 } = require('uuid');
const { ProxyAgent } = require('undici');
const { Tool } = require('@langchain/core/tools');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { FileContext, ContentTypes } = require('librechat-data-provider');
const { getImageBasename } = require('~/server/services/Files/images');
const extractBaseURL = require('~/utils/extractBaseURL');
@@ -46,10 +46,7 @@ class DALLE3 extends Tool {
}
if (process.env.PROXY) {
const proxyAgent = new ProxyAgent(process.env.PROXY);
config.fetchOptions = {
dispatcher: proxyAgent,
};
config.httpAgent = new HttpsProxyAgent(process.env.PROXY);
}
/** @type {OpenAI} */
@@ -166,8 +163,7 @@ Error Message: ${error.message}`);
if (this.isAgent) {
let fetchOptions = {};
if (process.env.PROXY) {
const proxyAgent = new ProxyAgent(process.env.PROXY);
fetchOptions.dispatcher = proxyAgent;
fetchOptions.agent = new HttpsProxyAgent(process.env.PROXY);
}
const imageResponse = await fetch(theImageUrl, fetchOptions);
const arrayBuffer = await imageResponse.arrayBuffer();
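
The two proxy styles this file toggles between, sketched side by side for contrast. Which option a given openai client release accepts varies by version, and the direction of the change is inferred from the deleted ProxyAgent test later in this compare, so treat the pairing as an assumption rather than a statement about a specific release:

    const OpenAI = require('openai');
    const { ProxyAgent } = require('undici');
    const { HttpsProxyAgent } = require('https-proxy-agent');

    const config = { apiKey: process.env.DALLE_API_KEY };
    if (process.env.PROXY) {
      // undici style: route all fetch() calls through a dispatcher.
      config.fetchOptions = { dispatcher: new ProxyAgent(process.env.PROXY) };
      // https-proxy-agent style (the side being restored here):
      // config.httpAgent = new HttpsProxyAgent(process.env.PROXY);
    }
    const openai = new OpenAI(config);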

View File

@@ -3,10 +3,10 @@ const axios = require('axios');
const { v4 } = require('uuid');
const OpenAI = require('openai');
const FormData = require('form-data');
const { ProxyAgent } = require('undici');
const { tool } = require('@langchain/core/tools');
const { logAxiosError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { ContentTypes, EImageOutputType } = require('librechat-data-provider');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { extractBaseURL } = require('~/utils');
@@ -189,10 +189,7 @@ function createOpenAIImageTools(fields = {}) {
}
const clientConfig = { ...closureConfig };
if (process.env.PROXY) {
const proxyAgent = new ProxyAgent(process.env.PROXY);
clientConfig.fetchOptions = {
dispatcher: proxyAgent,
};
clientConfig.httpAgent = new HttpsProxyAgent(process.env.PROXY);
}
/** @type {OpenAI} */
@@ -338,10 +335,7 @@ Error Message: ${error.message}`);
const clientConfig = { ...closureConfig };
if (process.env.PROXY) {
const proxyAgent = new ProxyAgent(process.env.PROXY);
clientConfig.fetchOptions = {
dispatcher: proxyAgent,
};
clientConfig.httpAgent = new HttpsProxyAgent(process.env.PROXY);
}
const formData = new FormData();
@@ -453,19 +447,6 @@ Error Message: ${error.message}`);
baseURL,
};
if (process.env.PROXY) {
try {
const url = new URL(process.env.PROXY);
axiosConfig.proxy = {
host: url.hostname.replace(/^\[|\]$/g, ''),
port: url.port ? parseInt(url.port, 10) : undefined,
protocol: url.protocol.replace(':', ''),
};
} catch (error) {
logger.error('Error parsing proxy URL:', error);
}
}
if (process.env.IMAGE_GEN_OAI_AZURE_API_VERSION && process.env.IMAGE_GEN_OAI_BASEURL) {
axiosConfig.params = {
'api-version': process.env.IMAGE_GEN_OAI_AZURE_API_VERSION,

View File

@@ -1,94 +0,0 @@
const DALLE3 = require('../DALLE3');
const { ProxyAgent } = require('undici');
const processFileURL = jest.fn();
jest.mock('~/server/services/Files/images', () => ({
getImageBasename: jest.fn().mockImplementation((url) => {
const parts = url.split('/');
const lastPart = parts.pop();
const imageExtensionRegex = /\.(jpg|jpeg|png|gif|bmp|tiff|svg)$/i;
if (imageExtensionRegex.test(lastPart)) {
return lastPart;
}
return '';
}),
}));
jest.mock('fs', () => {
return {
existsSync: jest.fn(),
mkdirSync: jest.fn(),
promises: {
writeFile: jest.fn(),
readFile: jest.fn(),
unlink: jest.fn(),
},
};
});
jest.mock('path', () => {
return {
resolve: jest.fn(),
join: jest.fn(),
relative: jest.fn(),
extname: jest.fn().mockImplementation((filename) => {
return filename.slice(filename.lastIndexOf('.'));
}),
};
});
describe('DALLE3 Proxy Configuration', () => {
let originalEnv;
beforeAll(() => {
originalEnv = { ...process.env };
});
beforeEach(() => {
jest.resetModules();
process.env = { ...originalEnv };
});
afterEach(() => {
process.env = originalEnv;
});
it('should configure ProxyAgent in fetchOptions.dispatcher when PROXY env is set', () => {
// Set proxy environment variable
process.env.PROXY = 'http://proxy.example.com:8080';
process.env.DALLE_API_KEY = 'test-api-key';
// Create instance
const dalleWithProxy = new DALLE3({ processFileURL });
// Check that the openai client exists
expect(dalleWithProxy.openai).toBeDefined();
// Check that _options exists and has fetchOptions with a dispatcher
expect(dalleWithProxy.openai._options).toBeDefined();
expect(dalleWithProxy.openai._options.fetchOptions).toBeDefined();
expect(dalleWithProxy.openai._options.fetchOptions.dispatcher).toBeDefined();
expect(dalleWithProxy.openai._options.fetchOptions.dispatcher).toBeInstanceOf(ProxyAgent);
});
it('should not configure ProxyAgent when PROXY env is not set', () => {
// Ensure PROXY is not set
delete process.env.PROXY;
process.env.DALLE_API_KEY = 'test-api-key';
// Create instance
const dalleWithoutProxy = new DALLE3({ processFileURL });
// Check that the openai client exists
expect(dalleWithoutProxy.openai).toBeDefined();
// Check that _options exists but fetchOptions either doesn't exist or doesn't have a dispatcher
expect(dalleWithoutProxy.openai._options).toBeDefined();
// fetchOptions should either not exist or not have a dispatcher
if (dalleWithoutProxy.openai._options.fetchOptions) {
expect(dalleWithoutProxy.openai._options.fetchOptions.dispatcher).toBeUndefined();
}
});
});

View File

@@ -44,14 +44,6 @@ const cacheConfig = {
REDIS_KEY_PREFIX: process.env[REDIS_KEY_PREFIX_VAR] || REDIS_KEY_PREFIX || '',
REDIS_MAX_LISTENERS: math(process.env.REDIS_MAX_LISTENERS, 40),
REDIS_PING_INTERVAL: math(process.env.REDIS_PING_INTERVAL, 0),
/** Max delay between reconnection attempts in ms */
REDIS_RETRY_MAX_DELAY: math(process.env.REDIS_RETRY_MAX_DELAY, 3000),
/** Max number of reconnection attempts (0 = infinite) */
REDIS_RETRY_MAX_ATTEMPTS: math(process.env.REDIS_RETRY_MAX_ATTEMPTS, 10),
/** Connection timeout in ms */
REDIS_CONNECT_TIMEOUT: math(process.env.REDIS_CONNECT_TIMEOUT, 10000),
/** Queue commands when disconnected */
REDIS_ENABLE_OFFLINE_QUEUE: isEnabled(process.env.REDIS_ENABLE_OFFLINE_QUEUE ?? 'true'),
CI: isEnabled(process.env.CI),
DEBUG_MEMORY_CACHE: isEnabled(process.env.DEBUG_MEMORY_CACHE),
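
For reference, a sketch of how these knobs would be read; math() and isEnabled() are LibreChat helpers whose behavior is inferred here from the surrounding lines:

    // Inferred behavior: math() parses a numeric env var with a fallback,
    // isEnabled() parses a boolean-ish string.
    const math = (value, fallback) => {
      const n = Number(value);
      return Number.isFinite(n) ? n : fallback;
    };
    const isEnabled = (value) => String(value).toLowerCase() === 'true';

    console.log(math(process.env.REDIS_RETRY_MAX_DELAY, 3000));  // 3000 when unset
    console.log(isEnabled(process.env.REDIS_ENABLE_OFFLINE_QUEUE ?? 'true')); // true by default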

View File

@@ -1,13 +1,12 @@
const KeyvRedis = require('@keyv/redis').default;
const { Keyv } = require('keyv');
const { RedisStore } = require('rate-limit-redis');
const { cacheConfig } = require('./cacheConfig');
const { keyvRedisClient, ioredisClient, GLOBAL_PREFIX_SEPARATOR } = require('./redisClients');
const { Time } = require('librechat-data-provider');
const { logger } = require('@librechat/data-schemas');
const { RedisStore: ConnectRedis } = require('connect-redis');
const MemoryStore = require('memorystore')(require('express-session'));
const { keyvRedisClient, ioredisClient, GLOBAL_PREFIX_SEPARATOR } = require('./redisClients');
const { cacheConfig } = require('./cacheConfig');
const { violationFile } = require('./keyvFiles');
const { RedisStore } = require('rate-limit-redis');
/**
* Creates a cache instance using Redis or a fallback store. Suitable for general caching needs.
@@ -21,21 +20,11 @@ const standardCache = (namespace, ttl = undefined, fallbackStore = undefined) =>
cacheConfig.USE_REDIS &&
!cacheConfig.FORCED_IN_MEMORY_CACHE_NAMESPACES?.includes(namespace)
) {
try {
const keyvRedis = new KeyvRedis(keyvRedisClient);
const cache = new Keyv(keyvRedis, { namespace, ttl });
keyvRedis.namespace = cacheConfig.REDIS_KEY_PREFIX;
keyvRedis.keyPrefixSeparator = GLOBAL_PREFIX_SEPARATOR;
cache.on('error', (err) => {
logger.error(`Cache error in namespace ${namespace}:`, err);
});
return cache;
} catch (err) {
logger.error(`Failed to create Redis cache for namespace ${namespace}:`, err);
throw err;
}
const keyvRedis = new KeyvRedis(keyvRedisClient);
const cache = new Keyv(keyvRedis, { namespace, ttl });
keyvRedis.namespace = cacheConfig.REDIS_KEY_PREFIX;
keyvRedis.keyPrefixSeparator = GLOBAL_PREFIX_SEPARATOR;
return cache;
}
if (fallbackStore) return new Keyv({ store: fallbackStore, namespace, ttl });
return new Keyv({ namespace, ttl });
@@ -61,13 +50,7 @@ const violationCache = (namespace, ttl = undefined) => {
const sessionCache = (namespace, ttl = undefined) => {
namespace = namespace.endsWith(':') ? namespace : `${namespace}:`;
if (!cacheConfig.USE_REDIS) return new MemoryStore({ ttl, checkPeriod: Time.ONE_DAY });
const store = new ConnectRedis({ client: ioredisClient, ttl, prefix: namespace });
if (ioredisClient) {
ioredisClient.on('error', (err) => {
logger.error(`Session store Redis error for namespace ${namespace}:`, err);
});
}
return store;
return new ConnectRedis({ client: ioredisClient, ttl, prefix: namespace });
};
/**
@@ -79,30 +62,8 @@ const limiterCache = (prefix) => {
if (!prefix) throw new Error('prefix is required');
if (!cacheConfig.USE_REDIS) return undefined;
prefix = prefix.endsWith(':') ? prefix : `${prefix}:`;
try {
if (!ioredisClient) {
logger.warn(`Redis client not available for rate limiter with prefix ${prefix}`);
return undefined;
}
return new RedisStore({ sendCommand, prefix });
} catch (err) {
logger.error(`Failed to create Redis rate limiter for prefix ${prefix}:`, err);
return undefined;
}
};
const sendCommand = (...args) => {
if (!ioredisClient) {
logger.warn('Redis client not available for command execution');
return Promise.reject(new Error('Redis client not available'));
}
return ioredisClient.call(...args).catch((err) => {
logger.error('Redis command execution failed:', err);
throw err;
});
return new RedisStore({ sendCommand, prefix });
};
const sendCommand = (...args) => ioredisClient?.call(...args);
module.exports = { standardCache, sessionCache, violationCache, limiterCache };
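
A hedged usage sketch of limiterCache with express-rate-limit, the middleware the rate-limit-redis store is built for; the limiter options are illustrative and the import assumes a v7-style named export:

    const { rateLimit } = require('express-rate-limit');
    const { limiterCache } = require('./cacheFactory');

    const limiter = rateLimit({
      windowMs: 60 * 1000, // 1-minute window (illustrative)
      max: 100,            // 100 requests per window (illustrative)
      // limiterCache returns undefined when Redis is off, so express-rate-limit
      // falls back to its in-memory store.
      store: limiterCache('login'),
    });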

View File

@@ -6,17 +6,13 @@ const mockKeyvRedis = {
keyPrefixSeparator: '',
};
const mockKeyv = jest.fn().mockReturnValue({
mock: 'keyv',
on: jest.fn(),
});
const mockKeyv = jest.fn().mockReturnValue({ mock: 'keyv' });
const mockConnectRedis = jest.fn().mockReturnValue({ mock: 'connectRedis' });
const mockMemoryStore = jest.fn().mockReturnValue({ mock: 'memoryStore' });
const mockRedisStore = jest.fn().mockReturnValue({ mock: 'redisStore' });
const mockIoredisClient = {
call: jest.fn(),
on: jest.fn(),
};
const mockKeyvRedisClient = {};
@@ -57,14 +53,6 @@ jest.mock('rate-limit-redis', () => ({
RedisStore: mockRedisStore,
}));
jest.mock('@librechat/data-schemas', () => ({
logger: {
error: jest.fn(),
warn: jest.fn(),
info: jest.fn(),
},
}));
// Import after mocking
const { standardCache, sessionCache, violationCache, limiterCache } = require('./cacheFactory');
const { cacheConfig } = require('./cacheConfig');
@@ -154,28 +142,6 @@ describe('cacheFactory', () => {
expect(require('@keyv/redis').default).toHaveBeenCalledWith(mockKeyvRedisClient);
expect(mockKeyv).toHaveBeenCalledWith(mockKeyvRedis, { namespace, ttl });
});
it('should throw error when Redis cache creation fails', () => {
cacheConfig.USE_REDIS = true;
const namespace = 'test-namespace';
const ttl = 3600;
const testError = new Error('Redis connection failed');
const KeyvRedis = require('@keyv/redis').default;
KeyvRedis.mockImplementationOnce(() => {
throw testError;
});
expect(() => standardCache(namespace, ttl)).toThrow('Redis connection failed');
const { logger } = require('@librechat/data-schemas');
expect(logger.error).toHaveBeenCalledWith(
`Failed to create Redis cache for namespace ${namespace}:`,
testError,
);
expect(mockKeyv).not.toHaveBeenCalled();
});
});
describe('violationCache', () => {
@@ -267,86 +233,6 @@ describe('cacheFactory', () => {
checkPeriod: Time.ONE_DAY,
});
});
it('should throw error when ConnectRedis constructor fails', () => {
cacheConfig.USE_REDIS = true;
const namespace = 'sessions';
const ttl = 86400;
// Mock ConnectRedis to throw an error during construction
const redisError = new Error('Redis connection failed');
mockConnectRedis.mockImplementationOnce(() => {
throw redisError;
});
// The error should propagate up, not be caught
expect(() => sessionCache(namespace, ttl)).toThrow('Redis connection failed');
// Verify that MemoryStore was NOT used as fallback
expect(mockMemoryStore).not.toHaveBeenCalled();
});
it('should register error handler but let errors propagate to Express', () => {
cacheConfig.USE_REDIS = true;
const namespace = 'sessions';
// Create a mock session store with middleware methods
const mockSessionStore = {
get: jest.fn(),
set: jest.fn(),
destroy: jest.fn(),
};
mockConnectRedis.mockReturnValue(mockSessionStore);
const store = sessionCache(namespace);
// Verify error handler was registered
expect(mockIoredisClient.on).toHaveBeenCalledWith('error', expect.any(Function));
// Get the error handler
const errorHandler = mockIoredisClient.on.mock.calls.find((call) => call[0] === 'error')[1];
// Simulate an error from Redis during a session operation
const redisError = new Error('Socket closed unexpectedly');
// The error handler should log but not swallow the error
const { logger } = require('@librechat/data-schemas');
errorHandler(redisError);
expect(logger.error).toHaveBeenCalledWith(
`Session store Redis error for namespace ${namespace}::`,
redisError,
);
// Now simulate what happens when session middleware tries to use the store
const callback = jest.fn();
mockSessionStore.get.mockImplementation((sid, cb) => {
cb(new Error('Redis connection lost'));
});
// Call the store's get method (as Express session would)
store.get('test-session-id', callback);
// The error should be passed to the callback, not swallowed
expect(callback).toHaveBeenCalledWith(new Error('Redis connection lost'));
});
it('should handle null ioredisClient gracefully', () => {
cacheConfig.USE_REDIS = true;
const namespace = 'sessions';
// Temporarily set ioredisClient to null (simulating connection not established)
const originalClient = require('./redisClients').ioredisClient;
require('./redisClients').ioredisClient = null;
// ConnectRedis might accept null client but would fail on first use
// The important thing is it doesn't throw uncaught exceptions during construction
const store = sessionCache(namespace);
expect(store).toBeDefined();
// Restore original client
require('./redisClients').ioredisClient = originalClient;
});
});
describe('limiterCache', () => {
@@ -388,10 +274,8 @@ describe('cacheFactory', () => {
});
});
it('should pass sendCommand function that calls ioredisClient.call', async () => {
it('should pass sendCommand function that calls ioredisClient.call', () => {
cacheConfig.USE_REDIS = true;
mockIoredisClient.call.mockResolvedValue('test-value');
limiterCache('rate-limit');
const sendCommandCall = mockRedisStore.mock.calls[0][0];
@@ -399,29 +283,9 @@ describe('cacheFactory', () => {
// Test that sendCommand properly delegates to ioredisClient.call
const args = ['GET', 'test-key'];
const result = await sendCommand(...args);
sendCommand(...args);
expect(mockIoredisClient.call).toHaveBeenCalledWith(...args);
expect(result).toBe('test-value');
});
it('should handle sendCommand errors properly', async () => {
cacheConfig.USE_REDIS = true;
// Mock the call method to reject with an error
const testError = new Error('Redis error');
mockIoredisClient.call.mockRejectedValue(testError);
limiterCache('rate-limit');
const sendCommandCall = mockRedisStore.mock.calls[0][0];
const sendCommand = sendCommandCall.sendCommand;
// Test that sendCommand properly handles errors
const args = ['GET', 'test-key'];
await expect(sendCommand(...args)).rejects.toThrow('Redis error');
expect(mockIoredisClient.call).toHaveBeenCalledWith(...args);
});
it('should handle undefined prefix', () => {

View File

@@ -13,82 +13,23 @@ const ca = cacheConfig.REDIS_CA;
/** @type {import('ioredis').Redis | import('ioredis').Cluster | null} */
let ioredisClient = null;
if (cacheConfig.USE_REDIS) {
/** @type {import('ioredis').RedisOptions | import('ioredis').ClusterOptions} */
const redisOptions = {
username: username,
password: password,
tls: ca ? { ca } : undefined,
keyPrefix: `${cacheConfig.REDIS_KEY_PREFIX}${GLOBAL_PREFIX_SEPARATOR}`,
maxListeners: cacheConfig.REDIS_MAX_LISTENERS,
retryStrategy: (times) => {
if (
cacheConfig.REDIS_RETRY_MAX_ATTEMPTS > 0 &&
times > cacheConfig.REDIS_RETRY_MAX_ATTEMPTS
) {
logger.error(
`ioredis giving up after ${cacheConfig.REDIS_RETRY_MAX_ATTEMPTS} reconnection attempts`,
);
return null;
}
const delay = Math.min(times * 50, cacheConfig.REDIS_RETRY_MAX_DELAY);
logger.info(`ioredis reconnecting... attempt ${times}, delay ${delay}ms`);
return delay;
},
reconnectOnError: (err) => {
const targetError = 'READONLY';
if (err.message.includes(targetError)) {
logger.warn('ioredis reconnecting due to READONLY error');
return true;
}
return false;
},
enableOfflineQueue: cacheConfig.REDIS_ENABLE_OFFLINE_QUEUE,
connectTimeout: cacheConfig.REDIS_CONNECT_TIMEOUT,
maxRetriesPerRequest: 3,
};
ioredisClient =
urls.length === 1
? new IoRedis(cacheConfig.REDIS_URI, redisOptions)
: new IoRedis.Cluster(cacheConfig.REDIS_URI, {
redisOptions,
clusterRetryStrategy: (times) => {
if (
cacheConfig.REDIS_RETRY_MAX_ATTEMPTS > 0 &&
times > cacheConfig.REDIS_RETRY_MAX_ATTEMPTS
) {
logger.error(
`ioredis cluster giving up after ${cacheConfig.REDIS_RETRY_MAX_ATTEMPTS} reconnection attempts`,
);
return null;
}
const delay = Math.min(times * 100, cacheConfig.REDIS_RETRY_MAX_DELAY);
logger.info(`ioredis cluster reconnecting... attempt ${times}, delay ${delay}ms`);
return delay;
},
enableOfflineQueue: cacheConfig.REDIS_ENABLE_OFFLINE_QUEUE,
});
: new IoRedis.Cluster(cacheConfig.REDIS_URI, { redisOptions });
ioredisClient.on('error', (err) => {
logger.error('ioredis client error:', err);
});
ioredisClient.on('connect', () => {
logger.info('ioredis client connected');
});
ioredisClient.on('ready', () => {
logger.info('ioredis client ready');
});
ioredisClient.on('reconnecting', (delay) => {
logger.info(`ioredis client reconnecting in ${delay}ms`);
});
ioredisClient.on('close', () => {
logger.warn('ioredis client connection closed');
});
/** Ping Interval to keep the Redis server connection alive (if enabled) */
let pingInterval = null;
const clearPingInterval = () => {
@@ -101,9 +42,7 @@ if (cacheConfig.USE_REDIS) {
if (cacheConfig.REDIS_PING_INTERVAL > 0) {
pingInterval = setInterval(() => {
if (ioredisClient && ioredisClient.status === 'ready') {
ioredisClient.ping().catch((err) => {
logger.error('ioredis ping failed:', err);
});
ioredisClient.ping();
}
}, cacheConfig.REDIS_PING_INTERVAL * 1000);
ioredisClient.on('close', clearPingInterval);
@@ -117,32 +56,8 @@ if (cacheConfig.USE_REDIS) {
/**
* ** WARNING ** Keyv Redis client does not support Prefix like ioredis above.
* The prefix feature will be handled by the Keyv-Redis store in cacheFactory.js
* @type {import('@keyv/redis').RedisClientOptions | import('@keyv/redis').RedisClusterOptions}
*/
const redisOptions = {
username,
password,
socket: {
tls: ca != null,
ca,
connectTimeout: cacheConfig.REDIS_CONNECT_TIMEOUT,
reconnectStrategy: (retries) => {
if (
cacheConfig.REDIS_RETRY_MAX_ATTEMPTS > 0 &&
retries > cacheConfig.REDIS_RETRY_MAX_ATTEMPTS
) {
logger.error(
`@keyv/redis client giving up after ${cacheConfig.REDIS_RETRY_MAX_ATTEMPTS} reconnection attempts`,
);
return new Error('Max reconnection attempts reached');
}
const delay = Math.min(retries * 100, cacheConfig.REDIS_RETRY_MAX_DELAY);
logger.info(`@keyv/redis reconnecting... attempt ${retries}, delay ${delay}ms`);
return delay;
},
},
disableOfflineQueue: !cacheConfig.REDIS_ENABLE_OFFLINE_QUEUE,
};
const redisOptions = { username, password, socket: { tls: ca != null, ca } };
keyvRedisClient =
urls.length === 1
@@ -158,27 +73,6 @@ if (cacheConfig.USE_REDIS) {
logger.error('@keyv/redis client error:', err);
});
keyvRedisClient.on('connect', () => {
logger.info('@keyv/redis client connected');
});
keyvRedisClient.on('ready', () => {
logger.info('@keyv/redis client ready');
});
keyvRedisClient.on('reconnecting', () => {
logger.info('@keyv/redis client reconnecting...');
});
keyvRedisClient.on('disconnect', () => {
logger.warn('@keyv/redis client disconnected');
});
keyvRedisClient.connect().catch((err) => {
logger.error('@keyv/redis initial connection failed:', err);
throw err;
});
/** Ping Interval to keep the Redis server connection alive (if enabled) */
let pingInterval = null;
const clearPingInterval = () => {
@@ -191,9 +85,7 @@ if (cacheConfig.USE_REDIS) {
if (cacheConfig.REDIS_PING_INTERVAL > 0) {
pingInterval = setInterval(() => {
if (keyvRedisClient && keyvRedisClient.isReady) {
keyvRedisClient.ping().catch((err) => {
logger.error('@keyv/redis ping failed:', err);
});
keyvRedisClient.ping();
}
}, cacheConfig.REDIS_PING_INTERVAL * 1000);
keyvRedisClient.on('disconnect', clearPingInterval);

View File

@@ -316,10 +316,17 @@ const updateAgent = async (searchParameter, updateData, options = {}) => {
if (shouldCreateVersion) {
const duplicateVersion = isDuplicateVersion(updateData, versionData, versions, actionsHash);
if (duplicateVersion && !forceVersion) {
// No changes detected, return the current agent without creating a new version
const agentObj = currentAgent.toObject();
agentObj.version = versions.length;
return agentObj;
const error = new Error(
'Duplicate version: This would create a version identical to an existing one',
);
error.statusCode = 409;
error.details = {
duplicateVersion,
versionIndex: versions.findIndex(
(v) => JSON.stringify(duplicateVersion) === JSON.stringify(v),
),
};
throw error;
}
}
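
A hedged caller-side sketch covering both behaviors this hunk switches between: the 409-throwing variant and the silent return of the current version. safeUpdateAgent is a hypothetical wrapper, not part of the codebase:

    async function safeUpdateAgent(searchParameter, updateData) {
      try {
        return await updateAgent(searchParameter, updateData);
      } catch (error) {
        if (error.statusCode === 409) {
          // Duplicate version: nothing changed; error.details.versionIndex
          // points at the matching existing version.
          return null;
        }
        throw error;
      }
    }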

View File

@@ -879,31 +879,45 @@ describe('models/Agent', () => {
expect(emptyParamsAgent.model_parameters).toEqual({});
});
test('should not create new version for duplicate updates', async () => {
const authorId = new mongoose.Types.ObjectId();
const testCases = generateVersionTestCases();
test('should detect duplicate versions and reject updates', async () => {
const originalConsoleError = console.error;
console.error = jest.fn();
for (const testCase of testCases) {
const testAgentId = `agent_${uuidv4()}`;
try {
const authorId = new mongoose.Types.ObjectId();
const testCases = generateVersionTestCases();
await createAgent({
id: testAgentId,
provider: 'test',
model: 'test-model',
author: authorId,
...testCase.initial,
});
for (const testCase of testCases) {
const testAgentId = `agent_${uuidv4()}`;
const updatedAgent = await updateAgent({ id: testAgentId }, testCase.update);
expect(updatedAgent.versions).toHaveLength(2); // No new version created
await createAgent({
id: testAgentId,
provider: 'test',
model: 'test-model',
author: authorId,
...testCase.initial,
});
// Update with duplicate data should succeed but not create a new version
const duplicateUpdate = await updateAgent({ id: testAgentId }, testCase.duplicate);
await updateAgent({ id: testAgentId }, testCase.update);
expect(duplicateUpdate.versions).toHaveLength(2); // No new version created
let error;
try {
await updateAgent({ id: testAgentId }, testCase.duplicate);
} catch (e) {
error = e;
}
const agent = await getAgent({ id: testAgentId });
expect(agent.versions).toHaveLength(2);
expect(error).toBeDefined();
expect(error.message).toContain('Duplicate version');
expect(error.statusCode).toBe(409);
expect(error.details).toBeDefined();
expect(error.details.duplicateVersion).toBeDefined();
const agent = await getAgent({ id: testAgentId });
expect(agent.versions).toHaveLength(2);
}
} finally {
console.error = originalConsoleError;
}
});
@@ -1079,13 +1093,20 @@ describe('models/Agent', () => {
expect(secondUpdate.versions).toHaveLength(3);
// Update without forceVersion and no changes should not create a version
const duplicateUpdate = await updateAgent(
{ id: agentId },
{ tools: ['listEvents_action_test.com', 'createEvent_action_test.com'] },
{ updatingUserId: authorId.toString(), forceVersion: false },
);
let error;
try {
await updateAgent(
{ id: agentId },
{ tools: ['listEvents_action_test.com', 'createEvent_action_test.com'] },
{ updatingUserId: authorId.toString(), forceVersion: false },
);
} catch (e) {
error = e;
}
expect(duplicateUpdate.versions).toHaveLength(3); // No new version created
expect(error).toBeDefined();
expect(error.message).toContain('Duplicate version');
expect(error.statusCode).toBe(409);
});
test('should handle isDuplicateVersion with arrays containing null/undefined values', async () => {
@@ -2379,18 +2400,11 @@ describe('models/Agent', () => {
agent_ids: ['agent1', 'agent2'],
});
const updatedAgent = await updateAgent(
{ id: agentId },
{ agent_ids: ['agent1', 'agent2', 'agent3'] },
);
expect(updatedAgent.versions).toHaveLength(2);
await updateAgent({ id: agentId }, { agent_ids: ['agent1', 'agent2', 'agent3'] });
// Update with same agent_ids should succeed but not create a new version
const duplicateUpdate = await updateAgent(
{ id: agentId },
{ agent_ids: ['agent1', 'agent2', 'agent3'] },
);
expect(duplicateUpdate.versions).toHaveLength(2); // No new version created
await expect(
updateAgent({ id: agentId }, { agent_ids: ['agent1', 'agent2', 'agent3'] }),
).rejects.toThrow('Duplicate version');
});
test('should handle agent_ids field alongside other fields', async () => {
@@ -2529,10 +2543,9 @@ describe('models/Agent', () => {
expect(updated.versions).toHaveLength(2);
expect(updated.agent_ids).toEqual([]);
// Update with same empty agent_ids should succeed but not create a new version
const duplicateUpdate = await updateAgent({ id: agentId }, { agent_ids: [] });
expect(duplicateUpdate.versions).toHaveLength(2); // No new version created
expect(duplicateUpdate.agent_ids).toEqual([]);
await expect(updateAgent({ id: agentId }, { agent_ids: [] })).rejects.toThrow(
'Duplicate version',
);
});
test('should handle agent without agent_ids field', async () => {

View File

@@ -1,6 +1,6 @@
const { logger } = require('@librechat/data-schemas');
const { createTempChatExpirationDate } = require('@librechat/api');
const { getCustomConfig } = require('~/server/services/Config/getCustomConfig');
const getCustomConfig = require('~/server/services/Config/getCustomConfig');
const { getMessages, deleteMessages } = require('./Message');
const { Conversation } = require('~/db/models');

View File

@@ -1,572 +0,0 @@
const mongoose = require('mongoose');
const { v4: uuidv4 } = require('uuid');
const { EModelEndpoint } = require('librechat-data-provider');
const { MongoMemoryServer } = require('mongodb-memory-server');
const {
deleteNullOrEmptyConversations,
searchConversation,
getConvosByCursor,
getConvosQueried,
getConvoFiles,
getConvoTitle,
deleteConvos,
saveConvo,
getConvo,
} = require('./Conversation');
jest.mock('~/server/services/Config/getCustomConfig');
jest.mock('./Message');
const { getCustomConfig } = require('~/server/services/Config/getCustomConfig');
const { getMessages, deleteMessages } = require('./Message');
const { Conversation } = require('~/db/models');
describe('Conversation Operations', () => {
let mongoServer;
let mockReq;
let mockConversationData;
beforeAll(async () => {
mongoServer = await MongoMemoryServer.create();
const mongoUri = mongoServer.getUri();
await mongoose.connect(mongoUri);
});
afterAll(async () => {
await mongoose.disconnect();
await mongoServer.stop();
});
beforeEach(async () => {
// Clear database
await Conversation.deleteMany({});
// Reset mocks
jest.clearAllMocks();
// Default mock implementations
getMessages.mockResolvedValue([]);
deleteMessages.mockResolvedValue({ deletedCount: 0 });
mockReq = {
user: { id: 'user123' },
body: {},
};
mockConversationData = {
conversationId: uuidv4(),
title: 'Test Conversation',
endpoint: EModelEndpoint.openAI,
};
});
describe('saveConvo', () => {
it('should save a conversation for an authenticated user', async () => {
const result = await saveConvo(mockReq, mockConversationData);
expect(result.conversationId).toBe(mockConversationData.conversationId);
expect(result.user).toBe('user123');
expect(result.title).toBe('Test Conversation');
expect(result.endpoint).toBe(EModelEndpoint.openAI);
// Verify the conversation was actually saved to the database
const savedConvo = await Conversation.findOne({
conversationId: mockConversationData.conversationId,
user: 'user123',
});
expect(savedConvo).toBeTruthy();
expect(savedConvo.title).toBe('Test Conversation');
});
it('should query messages when saving a conversation', async () => {
// Mock messages as ObjectIds
const mongoose = require('mongoose');
const mockMessages = [new mongoose.Types.ObjectId(), new mongoose.Types.ObjectId()];
getMessages.mockResolvedValue(mockMessages);
await saveConvo(mockReq, mockConversationData);
// Verify that getMessages was called with correct parameters
expect(getMessages).toHaveBeenCalledWith(
{ conversationId: mockConversationData.conversationId },
'_id',
);
});
it('should handle newConversationId when provided', async () => {
const newConversationId = uuidv4();
const result = await saveConvo(mockReq, {
...mockConversationData,
newConversationId,
});
expect(result.conversationId).toBe(newConversationId);
});
it('should handle unsetFields metadata', async () => {
const metadata = {
unsetFields: { someField: 1 },
};
await saveConvo(mockReq, mockConversationData, metadata);
const savedConvo = await Conversation.findOne({
conversationId: mockConversationData.conversationId,
});
expect(savedConvo.someField).toBeUndefined();
});
});
describe('isTemporary conversation handling', () => {
it('should save a conversation with expiredAt when isTemporary is true', async () => {
// Mock custom config with 24 hour retention
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 24,
},
});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveConvo(mockReq, mockConversationData);
const afterSave = new Date();
expect(result.conversationId).toBe(mockConversationData.conversationId);
expect(result.expiredAt).toBeDefined();
expect(result.expiredAt).toBeInstanceOf(Date);
// Verify expiredAt is approximately 24 hours in the future
const expectedExpirationTime = new Date(beforeSave.getTime() + 24 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
new Date(afterSave.getTime() + 24 * 60 * 60 * 1000 + 1000).getTime(),
);
});
it('should save a conversation without expiredAt when isTemporary is false', async () => {
mockReq.body = { isTemporary: false };
const result = await saveConvo(mockReq, mockConversationData);
expect(result.conversationId).toBe(mockConversationData.conversationId);
expect(result.expiredAt).toBeNull();
});
it('should save a conversation without expiredAt when isTemporary is not provided', async () => {
// No isTemporary in body
mockReq.body = {};
const result = await saveConvo(mockReq, mockConversationData);
expect(result.conversationId).toBe(mockConversationData.conversationId);
expect(result.expiredAt).toBeNull();
});
it('should use custom retention period from config', async () => {
// Mock custom config with 48 hour retention
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 48,
},
});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveConvo(mockReq, mockConversationData);
expect(result.expiredAt).toBeDefined();
// Verify expiredAt is approximately 48 hours in the future
const expectedExpirationTime = new Date(beforeSave.getTime() + 48 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
expectedExpirationTime.getTime() + 1000,
);
});
it('should handle minimum retention period (1 hour)', async () => {
// Mock custom config with less than minimum retention
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 0.5, // Half hour - should be clamped to 1 hour
},
});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveConvo(mockReq, mockConversationData);
expect(result.expiredAt).toBeDefined();
// Verify expiredAt is approximately 1 hour in the future (minimum)
const expectedExpirationTime = new Date(beforeSave.getTime() + 1 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
expectedExpirationTime.getTime() + 1000,
);
});
it('should handle maximum retention period (8760 hours)', async () => {
// Mock custom config with more than maximum retention
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 10000, // Should be clamped to 8760 hours
},
});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveConvo(mockReq, mockConversationData);
expect(result.expiredAt).toBeDefined();
// Verify expiredAt is approximately 8760 hours (1 year) in the future
const expectedExpirationTime = new Date(beforeSave.getTime() + 8760 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
expectedExpirationTime.getTime() + 1000,
);
});
it('should handle getCustomConfig errors gracefully', async () => {
// Mock getCustomConfig to throw an error
getCustomConfig.mockRejectedValue(new Error('Config service unavailable'));
mockReq.body = { isTemporary: true };
const result = await saveConvo(mockReq, mockConversationData);
// Should still save the conversation but with expiredAt as null
expect(result.conversationId).toBe(mockConversationData.conversationId);
expect(result.expiredAt).toBeNull();
});
it('should use default retention when config is not provided', async () => {
// Mock getCustomConfig to return empty config
getCustomConfig.mockResolvedValue({});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveConvo(mockReq, mockConversationData);
expect(result.expiredAt).toBeDefined();
// Default retention is 30 days (720 hours)
const expectedExpirationTime = new Date(beforeSave.getTime() + 30 * 24 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
expectedExpirationTime.getTime() + 1000,
);
});
it('should update expiredAt when saving existing temporary conversation', async () => {
// First save a temporary conversation
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 24,
},
});
mockReq.body = { isTemporary: true };
const firstSave = await saveConvo(mockReq, mockConversationData);
const originalExpiredAt = firstSave.expiredAt;
// Wait a bit to ensure time difference
await new Promise((resolve) => setTimeout(resolve, 100));
// Save again with same conversationId but different title
const updatedData = { ...mockConversationData, title: 'Updated Title' };
const secondSave = await saveConvo(mockReq, updatedData);
// Should update title and create new expiredAt
expect(secondSave.title).toBe('Updated Title');
expect(secondSave.expiredAt).toBeDefined();
expect(new Date(secondSave.expiredAt).getTime()).toBeGreaterThan(
new Date(originalExpiredAt).getTime(),
);
});
it('should not set expiredAt when updating non-temporary conversation', async () => {
// First save a non-temporary conversation
mockReq.body = { isTemporary: false };
const firstSave = await saveConvo(mockReq, mockConversationData);
expect(firstSave.expiredAt).toBeNull();
// Update without isTemporary flag
mockReq.body = {};
const updatedData = { ...mockConversationData, title: 'Updated Title' };
const secondSave = await saveConvo(mockReq, updatedData);
expect(secondSave.title).toBe('Updated Title');
expect(secondSave.expiredAt).toBeNull();
});
it('should filter out expired conversations in getConvosByCursor', async () => {
// Create some test conversations
const nonExpiredConvo = await Conversation.create({
conversationId: uuidv4(),
user: 'user123',
title: 'Non-expired',
endpoint: EModelEndpoint.openAI,
expiredAt: null,
updatedAt: new Date(),
});
await Conversation.create({
conversationId: uuidv4(),
user: 'user123',
title: 'Future expired',
endpoint: EModelEndpoint.openAI,
expiredAt: new Date(Date.now() + 24 * 60 * 60 * 1000), // 24 hours from now
updatedAt: new Date(),
});
// Mock Meili search
Conversation.meiliSearch = jest.fn().mockResolvedValue({ hits: [] });
const result = await getConvosByCursor('user123');
// Should only return conversations with null or non-existent expiredAt
expect(result.conversations).toHaveLength(1);
expect(result.conversations[0].conversationId).toBe(nonExpiredConvo.conversationId);
});
it('should filter out expired conversations in getConvosQueried', async () => {
// Create test conversations
const nonExpiredConvo = await Conversation.create({
conversationId: uuidv4(),
user: 'user123',
title: 'Non-expired',
endpoint: EModelEndpoint.openAI,
expiredAt: null,
});
const expiredConvo = await Conversation.create({
conversationId: uuidv4(),
user: 'user123',
title: 'Expired',
endpoint: EModelEndpoint.openAI,
expiredAt: new Date(Date.now() + 24 * 60 * 60 * 1000),
});
const convoIds = [
{ conversationId: nonExpiredConvo.conversationId },
{ conversationId: expiredConvo.conversationId },
];
const result = await getConvosQueried('user123', convoIds);
// Should only return the non-expired conversation
expect(result.conversations).toHaveLength(1);
expect(result.conversations[0].conversationId).toBe(nonExpiredConvo.conversationId);
expect(result.convoMap[nonExpiredConvo.conversationId]).toBeDefined();
expect(result.convoMap[expiredConvo.conversationId]).toBeUndefined();
});
});
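// A sketch of the expiration filter the two tests above exercise, assuming the
// query shape implied by the assertions (the real getConvosByCursor and
// getConvosQueried implementations may differ):
//   const notExpired = { $or: [{ expiredAt: null }, { expiredAt: { $exists: false } }] };
//   await Conversation.find({ user, ...notExpired }).lean();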
describe('searchConversation', () => {
it('should find a conversation by conversationId', async () => {
await Conversation.create({
conversationId: mockConversationData.conversationId,
user: 'user123',
title: 'Test',
endpoint: EModelEndpoint.openAI,
});
const result = await searchConversation(mockConversationData.conversationId);
expect(result).toBeTruthy();
expect(result.conversationId).toBe(mockConversationData.conversationId);
expect(result.user).toBe('user123');
expect(result.title).toBeUndefined(); // Only returns conversationId and user
});
it('should return null if conversation not found', async () => {
const result = await searchConversation('non-existent-id');
expect(result).toBeNull();
});
});
describe('getConvo', () => {
it('should retrieve a conversation for a user', async () => {
await Conversation.create({
conversationId: mockConversationData.conversationId,
user: 'user123',
title: 'Test Conversation',
endpoint: EModelEndpoint.openAI,
});
const result = await getConvo('user123', mockConversationData.conversationId);
expect(result.conversationId).toBe(mockConversationData.conversationId);
expect(result.user).toBe('user123');
expect(result.title).toBe('Test Conversation');
});
it('should return null if conversation not found', async () => {
const result = await getConvo('user123', 'non-existent-id');
expect(result).toBeNull();
});
});
describe('getConvoTitle', () => {
it('should return the conversation title', async () => {
await Conversation.create({
conversationId: mockConversationData.conversationId,
user: 'user123',
title: 'Test Title',
endpoint: EModelEndpoint.openAI,
});
const result = await getConvoTitle('user123', mockConversationData.conversationId);
expect(result).toBe('Test Title');
});
it('should return null if conversation has no title', async () => {
await Conversation.create({
conversationId: mockConversationData.conversationId,
user: 'user123',
title: null,
endpoint: EModelEndpoint.openAI,
});
const result = await getConvoTitle('user123', mockConversationData.conversationId);
expect(result).toBeNull();
});
it('should return "New Chat" if conversation not found', async () => {
const result = await getConvoTitle('user123', 'non-existent-id');
expect(result).toBe('New Chat');
});
});
describe('getConvoFiles', () => {
it('should return conversation files', async () => {
const files = ['file1', 'file2'];
await Conversation.create({
conversationId: mockConversationData.conversationId,
user: 'user123',
endpoint: EModelEndpoint.openAI,
files,
});
const result = await getConvoFiles(mockConversationData.conversationId);
expect(result).toEqual(files);
});
it('should return empty array if no files', async () => {
await Conversation.create({
conversationId: mockConversationData.conversationId,
user: 'user123',
endpoint: EModelEndpoint.openAI,
});
const result = await getConvoFiles(mockConversationData.conversationId);
expect(result).toEqual([]);
});
it('should return empty array if conversation not found', async () => {
const result = await getConvoFiles('non-existent-id');
expect(result).toEqual([]);
});
});
describe('deleteConvos', () => {
it('should delete conversations and associated messages', async () => {
await Conversation.create({
conversationId: mockConversationData.conversationId,
user: 'user123',
title: 'To Delete',
endpoint: EModelEndpoint.openAI,
});
deleteMessages.mockResolvedValue({ deletedCount: 5 });
const result = await deleteConvos('user123', {
conversationId: mockConversationData.conversationId,
});
expect(result.deletedCount).toBe(1);
expect(result.messages.deletedCount).toBe(5);
expect(deleteMessages).toHaveBeenCalledWith({
conversationId: { $in: [mockConversationData.conversationId] },
});
// Verify conversation was deleted
const deletedConvo = await Conversation.findOne({
conversationId: mockConversationData.conversationId,
});
expect(deletedConvo).toBeNull();
});
it('should throw error if no conversations found', async () => {
await expect(deleteConvos('user123', { conversationId: 'non-existent' })).rejects.toThrow(
'Conversation not found or already deleted.',
);
});
});
describe('deleteNullOrEmptyConversations', () => {
it('should preserve valid conversations when no null/empty conversationIds exist', async () => {
// conversationId is required by the schema, so documents with null or missing
// IDs can only arise from data corruption and cannot be created in this test.
// Instead, create a valid conversation and verify the function leaves it intact.
await Conversation.create({
conversationId: mockConversationData.conversationId,
user: 'user4',
endpoint: EModelEndpoint.openAI,
});
deleteMessages.mockResolvedValue({ deletedCount: 0 });
const result = await deleteNullOrEmptyConversations();
expect(result.conversations.deletedCount).toBe(0); // No invalid conversations to delete
expect(result.messages.deletedCount).toBe(0);
// Verify valid conversation remains
const remainingConvos = await Conversation.find({});
expect(remainingConvos).toHaveLength(1);
expect(remainingConvos[0].conversationId).toBe(mockConversationData.conversationId);
});
});
describe('Error Handling', () => {
it('should handle database errors in saveConvo', async () => {
// Force a database error by disconnecting
await mongoose.disconnect();
const result = await saveConvo(mockReq, mockConversationData);
expect(result).toEqual({ message: 'Error saving conversation' });
// Reconnect for other tests
await mongoose.connect(mongoServer.getUri());
});
});
});

View File

@@ -1,7 +1,7 @@
const { z } = require('zod');
const { logger } = require('@librechat/data-schemas');
const { createTempChatExpirationDate } = require('@librechat/api');
const { getCustomConfig } = require('~/server/services/Config/getCustomConfig');
const getCustomConfig = require('~/server/services/Config/getCustomConfig');
const { Message } = require('~/db/models');
const idSchema = z.string().uuid();

View File

@@ -1,21 +1,17 @@
const mongoose = require('mongoose');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { v4: uuidv4 } = require('uuid');
const { messageSchema } = require('@librechat/data-schemas');
const { MongoMemoryServer } = require('mongodb-memory-server');
const {
saveMessage,
getMessages,
updateMessage,
deleteMessages,
bulkSaveMessages,
updateMessageText,
deleteMessagesSince,
} = require('./Message');
jest.mock('~/server/services/Config/getCustomConfig');
const { getCustomConfig } = require('~/server/services/Config/getCustomConfig');
/**
* @type {import('mongoose').Model<import('@librechat/data-schemas').IMessage>}
*/
@@ -121,21 +117,21 @@ describe('Message Operations', () => {
const conversationId = uuidv4();
// Create multiple messages in the same conversation
await saveMessage(mockReq, {
const message1 = await saveMessage(mockReq, {
messageId: 'msg1',
conversationId,
text: 'First message',
user: 'user123',
});
await saveMessage(mockReq, {
const message2 = await saveMessage(mockReq, {
messageId: 'msg2',
conversationId,
text: 'Second message',
user: 'user123',
});
await saveMessage(mockReq, {
const message3 = await saveMessage(mockReq, {
messageId: 'msg3',
conversationId,
text: 'Third message',
@@ -318,265 +314,4 @@ describe('Message Operations', () => {
expect(messages[0].text).toBe('Victim message');
});
});
describe('isTemporary message handling', () => {
beforeEach(() => {
// Reset mocks before each test
jest.clearAllMocks();
});
it('should save a message with expiredAt when isTemporary is true', async () => {
// Mock custom config with 24 hour retention
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 24,
},
});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveMessage(mockReq, mockMessageData);
const afterSave = new Date();
expect(result.messageId).toBe('msg123');
expect(result.expiredAt).toBeDefined();
expect(result.expiredAt).toBeInstanceOf(Date);
// Verify expiredAt is approximately 24 hours in the future
const expectedExpirationTime = new Date(beforeSave.getTime() + 24 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
new Date(afterSave.getTime() + 24 * 60 * 60 * 1000 + 1000).getTime(),
);
});
it('should save a message without expiredAt when isTemporary is false', async () => {
mockReq.body = { isTemporary: false };
const result = await saveMessage(mockReq, mockMessageData);
expect(result.messageId).toBe('msg123');
expect(result.expiredAt).toBeNull();
});
it('should save a message without expiredAt when isTemporary is not provided', async () => {
// No isTemporary in body
mockReq.body = {};
const result = await saveMessage(mockReq, mockMessageData);
expect(result.messageId).toBe('msg123');
expect(result.expiredAt).toBeNull();
});
it('should use custom retention period from config', async () => {
// Mock custom config with 48 hour retention
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 48,
},
});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveMessage(mockReq, mockMessageData);
expect(result.expiredAt).toBeDefined();
// Verify expiredAt is approximately 48 hours in the future
const expectedExpirationTime = new Date(beforeSave.getTime() + 48 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
expectedExpirationTime.getTime() + 1000,
);
});
it('should handle minimum retention period (1 hour)', async () => {
// Mock custom config with less than minimum retention
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 0.5, // Half hour - should be clamped to 1 hour
},
});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveMessage(mockReq, mockMessageData);
expect(result.expiredAt).toBeDefined();
// Verify expiredAt is approximately 1 hour in the future (minimum)
const expectedExpirationTime = new Date(beforeSave.getTime() + 1 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
expectedExpirationTime.getTime() + 1000,
);
});
it('should handle maximum retention period (8760 hours)', async () => {
// Mock custom config with more than maximum retention
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 10000, // Should be clamped to 8760 hours
},
});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveMessage(mockReq, mockMessageData);
expect(result.expiredAt).toBeDefined();
// Verify expiredAt is approximately 8760 hours (1 year) in the future
const expectedExpirationTime = new Date(beforeSave.getTime() + 8760 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
expectedExpirationTime.getTime() + 1000,
);
});
it('should handle getCustomConfig errors gracefully', async () => {
// Mock getCustomConfig to throw an error
getCustomConfig.mockRejectedValue(new Error('Config service unavailable'));
mockReq.body = { isTemporary: true };
const result = await saveMessage(mockReq, mockMessageData);
// Should still save the message but with expiredAt as null
expect(result.messageId).toBe('msg123');
expect(result.expiredAt).toBeNull();
});
it('should use default retention when config is not provided', async () => {
// Mock getCustomConfig to return empty config
getCustomConfig.mockResolvedValue({});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveMessage(mockReq, mockMessageData);
expect(result.expiredAt).toBeDefined();
// Default retention is 30 days (720 hours)
const expectedExpirationTime = new Date(beforeSave.getTime() + 30 * 24 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
expectedExpirationTime.getTime() + 1000,
);
});
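// Taken together, the min/max/default tests above pin down the clamping that
// createTempChatExpirationDate (imported from @librechat/api elsewhere in this
// diff) is expected to perform. A minimal sketch, assuming hours are clamped
// to [1, 8760] with a 720-hour (30-day) default:
//   const hours = Math.min(Math.max(retentionHours ?? 720, 1), 8760);
//   const expiredAt = new Date(Date.now() + hours * 60 * 60 * 1000);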
it('should not update expiredAt on message update', async () => {
// First save a temporary message
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 24,
},
});
mockReq.body = { isTemporary: true };
const savedMessage = await saveMessage(mockReq, mockMessageData);
const originalExpiredAt = savedMessage.expiredAt;
// Now update the message without isTemporary flag
mockReq.body = {};
const updatedMessage = await updateMessage(mockReq, {
messageId: 'msg123',
text: 'Updated text',
});
// expiredAt should not be in the returned updated message object
expect(updatedMessage.expiredAt).toBeUndefined();
// Verify in database that expiredAt wasn't changed
const dbMessage = await Message.findOne({ messageId: 'msg123', user: 'user123' });
expect(dbMessage.expiredAt).toEqual(originalExpiredAt);
});
it('should update expiredAt when saving existing temporary message', async () => {
// First save a temporary message
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 24,
},
});
mockReq.body = { isTemporary: true };
const firstSave = await saveMessage(mockReq, mockMessageData);
const originalExpiredAt = firstSave.expiredAt;
// Wait a bit to ensure time difference
await new Promise((resolve) => setTimeout(resolve, 100));
// Save again with same messageId but different text
const updatedData = { ...mockMessageData, text: 'Updated text' };
const secondSave = await saveMessage(mockReq, updatedData);
// Should update text but create new expiredAt
expect(secondSave.text).toBe('Updated text');
expect(secondSave.expiredAt).toBeDefined();
expect(new Date(secondSave.expiredAt).getTime()).toBeGreaterThan(
new Date(originalExpiredAt).getTime(),
);
});
it('should handle bulk operations with temporary messages', async () => {
// This test verifies bulkSaveMessages doesn't interfere with expiredAt
const messages = [
{
messageId: 'bulk1',
conversationId: uuidv4(),
text: 'Bulk message 1',
user: 'user123',
expiredAt: new Date(Date.now() + 24 * 60 * 60 * 1000),
},
{
messageId: 'bulk2',
conversationId: uuidv4(),
text: 'Bulk message 2',
user: 'user123',
expiredAt: null,
},
];
await bulkSaveMessages(messages);
const savedMessages = await Message.find({
messageId: { $in: ['bulk1', 'bulk2'] },
}).lean();
expect(savedMessages).toHaveLength(2);
const bulk1 = savedMessages.find((m) => m.messageId === 'bulk1');
const bulk2 = savedMessages.find((m) => m.messageId === 'bulk2');
expect(bulk1.expiredAt).toBeDefined();
expect(bulk2.expiredAt).toBeNull();
});
});
});

View File

@@ -1,4 +1,4 @@
const { matchModelName } = require('../utils/tokens');
const { matchModelName } = require('../utils');
const defaultRate = 6;
/**
@@ -87,9 +87,6 @@ const tokenValues = Object.assign(
'gpt-4.1': { prompt: 2, completion: 8 },
'gpt-4.5': { prompt: 75, completion: 150 },
'gpt-4o-mini': { prompt: 0.15, completion: 0.6 },
'gpt-5': { prompt: 1.25, completion: 10 },
'gpt-5-mini': { prompt: 0.25, completion: 2 },
'gpt-5-nano': { prompt: 0.05, completion: 0.4 },
'gpt-4o': { prompt: 2.5, completion: 10 },
'gpt-4o-2024-05-13': { prompt: 5, completion: 15 },
'gpt-4-1106': { prompt: 10, completion: 30 },
@@ -150,9 +147,6 @@ const tokenValues = Object.assign(
codestral: { prompt: 0.3, completion: 0.9 },
'ministral-8b': { prompt: 0.1, completion: 0.1 },
'ministral-3b': { prompt: 0.04, completion: 0.04 },
// GPT-OSS models
'gpt-oss-20b': { prompt: 0.05, completion: 0.2 },
'gpt-oss-120b': { prompt: 0.15, completion: 0.6 },
},
bedrockValues,
);
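A quick worked example of applying these rates (assuming, as the magnitudes
suggest, that the prompt/completion values are USD per 1M tokens):

// Price 200k prompt tokens + 50k completion tokens on gpt-4o (2.5 / 10):
const cost = (200000 / 1e6) * 2.5 + (50000 / 1e6) * 10; // 0.5 + 0.5 = $1.00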
@@ -220,12 +214,6 @@ const getValueKey = (model, endpoint) => {
return 'gpt-4.1';
} else if (modelName.includes('gpt-4o-2024-05-13')) {
return 'gpt-4o-2024-05-13';
} else if (modelName.includes('gpt-5-nano')) {
return 'gpt-5-nano';
} else if (modelName.includes('gpt-5-mini')) {
return 'gpt-5-mini';
} else if (modelName.includes('gpt-5')) {
return 'gpt-5';
} else if (modelName.includes('gpt-4o-mini')) {
return 'gpt-4o-mini';
} else if (modelName.includes('gpt-4o')) {

View File

@@ -25,14 +25,8 @@ describe('getValueKey', () => {
expect(getValueKey('gpt-4-some-other-info')).toBe('8k');
});
it('should return "gpt-5" for model name containing "gpt-5"', () => {
expect(getValueKey('gpt-5-some-other-info')).toBe('gpt-5');
expect(getValueKey('gpt-5-2025-01-30')).toBe('gpt-5');
expect(getValueKey('gpt-5-2025-01-30-0130')).toBe('gpt-5');
expect(getValueKey('openai/gpt-5')).toBe('gpt-5');
expect(getValueKey('openai/gpt-5-2025-01-30')).toBe('gpt-5');
expect(getValueKey('gpt-5-turbo')).toBe('gpt-5');
expect(getValueKey('gpt-5-0130')).toBe('gpt-5');
it('should return undefined for model names that do not match any known patterns', () => {
expect(getValueKey('gpt-5-some-other-info')).toBeUndefined();
});
it('should return "gpt-3.5-turbo-1106" for model name containing "gpt-3.5-turbo-1106"', () => {
@@ -90,29 +84,6 @@ describe('getValueKey', () => {
expect(getValueKey('gpt-4.1-nano-0125')).toBe('gpt-4.1-nano');
});
it('should return "gpt-5" for model type of "gpt-5"', () => {
expect(getValueKey('gpt-5-2025-01-30')).toBe('gpt-5');
expect(getValueKey('gpt-5-2025-01-30-0130')).toBe('gpt-5');
expect(getValueKey('openai/gpt-5')).toBe('gpt-5');
expect(getValueKey('openai/gpt-5-2025-01-30')).toBe('gpt-5');
expect(getValueKey('gpt-5-turbo')).toBe('gpt-5');
expect(getValueKey('gpt-5-0130')).toBe('gpt-5');
});
it('should return "gpt-5-mini" for model type of "gpt-5-mini"', () => {
expect(getValueKey('gpt-5-mini-2025-01-30')).toBe('gpt-5-mini');
expect(getValueKey('openai/gpt-5-mini')).toBe('gpt-5-mini');
expect(getValueKey('gpt-5-mini-0130')).toBe('gpt-5-mini');
expect(getValueKey('gpt-5-mini-2025-01-30-0130')).toBe('gpt-5-mini');
});
it('should return "gpt-5-nano" for model type of "gpt-5-nano"', () => {
expect(getValueKey('gpt-5-nano-2025-01-30')).toBe('gpt-5-nano');
expect(getValueKey('openai/gpt-5-nano')).toBe('gpt-5-nano');
expect(getValueKey('gpt-5-nano-0130')).toBe('gpt-5-nano');
expect(getValueKey('gpt-5-nano-2025-01-30-0130')).toBe('gpt-5-nano');
});
it('should return "gpt-4o" for model type of "gpt-4o"', () => {
expect(getValueKey('gpt-4o-2024-08-06')).toBe('gpt-4o');
expect(getValueKey('gpt-4o-2024-08-06-0718')).toBe('gpt-4o');
@@ -236,48 +207,6 @@ describe('getMultiplier', () => {
);
});
it('should return the correct multiplier for gpt-5', () => {
const valueKey = getValueKey('gpt-5-2025-01-30');
expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-5'].prompt);
expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
tokenValues['gpt-5'].completion,
);
expect(getMultiplier({ model: 'gpt-5-preview', tokenType: 'prompt' })).toBe(
tokenValues['gpt-5'].prompt,
);
expect(getMultiplier({ model: 'openai/gpt-5', tokenType: 'completion' })).toBe(
tokenValues['gpt-5'].completion,
);
});
it('should return the correct multiplier for gpt-5-mini', () => {
const valueKey = getValueKey('gpt-5-mini-2025-01-30');
expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-5-mini'].prompt);
expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
tokenValues['gpt-5-mini'].completion,
);
expect(getMultiplier({ model: 'gpt-5-mini-preview', tokenType: 'prompt' })).toBe(
tokenValues['gpt-5-mini'].prompt,
);
expect(getMultiplier({ model: 'openai/gpt-5-mini', tokenType: 'completion' })).toBe(
tokenValues['gpt-5-mini'].completion,
);
});
it('should return the correct multiplier for gpt-5-nano', () => {
const valueKey = getValueKey('gpt-5-nano-2025-01-30');
expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-5-nano'].prompt);
expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
tokenValues['gpt-5-nano'].completion,
);
expect(getMultiplier({ model: 'gpt-5-nano-preview', tokenType: 'prompt' })).toBe(
tokenValues['gpt-5-nano'].prompt,
);
expect(getMultiplier({ model: 'openai/gpt-5-nano', tokenType: 'completion' })).toBe(
tokenValues['gpt-5-nano'].completion,
);
});
it('should return the correct multiplier for gpt-4o', () => {
const valueKey = getValueKey('gpt-4o-2024-08-06');
expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-4o'].prompt);
@@ -378,22 +307,10 @@ describe('getMultiplier', () => {
});
it('should return defaultRate if derived valueKey does not match any known patterns', () => {
expect(getMultiplier({ tokenType: 'prompt', model: 'gpt-10-some-other-info' })).toBe(
expect(getMultiplier({ tokenType: 'prompt', model: 'gpt-5-some-other-info' })).toBe(
defaultRate,
);
});
it('should return correct multipliers for GPT-OSS models', () => {
const models = ['gpt-oss-20b', 'gpt-oss-120b'];
models.forEach((key) => {
const expectedPrompt = tokenValues[key].prompt;
const expectedCompletion = tokenValues[key].completion;
expect(getMultiplier({ valueKey: key, tokenType: 'prompt' })).toBe(expectedPrompt);
expect(getMultiplier({ valueKey: key, tokenType: 'completion' })).toBe(expectedCompletion);
expect(getMultiplier({ model: key, tokenType: 'prompt' })).toBe(expectedPrompt);
expect(getMultiplier({ model: key, tokenType: 'completion' })).toBe(expectedCompletion);
});
});
});
describe('AWS Bedrock Model Tests', () => {

View File

@@ -1,6 +1,6 @@
{
"name": "@librechat/backend",
"version": "v0.8.0-rc2",
"version": "v0.7.9",
"description": "",
"scripts": {
"start": "echo 'please run this from the root directory'",
@@ -49,10 +49,10 @@
"@langchain/google-vertexai": "^0.2.13",
"@langchain/openai": "^0.5.18",
"@langchain/textsplitters": "^0.1.0",
"@librechat/agents": "^2.4.75",
"@librechat/agents": "^2.4.68",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.17.1",
"@modelcontextprotocol/sdk": "^1.17.0",
"@node-saml/passport-saml": "^5.1.0",
"@waylaidwanderer/fetch-event-source": "^3.0.1",
"axios": "^1.8.2",

View File

@@ -0,0 +1,46 @@
const { logger } = require('~/config');
// Handle duplicate key errors
const handleDuplicateKeyError = (err, res) => {
logger.error('Duplicate key error:', err.keyValue);
const field = JSON.stringify(Object.keys(err.keyValue));
const code = 409;
res
.status(code)
.send({ messages: `A document with that ${field} already exists.`, fields: field });
};
// Handle validation errors
const handleValidationError = (err, res) => {
logger.error('Validation error:', err.errors);
let errors = Object.values(err.errors).map((el) => el.message);
const fields = JSON.stringify(Object.values(err.errors).map((el) => el.path));
const code = 400;
if (errors.length > 1) {
errors = errors.join(' ');
}
res.status(code).send({ messages: `${JSON.stringify(errors)}`, fields: fields });
};
module.exports = (err, _req, res, _next) => {
try {
if (err.name === 'ValidationError') {
return handleValidationError(err, res);
}
// Loose equality (==) is deliberate: the spec also passes code as the string '11000'.
if (err.code && err.code == 11000) {
return handleDuplicateKeyError(err, res);
}
// Special handling for errors like SyntaxError
if (err.statusCode && err.body) {
return res.status(err.statusCode).send(err.body);
}
logger.error('ErrorController => error', err);
return res.status(500).send('An unknown error occurred.');
} catch (processingError) {
logger.error('ErrorController => processing error', processingError);
return res.status(500).send('Processing error in ErrorController.');
}
};
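Because the exported handler has Express's four-argument error-middleware
signature, it is meant to be registered after all routes. A minimal usage
sketch (the app wiring is illustrative, not part of this diff):

const express = require('express');
const errorController = require('./ErrorController');

const app = express();
// ...routes and other middleware...
// Registered last: Express recognizes the (err, req, res, next) arity and
// forwards thrown or next(err)-passed errors here.
app.use(errorController);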

View File

@@ -1,43 +1,36 @@
import { logger } from '@librechat/data-schemas';
import { ErrorController } from './error';
import type { Request, Response } from 'express';
import type { ValidationError, MongoServerError, CustomError } from '~/types';
const errorController = require('./ErrorController');
const { logger } = require('~/config');
// Mock the logger
jest.mock('@librechat/data-schemas', () => ({
...jest.requireActual('@librechat/data-schemas'),
jest.mock('~/config', () => ({
logger: {
error: jest.fn(),
warn: jest.fn(),
},
}));
describe('ErrorController', () => {
let mockReq: Request;
let mockRes: Response;
let mockReq, mockRes, mockNext;
beforeEach(() => {
mockReq = {
originalUrl: '',
} as Request;
mockReq = {};
mockRes = {
status: jest.fn().mockReturnThis(),
send: jest.fn(),
} as unknown as Response;
(logger.error as jest.Mock).mockClear();
};
mockNext = jest.fn();
logger.error.mockClear();
});
describe('ValidationError handling', () => {
it('should handle ValidationError with single error', () => {
const validationError = {
name: 'ValidationError',
message: 'Validation error',
errors: {
email: { message: 'Email is required', path: 'email' },
},
} as ValidationError;
};
ErrorController(validationError, mockReq, mockRes);
errorController(validationError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(400);
expect(mockRes.send).toHaveBeenCalledWith({
@@ -50,14 +43,13 @@ describe('ErrorController', () => {
it('should handle ValidationError with multiple errors', () => {
const validationError = {
name: 'ValidationError',
message: 'Validation error',
errors: {
email: { message: 'Email is required', path: 'email' },
password: { message: 'Password is required', path: 'password' },
},
} as ValidationError;
};
ErrorController(validationError, mockReq, mockRes);
errorController(validationError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(400);
expect(mockRes.send).toHaveBeenCalledWith({
@@ -71,9 +63,9 @@ describe('ErrorController', () => {
const validationError = {
name: 'ValidationError',
errors: {},
} as ValidationError;
};
ErrorController(validationError, mockReq, mockRes);
errorController(validationError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(400);
expect(mockRes.send).toHaveBeenCalledWith({
@@ -86,59 +78,43 @@ describe('ErrorController', () => {
describe('Duplicate key error handling', () => {
it('should handle duplicate key error (code 11000)', () => {
const duplicateKeyError = {
name: 'MongoServerError',
message: 'Duplicate key error',
code: 11000,
keyValue: { email: 'test@example.com' },
errmsg:
'E11000 duplicate key error collection: test.users index: email_1 dup key: { email: "test@example.com" }',
} as MongoServerError;
};
ErrorController(duplicateKeyError, mockReq, mockRes);
errorController(duplicateKeyError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(409);
expect(mockRes.send).toHaveBeenCalledWith({
messages: 'An document with that ["email"] already exists.',
fields: '["email"]',
});
expect(logger.warn).toHaveBeenCalledWith(
'Duplicate key error: E11000 duplicate key error collection: test.users index: email_1 dup key: { email: "test@example.com" }',
);
expect(logger.error).toHaveBeenCalledWith('Duplicate key error:', duplicateKeyError.keyValue);
});
it('should handle duplicate key error with multiple fields', () => {
const duplicateKeyError = {
name: 'MongoServerError',
message: 'Duplicate key error',
code: 11000,
keyValue: { email: 'test@example.com', username: 'testuser' },
errmsg:
'E11000 duplicate key error collection: test.users index: email_1 dup key: { email: "test@example.com" }',
} as MongoServerError;
};
ErrorController(duplicateKeyError, mockReq, mockRes);
errorController(duplicateKeyError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(409);
expect(mockRes.send).toHaveBeenCalledWith({
messages: 'An document with that ["email","username"] already exists.',
fields: '["email","username"]',
});
expect(logger.warn).toHaveBeenCalledWith(
'Duplicate key error: E11000 duplicate key error collection: test.users index: email_1 dup key: { email: "test@example.com" }',
);
expect(logger.error).toHaveBeenCalledWith('Duplicate key error:', duplicateKeyError.keyValue);
});
it('should handle error with code 11000 as string', () => {
const duplicateKeyError = {
name: 'MongoServerError',
message: 'Duplicate key error',
code: 11000,
code: '11000',
keyValue: { email: 'test@example.com' },
errmsg:
'E11000 duplicate key error collection: test.users index: email_1 dup key: { email: "test@example.com" }',
} as MongoServerError;
};
ErrorController(duplicateKeyError, mockReq, mockRes);
errorController(duplicateKeyError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(409);
expect(mockRes.send).toHaveBeenCalledWith({
@@ -153,9 +129,9 @@ describe('ErrorController', () => {
const syntaxError = {
statusCode: 400,
body: 'Invalid JSON syntax',
} as CustomError;
};
ErrorController(syntaxError, mockReq, mockRes);
errorController(syntaxError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(400);
expect(mockRes.send).toHaveBeenCalledWith('Invalid JSON syntax');
@@ -165,9 +141,9 @@ describe('ErrorController', () => {
const customError = {
statusCode: 422,
body: { error: 'Unprocessable entity' },
} as CustomError;
};
ErrorController(customError, mockReq, mockRes);
errorController(customError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(422);
expect(mockRes.send).toHaveBeenCalledWith({ error: 'Unprocessable entity' });
@@ -176,9 +152,9 @@ describe('ErrorController', () => {
it('should handle error with statusCode but no body', () => {
const partialError = {
statusCode: 400,
} as CustomError;
};
ErrorController(partialError, mockReq, mockRes);
errorController(partialError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(500);
expect(mockRes.send).toHaveBeenCalledWith('An unknown error occurred.');
@@ -187,9 +163,9 @@ describe('ErrorController', () => {
it('should handle error with body but no statusCode', () => {
const partialError = {
body: 'Some error message',
} as CustomError;
};
ErrorController(partialError, mockReq, mockRes);
errorController(partialError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(500);
expect(mockRes.send).toHaveBeenCalledWith('An unknown error occurred.');
@@ -200,7 +176,7 @@ describe('ErrorController', () => {
it('should handle unknown errors', () => {
const unknownError = new Error('Some unknown error');
ErrorController(unknownError, mockReq, mockRes);
errorController(unknownError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(500);
expect(mockRes.send).toHaveBeenCalledWith('An unknown error occurred.');
@@ -211,31 +187,32 @@ describe('ErrorController', () => {
const mongoError = {
code: 11100,
message: 'Some MongoDB error',
} as MongoServerError;
};
ErrorController(mongoError, mockReq, mockRes);
errorController(mongoError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(500);
expect(mockRes.send).toHaveBeenCalledWith('An unknown error occurred.');
expect(logger.error).toHaveBeenCalledWith('ErrorController => error', mongoError);
});
it('should handle generic errors', () => {
const genericError = new Error('Test error');
ErrorController(genericError, mockReq, mockRes);
it('should handle null/undefined errors', () => {
errorController(null, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(500);
expect(mockRes.send).toHaveBeenCalledWith('An unknown error occurred.');
expect(logger.error).toHaveBeenCalledWith('ErrorController => error', genericError);
expect(mockRes.send).toHaveBeenCalledWith('Processing error in ErrorController.');
expect(logger.error).toHaveBeenCalledWith(
'ErrorController => processing error',
expect.any(Error),
);
});
});
describe('Catch block handling', () => {
beforeEach(() => {
// Restore logger mock to normal behavior for these tests
(logger.error as jest.Mock).mockRestore();
(logger.error as jest.Mock) = jest.fn();
logger.error.mockRestore();
logger.error = jest.fn();
});
it('should handle errors when logger.error throws', () => {
@@ -243,10 +220,10 @@ describe('ErrorController', () => {
const freshMockRes = {
status: jest.fn().mockReturnThis(),
send: jest.fn(),
} as unknown as Response;
};
// Mock logger to throw on the first call, succeed on the second
(logger.error as jest.Mock)
logger.error
.mockImplementationOnce(() => {
throw new Error('Logger error');
})
@@ -254,7 +231,7 @@ describe('ErrorController', () => {
const testError = new Error('Test error');
ErrorController(testError, mockReq, freshMockRes);
errorController(testError, mockReq, freshMockRes, mockNext);
expect(freshMockRes.status).toHaveBeenCalledWith(500);
expect(freshMockRes.send).toHaveBeenCalledWith('Processing error in ErrorController.');

View File

@@ -5,7 +5,6 @@ const { logger } = require('~/config');
/**
* @param {ServerRequest} req
* @returns {Promise<TModelsConfig>} The models config.
*/
const getModelsConfig = async (req) => {
const cache = getLogStores(CacheKeys.CONFIG_STORE);

View File

@@ -1,16 +1,54 @@
const { logger } = require('@librechat/data-schemas');
const { CacheKeys, Constants } = require('librechat-data-provider');
const {
getToolkitKey,
checkPluginAuth,
filterUniquePlugins,
convertMCPToolsToPlugins,
} = require('@librechat/api');
const { CacheKeys, AuthType, Constants } = require('librechat-data-provider');
const { getCustomConfig, getCachedTools } = require('~/server/services/Config');
const { availableTools, toolkits } = require('~/app/clients/tools');
const { getToolkitKey } = require('~/server/services/ToolService');
const { getMCPManager, getFlowStateManager } = require('~/config');
const { availableTools } = require('~/app/clients/tools');
const { getLogStores } = require('~/cache');
/**
* Filters out duplicate plugins from the list of plugins.
*
* @param {TPlugin[]} plugins The list of plugins to filter.
* @returns {TPlugin[]} The list of plugins with duplicates removed.
*/
const filterUniquePlugins = (plugins) => {
const seen = new Set();
return plugins.filter((plugin) => {
const duplicate = seen.has(plugin.pluginKey);
seen.add(plugin.pluginKey);
return !duplicate;
});
};
/**
* Determines if a plugin is authenticated by checking if all required authentication fields have non-empty values.
* Supports alternate authentication fields, allowing validation against multiple possible environment variables.
*
* @param {TPlugin} plugin The plugin object containing the authentication configuration.
* @returns {boolean} True if the plugin is authenticated for all required fields, false otherwise.
*/
const checkPluginAuth = (plugin) => {
if (!plugin.authConfig || plugin.authConfig.length === 0) {
return false;
}
return plugin.authConfig.every((authFieldObj) => {
const authFieldOptions = authFieldObj.authField.split('||');
let isFieldAuthenticated = false;
for (const fieldOption of authFieldOptions) {
const envValue = process.env[fieldOption];
if (envValue && envValue.trim() !== '' && envValue !== AuthType.USER_PROVIDED) {
isFieldAuthenticated = true;
break;
}
}
return isFieldAuthenticated;
});
};
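// Example of the alternate-field syntax checkPluginAuth understands (the env
// var names below are illustrative): one authField may list several variables
// separated by '||', and any one of them satisfies the field:
//   authConfig: [{ authField: 'SEARCH_API_KEY||ALT_SEARCH_API_KEY' }]
// The field counts as authenticated when either variable holds a non-empty
// value other than the AuthType.USER_PROVIDED sentinel.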
const getAvailablePluginsController = async (req, res) => {
try {
const cache = getLogStores(CacheKeys.CONFIG_STORE);
@@ -105,9 +143,9 @@ const getAvailableTools = async (req, res) => {
const cache = getLogStores(CacheKeys.CONFIG_STORE);
const cachedToolsArray = await cache.get(CacheKeys.TOOLS);
const cachedUserTools = await getCachedTools({ userId });
const userPlugins = convertMCPToolsToPlugins({ functionTools: cachedUserTools, customConfig });
const userPlugins = convertMCPToolsToPlugins(cachedUserTools, customConfig);
if (cachedToolsArray != null && userPlugins != null) {
if (cachedToolsArray && userPlugins) {
const dedupedTools = filterUniquePlugins([...userPlugins, ...cachedToolsArray]);
res.status(200).json(dedupedTools);
return;
@@ -147,9 +185,7 @@ const getAvailableTools = async (req, res) => {
const isToolDefined = toolDefinitions[plugin.pluginKey] !== undefined;
const isToolkit =
plugin.toolkit === true &&
Object.keys(toolDefinitions).some(
(key) => getToolkitKey({ toolkits, toolName: key }) === plugin.pluginKey,
);
Object.keys(toolDefinitions).some((key) => getToolkitKey(key) === plugin.pluginKey);
if (!isToolDefined && !isToolkit) {
continue;
@@ -199,6 +235,58 @@ const getAvailableTools = async (req, res) => {
}
};
/**
* Converts MCP function format tools to plugin format
* @param {Object} functionTools - Object with function format tools
* @param {Object} customConfig - Custom configuration for MCP servers
* @returns {Array} Array of plugin objects
*/
function convertMCPToolsToPlugins(functionTools, customConfig) {
const plugins = [];
for (const [toolKey, toolData] of Object.entries(functionTools)) {
if (!toolData.function || !toolKey.includes(Constants.mcp_delimiter)) {
continue;
}
const functionData = toolData.function;
const parts = toolKey.split(Constants.mcp_delimiter);
const serverName = parts[parts.length - 1];
const serverConfig = customConfig?.mcpServers?.[serverName];
const plugin = {
name: parts[0], // Use the tool name without server suffix
pluginKey: toolKey,
description: functionData.description || '',
authenticated: true,
icon: serverConfig?.iconPath,
};
// Build authConfig for MCP tools
if (!serverConfig?.customUserVars) {
plugin.authConfig = [];
plugins.push(plugin);
continue;
}
const customVarKeys = Object.keys(serverConfig.customUserVars);
if (customVarKeys.length === 0) {
plugin.authConfig = [];
} else {
plugin.authConfig = Object.entries(serverConfig.customUserVars).map(([key, value]) => ({
authField: key,
label: value.title || key,
description: value.description || '',
}));
}
plugins.push(plugin);
}
return plugins;
}
module.exports = {
getAvailableTools,
getAvailablePluginsController,

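For reference, a worked example of the MCP conversion above (the server name,
icon path, and variable names are illustrative; the delimiter value comes from
Constants.mcp_delimiter):

const functionTools = {
  [`search${Constants.mcp_delimiter}my-server`]: {
    function: { name: 'search', description: 'Searches things' },
  },
};
const customConfig = {
  mcpServers: {
    'my-server': {
      iconPath: '/assets/my-server.png',
      customUserVars: { API_KEY: { title: 'API Key', description: 'Your key' } },
    },
  },
};
// convertMCPToolsToPlugins(functionTools, customConfig) yields roughly:
// [{
//   name: 'search',
//   pluginKey: `search${Constants.mcp_delimiter}my-server`,
//   description: 'Searches things',
//   authenticated: true,
//   icon: '/assets/my-server.png',
//   authConfig: [{ authField: 'API_KEY', label: 'API Key', description: 'Your key' }],
// }]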
View File

@@ -28,211 +28,19 @@ jest.mock('~/config', () => ({
jest.mock('~/app/clients/tools', () => ({
availableTools: [],
toolkits: [],
}));
jest.mock('~/cache', () => ({
getLogStores: jest.fn(),
}));
jest.mock('@librechat/api', () => ({
getToolkitKey: jest.fn(),
checkPluginAuth: jest.fn(),
filterUniquePlugins: jest.fn(),
convertMCPToolsToPlugins: jest.fn(),
}));
// Import the actual module with the function we want to test
const { getAvailableTools, getAvailablePluginsController } = require('./PluginController');
const {
filterUniquePlugins,
checkPluginAuth,
convertMCPToolsToPlugins,
getToolkitKey,
} = require('@librechat/api');
const { getAvailableTools } = require('./PluginController');
describe('PluginController', () => {
let mockReq, mockRes, mockCache;
beforeEach(() => {
jest.clearAllMocks();
mockReq = { user: { id: 'test-user-id' } };
mockRes = { status: jest.fn().mockReturnThis(), json: jest.fn() };
mockCache = { get: jest.fn(), set: jest.fn() };
getLogStores.mockReturnValue(mockCache);
});
describe('getAvailablePluginsController', () => {
beforeEach(() => {
mockReq.app = { locals: { filteredTools: [], includedTools: [] } };
});
it('should use filterUniquePlugins to remove duplicate plugins', async () => {
const mockPlugins = [
{ name: 'Plugin1', pluginKey: 'key1', description: 'First' },
{ name: 'Plugin2', pluginKey: 'key2', description: 'Second' },
];
mockCache.get.mockResolvedValue(null);
filterUniquePlugins.mockReturnValue(mockPlugins);
checkPluginAuth.mockReturnValue(true);
await getAvailablePluginsController(mockReq, mockRes);
expect(filterUniquePlugins).toHaveBeenCalled();
expect(mockRes.status).toHaveBeenCalledWith(200);
// The response includes authenticated: true for each plugin when checkPluginAuth returns true
expect(mockRes.json).toHaveBeenCalledWith([
{ name: 'Plugin1', pluginKey: 'key1', description: 'First', authenticated: true },
{ name: 'Plugin2', pluginKey: 'key2', description: 'Second', authenticated: true },
]);
});
it('should use checkPluginAuth to verify plugin authentication', async () => {
const mockPlugin = { name: 'Plugin1', pluginKey: 'key1', description: 'First' };
mockCache.get.mockResolvedValue(null);
filterUniquePlugins.mockReturnValue([mockPlugin]);
checkPluginAuth.mockReturnValueOnce(true);
await getAvailablePluginsController(mockReq, mockRes);
expect(checkPluginAuth).toHaveBeenCalledWith(mockPlugin);
const responseData = mockRes.json.mock.calls[0][0];
expect(responseData[0].authenticated).toBe(true);
});
it('should return cached plugins when available', async () => {
const cachedPlugins = [
{ name: 'CachedPlugin', pluginKey: 'cached', description: 'Cached plugin' },
];
mockCache.get.mockResolvedValue(cachedPlugins);
await getAvailablePluginsController(mockReq, mockRes);
expect(filterUniquePlugins).not.toHaveBeenCalled();
expect(checkPluginAuth).not.toHaveBeenCalled();
expect(mockRes.json).toHaveBeenCalledWith(cachedPlugins);
});
it('should filter plugins based on includedTools', async () => {
const mockPlugins = [
{ name: 'Plugin1', pluginKey: 'key1', description: 'First' },
{ name: 'Plugin2', pluginKey: 'key2', description: 'Second' },
];
mockReq.app.locals.includedTools = ['key1'];
mockCache.get.mockResolvedValue(null);
filterUniquePlugins.mockReturnValue(mockPlugins);
checkPluginAuth.mockReturnValue(false);
await getAvailablePluginsController(mockReq, mockRes);
const responseData = mockRes.json.mock.calls[0][0];
expect(responseData).toHaveLength(1);
expect(responseData[0].pluginKey).toBe('key1');
});
});
describe('getAvailableTools', () => {
it('should use convertMCPToolsToPlugins for user-specific MCP tools', async () => {
const mockUserTools = {
[`tool1${Constants.mcp_delimiter}server1`]: {
function: { name: 'tool1', description: 'Tool 1' },
},
};
const mockConvertedPlugins = [
{
name: 'tool1',
pluginKey: `tool1${Constants.mcp_delimiter}server1`,
description: 'Tool 1',
},
];
mockCache.get.mockResolvedValue(null);
getCachedTools.mockResolvedValueOnce(mockUserTools);
convertMCPToolsToPlugins.mockReturnValue(mockConvertedPlugins);
filterUniquePlugins.mockImplementation((plugins) => plugins);
getCustomConfig.mockResolvedValue(null);
await getAvailableTools(mockReq, mockRes);
expect(convertMCPToolsToPlugins).toHaveBeenCalledWith({
functionTools: mockUserTools,
customConfig: null,
});
});
it('should use filterUniquePlugins to deduplicate combined tools', async () => {
const mockUserPlugins = [
{ name: 'UserTool', pluginKey: 'user-tool', description: 'User tool' },
];
const mockManifestPlugins = [
{ name: 'ManifestTool', pluginKey: 'manifest-tool', description: 'Manifest tool' },
];
mockCache.get.mockResolvedValue(mockManifestPlugins);
getCachedTools.mockResolvedValueOnce({});
convertMCPToolsToPlugins.mockReturnValue(mockUserPlugins);
filterUniquePlugins.mockReturnValue([...mockUserPlugins, ...mockManifestPlugins]);
getCustomConfig.mockResolvedValue(null);
await getAvailableTools(mockReq, mockRes);
// Should be called to deduplicate the combined array
expect(filterUniquePlugins).toHaveBeenLastCalledWith([
...mockUserPlugins,
...mockManifestPlugins,
]);
});
it('should use checkPluginAuth to verify authentication status', async () => {
const mockPlugin = { name: 'Tool1', pluginKey: 'tool1', description: 'Tool 1' };
mockCache.get.mockResolvedValue(null);
getCachedTools.mockResolvedValue({});
convertMCPToolsToPlugins.mockReturnValue([]);
filterUniquePlugins.mockReturnValue([mockPlugin]);
checkPluginAuth.mockReturnValue(true);
getCustomConfig.mockResolvedValue(null);
// Mock getCachedTools second call to return tool definitions
getCachedTools.mockResolvedValueOnce({}).mockResolvedValueOnce({ tool1: true });
await getAvailableTools(mockReq, mockRes);
expect(checkPluginAuth).toHaveBeenCalledWith(mockPlugin);
});
it('should use getToolkitKey for toolkit validation', async () => {
const mockToolkit = {
name: 'Toolkit1',
pluginKey: 'toolkit1',
description: 'Toolkit 1',
toolkit: true,
};
mockCache.get.mockResolvedValue(null);
getCachedTools.mockResolvedValue({});
convertMCPToolsToPlugins.mockReturnValue([]);
filterUniquePlugins.mockReturnValue([mockToolkit]);
checkPluginAuth.mockReturnValue(false);
getToolkitKey.mockReturnValue('toolkit1');
getCustomConfig.mockResolvedValue(null);
// Mock getCachedTools second call to return tool definitions
getCachedTools.mockResolvedValueOnce({}).mockResolvedValueOnce({
toolkit1_function: true,
});
await getAvailableTools(mockReq, mockRes);
expect(getToolkitKey).toHaveBeenCalled();
});
});
describe('plugin.icon behavior', () => {
let mockReq, mockRes, mockCache;
const callGetAvailableToolsWithMCPServer = async (mcpServers) => {
mockCache.get.mockResolvedValue(null);
getCustomConfig.mockResolvedValue({ mcpServers });
@@ -242,22 +50,7 @@ describe('PluginController', () => {
function: { name: 'test-tool', description: 'A test tool' },
},
};
const mockConvertedPlugin = {
name: 'test-tool',
pluginKey: `test-tool${Constants.mcp_delimiter}test-server`,
description: 'A test tool',
icon: mcpServers['test-server']?.iconPath,
authenticated: true,
authConfig: [],
};
getCachedTools.mockResolvedValueOnce(functionTools);
convertMCPToolsToPlugins.mockReturnValue([mockConvertedPlugin]);
filterUniquePlugins.mockImplementation((plugins) => plugins);
checkPluginAuth.mockReturnValue(true);
getToolkitKey.mockReturnValue(undefined);
getCachedTools.mockResolvedValueOnce({
[`test-tool${Constants.mcp_delimiter}test-server`]: true,
});
@@ -267,6 +60,14 @@ describe('PluginController', () => {
return responseData.find((tool) => tool.name === 'test-tool');
};
beforeEach(() => {
jest.clearAllMocks();
mockReq = { user: { id: 'test-user-id' } };
mockRes = { status: jest.fn().mockReturnThis(), json: jest.fn() };
mockCache = { get: jest.fn(), set: jest.fn() };
getLogStores.mockReturnValue(mockCache);
});
it('should set plugin.icon when iconPath is defined', async () => {
const mcpServers = {
'test-server': {
@@ -285,236 +86,4 @@ describe('PluginController', () => {
expect(testTool.icon).toBeUndefined();
});
});
describe('helper function integration', () => {
it('should properly handle MCP tools with custom user variables', async () => {
const customConfig = {
mcpServers: {
'test-server': {
customUserVars: {
API_KEY: { title: 'API Key', description: 'Your API key' },
},
},
},
};
// We need to test the actual flow where MCP manager tools are included
const mcpManagerTools = [
{
name: 'tool1',
pluginKey: `tool1${Constants.mcp_delimiter}test-server`,
description: 'Tool 1',
authenticated: true,
},
];
// Mock the MCP manager to return tools
const mockMCPManager = {
loadManifestTools: jest.fn().mockResolvedValue(mcpManagerTools),
};
require('~/config').getMCPManager.mockReturnValue(mockMCPManager);
mockCache.get.mockResolvedValue(null);
getCustomConfig.mockResolvedValue(customConfig);
// First call returns user tools (empty in this case)
getCachedTools.mockResolvedValueOnce({});
// Mock convertMCPToolsToPlugins to return empty array for user tools
convertMCPToolsToPlugins.mockReturnValue([]);
// Mock filterUniquePlugins to pass through
filterUniquePlugins.mockImplementation((plugins) => plugins || []);
// Mock checkPluginAuth
checkPluginAuth.mockReturnValue(true);
// Second call returns tool definitions
getCachedTools.mockResolvedValueOnce({
[`tool1${Constants.mcp_delimiter}test-server`]: true,
});
await getAvailableTools(mockReq, mockRes);
const responseData = mockRes.json.mock.calls[0][0];
// Find the MCP tool in the response
const mcpTool = responseData.find(
(tool) => tool.pluginKey === `tool1${Constants.mcp_delimiter}test-server`,
);
// The actual implementation adds authConfig and sets authenticated to false when customUserVars exist
expect(mcpTool).toBeDefined();
expect(mcpTool.authConfig).toEqual([
{ authField: 'API_KEY', label: 'API Key', description: 'Your API key' },
]);
expect(mcpTool.authenticated).toBe(false);
});
it('should handle error cases gracefully', async () => {
mockCache.get.mockRejectedValue(new Error('Cache error'));
await getAvailableTools(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(500);
expect(mockRes.json).toHaveBeenCalledWith({ message: 'Cache error' });
});
});
describe('edge cases with undefined/null values', () => {
it('should handle undefined cache gracefully', async () => {
getLogStores.mockReturnValue(undefined);
await getAvailableTools(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(500);
});
it('should handle null cachedTools and cachedUserTools', async () => {
mockCache.get.mockResolvedValue(null);
getCachedTools.mockResolvedValue(null);
convertMCPToolsToPlugins.mockReturnValue(undefined);
filterUniquePlugins.mockImplementation((plugins) => plugins || []);
getCustomConfig.mockResolvedValue(null);
await getAvailableTools(mockReq, mockRes);
expect(convertMCPToolsToPlugins).toHaveBeenCalledWith({
functionTools: null,
customConfig: null,
});
});
it('should handle when getCachedTools returns undefined', async () => {
mockCache.get.mockResolvedValue(null);
getCachedTools.mockResolvedValue(undefined);
convertMCPToolsToPlugins.mockReturnValue(undefined);
filterUniquePlugins.mockImplementation((plugins) => plugins || []);
getCustomConfig.mockResolvedValue(null);
checkPluginAuth.mockReturnValue(false);
// Mock getCachedTools to return undefined for both calls
getCachedTools.mockReset();
getCachedTools.mockResolvedValueOnce(undefined).mockResolvedValueOnce(undefined);
await getAvailableTools(mockReq, mockRes);
expect(convertMCPToolsToPlugins).toHaveBeenCalledWith({
functionTools: undefined,
customConfig: null,
});
});
it('should handle cachedToolsArray and userPlugins both being defined', async () => {
const cachedTools = [{ name: 'CachedTool', pluginKey: 'cached-tool', description: 'Cached' }];
const userTools = {
'user-tool': { function: { name: 'user-tool', description: 'User tool' } },
};
const userPlugins = [{ name: 'UserTool', pluginKey: 'user-tool', description: 'User tool' }];
mockCache.get.mockResolvedValue(cachedTools);
getCachedTools.mockResolvedValue(userTools);
convertMCPToolsToPlugins.mockReturnValue(userPlugins);
filterUniquePlugins.mockReturnValue([...userPlugins, ...cachedTools]);
await getAvailableTools(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(200);
expect(mockRes.json).toHaveBeenCalledWith([...userPlugins, ...cachedTools]);
});
it('should handle empty toolDefinitions object', async () => {
mockCache.get.mockResolvedValue(null);
getCachedTools.mockResolvedValueOnce({}).mockResolvedValueOnce({});
convertMCPToolsToPlugins.mockReturnValue([]);
filterUniquePlugins.mockImplementation((plugins) => plugins || []);
getCustomConfig.mockResolvedValue(null);
checkPluginAuth.mockReturnValue(true);
await getAvailableTools(mockReq, mockRes);
// With empty tool definitions, no tools should be in the final output
expect(mockRes.json).toHaveBeenCalledWith([]);
});
it('should handle MCP tools without customUserVars', async () => {
const customConfig = {
mcpServers: {
'test-server': {
// No customUserVars defined
},
},
};
const mockUserTools = {
[`tool1${Constants.mcp_delimiter}test-server`]: {
function: { name: 'tool1', description: 'Tool 1' },
},
};
mockCache.get.mockResolvedValue(null);
getCustomConfig.mockResolvedValue(customConfig);
getCachedTools.mockResolvedValueOnce(mockUserTools);
const mockPlugin = {
name: 'tool1',
pluginKey: `tool1${Constants.mcp_delimiter}test-server`,
description: 'Tool 1',
authenticated: true,
authConfig: [],
};
convertMCPToolsToPlugins.mockReturnValue([mockPlugin]);
filterUniquePlugins.mockImplementation((plugins) => plugins);
checkPluginAuth.mockReturnValue(true);
getCachedTools.mockResolvedValueOnce({
[`tool1${Constants.mcp_delimiter}test-server`]: true,
});
await getAvailableTools(mockReq, mockRes);
const responseData = mockRes.json.mock.calls[0][0];
expect(responseData[0].authenticated).toBe(true);
// The actual implementation doesn't set authConfig on tools without customUserVars
expect(responseData[0].authConfig).toEqual([]);
});
it('should handle req.app.locals with undefined filteredTools and includedTools', async () => {
mockReq.app = { locals: {} };
mockCache.get.mockResolvedValue(null);
filterUniquePlugins.mockReturnValue([]);
checkPluginAuth.mockReturnValue(false);
await getAvailablePluginsController(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(200);
expect(mockRes.json).toHaveBeenCalledWith([]);
});
it('should handle toolkit with undefined toolDefinitions keys', async () => {
const mockToolkit = {
name: 'Toolkit1',
pluginKey: 'toolkit1',
description: 'Toolkit 1',
toolkit: true,
};
mockCache.get.mockResolvedValue(null);
getCachedTools.mockResolvedValue({});
convertMCPToolsToPlugins.mockReturnValue([]);
filterUniquePlugins.mockReturnValue([mockToolkit]);
checkPluginAuth.mockReturnValue(false);
getToolkitKey.mockReturnValue(undefined);
getCustomConfig.mockResolvedValue(null);
// Mock getCachedTools second call to return null
getCachedTools.mockResolvedValueOnce({}).mockResolvedValueOnce(null);
await getAvailableTools(mockReq, mockRes);
// Should handle null toolDefinitions gracefully
expect(mockRes.status).toHaveBeenCalledWith(200);
});
});
});

View File

@@ -99,36 +99,10 @@ const confirm2FA = async (req, res) => {
/**
* Disable 2FA by clearing the stored secret and backup codes.
* Requires verification with either TOTP token or backup code if 2FA is fully enabled.
*/
const disable2FA = async (req, res) => {
try {
const userId = req.user.id;
const { token, backupCode } = req.body;
const user = await getUserById(userId);
if (!user || !user.totpSecret) {
return res.status(400).json({ message: '2FA is not setup for this user' });
}
if (user.twoFactorEnabled) {
const secret = await getTOTPSecret(user.totpSecret);
let isVerified = false;
if (token) {
isVerified = await verifyTOTP(secret, token);
} else if (backupCode) {
isVerified = await verifyBackupCode({ user, backupCode });
} else {
return res
.status(400)
.json({ message: 'Either token or backup code is required to disable 2FA' });
}
if (!isVerified) {
return res.status(401).json({ message: 'Invalid token or backup code' });
}
}
await updateUser(userId, { totpSecret: null, backupCodes: [], twoFactorEnabled: false });
return res.status(200).json();
} catch (err) {

View File

@@ -1,5 +1,5 @@
const { logger } = require('@librechat/data-schemas');
const { webSearchKeys, extractWebSearchEnvVars, normalizeHttpError } = require('@librechat/api');
const { webSearchKeys, extractWebSearchEnvVars } = require('@librechat/api');
const {
getFiles,
updateUser,
@@ -89,8 +89,8 @@ const updateUserPluginsController = async (req, res) => {
if (userPluginsService instanceof Error) {
logger.error('[userPluginsService]', userPluginsService);
const { status, message } = normalizeHttpError(userPluginsService);
return res.status(status).send({ message });
const { status, message } = userPluginsService;
res.status(status).send({ message });
}
}
@@ -137,7 +137,7 @@ const updateUserPluginsController = async (req, res) => {
authService = await updateUserPluginAuth(user.id, keys[i], pluginKey, values[i]);
if (authService instanceof Error) {
logger.error('[authService]', authService);
({ status, message } = normalizeHttpError(authService));
({ status, message } = authService);
}
}
} else if (action === 'uninstall') {
@@ -151,7 +151,7 @@ const updateUserPluginsController = async (req, res) => {
`[authService] Error deleting all auth for MCP tool ${pluginKey}:`,
authService,
);
({ status, message } = normalizeHttpError(authService));
({ status, message } = authService);
}
} else {
// This handles:
@@ -163,7 +163,7 @@ const updateUserPluginsController = async (req, res) => {
authService = await deleteUserPluginAuth(user.id, keys[i]); // Deletes by authField name
if (authService instanceof Error) {
logger.error('[authService] Error deleting specific auth key:', authService);
({ status, message } = normalizeHttpError(authService));
({ status, message } = authService);
}
}
}
@@ -193,8 +193,7 @@ const updateUserPluginsController = async (req, res) => {
return res.status(status).send();
}
const normalized = normalizeHttpError({ status, message });
return res.status(normalized.status).send({ message: normalized.message });
res.status(status).send({ message });
} catch (err) {
logger.error('[updateUserPluginsController]', err);
return res.status(500).json({ message: 'Something went wrong.' });

View File

@@ -226,42 +226,6 @@ class AgentClient extends BaseClient {
return files;
}
async addDocuments(message, attachments) {
const documentResult =
await require('~/server/services/Files/documents').encodeAndFormatDocuments(
this.options.req,
attachments,
this.options.agent.provider,
);
message.documents =
documentResult.documents && documentResult.documents.length
? documentResult.documents
: undefined;
return documentResult.files;
}
async processAttachments(message, attachments) {
const [imageFiles, documentFiles] = await Promise.all([
this.addImageURLs(message, attachments),
this.addDocuments(message, attachments),
]);
const allFiles = [...imageFiles, ...documentFiles];
const seenFileIds = new Set();
const uniqueFiles = [];
for (const file of allFiles) {
if (file.file_id && !seenFileIds.has(file.file_id)) {
seenFileIds.add(file.file_id);
uniqueFiles.push(file);
} else if (!file.file_id) {
uniqueFiles.push(file);
}
}
return uniqueFiles;
}
async buildMessages(
messages,
parentMessageId,
@@ -295,7 +259,7 @@ class AgentClient extends BaseClient {
};
}
const files = await this.processAttachments(
const files = await this.addImageURLs(
orderedMessages[orderedMessages.length - 1],
attachments,
);
@@ -318,23 +282,6 @@ class AgentClient extends BaseClient {
assistantName: this.options?.modelLabel,
});
if (
message.documents &&
message.documents.length > 0 &&
message.role === 'user' &&
this.options.agent.provider === EModelEndpoint.anthropic
) {
const contentParts = [];
contentParts.push(...message.documents);
if (message.image_urls && message.image_urls.length > 0) {
contentParts.push(...message.image_urls);
}
const textContent =
typeof formattedMessage.content === 'string' ? formattedMessage.content : '';
contentParts.push({ type: 'text', text: textContent });
formattedMessage.content = contentParts;
}
if (message.ocr && i !== orderedMessages.length - 1) {
if (typeof formattedMessage.content === 'string') {
formattedMessage.content = message.ocr + '\n' + formattedMessage.content;
@@ -455,34 +402,6 @@ class AgentClient extends BaseClient {
return result;
}
/**
* Creates a promise that resolves with the memory promise result or undefined after a timeout
* @param {Promise<(TAttachment | null)[] | undefined>} memoryPromise - The memory promise to await
* @param {number} timeoutMs - Timeout in milliseconds (default: 3000)
* @returns {Promise<(TAttachment | null)[] | undefined>}
*/
async awaitMemoryWithTimeout(memoryPromise, timeoutMs = 3000) {
if (!memoryPromise) {
return;
}
try {
const timeoutPromise = new Promise((_, reject) =>
setTimeout(() => reject(new Error('Memory processing timeout')), timeoutMs),
);
const attachments = await Promise.race([memoryPromise, timeoutPromise]);
return attachments;
} catch (error) {
if (error.message === 'Memory processing timeout') {
logger.warn('[AgentClient] Memory processing timed out after 3 seconds');
} else {
logger.error('[AgentClient] Error processing memory:', error);
}
return;
}
}
/**
* @returns {Promise<string | undefined>}
*/
@@ -593,39 +512,6 @@ class AgentClient extends BaseClient {
return withoutKeys;
}
/**
* Filters out image URLs from message content
* @param {BaseMessage} message - The message to filter
* @returns {BaseMessage} - A new message with image URLs removed
*/
filterImageUrls(message) {
if (!message.content || typeof message.content === 'string') {
return message;
}
if (Array.isArray(message.content)) {
const filteredContent = message.content.filter(
(part) => part.type !== ContentTypes.IMAGE_URL,
);
if (filteredContent.length === 1 && filteredContent[0].type === ContentTypes.TEXT) {
const MessageClass = message.constructor;
return new MessageClass({
content: filteredContent[0].text,
additional_kwargs: message.additional_kwargs,
});
}
const MessageClass = message.constructor;
return new MessageClass({
content: filteredContent,
additional_kwargs: message.additional_kwargs,
});
}
return message;
}
/**
* @param {BaseMessage[]} messages
* @returns {Promise<void | (TAttachment | null)[]>}
@@ -654,8 +540,7 @@ class AgentClient extends BaseClient {
}
}
const filteredMessages = messagesToProcess.map((msg) => this.filterImageUrls(msg));
const bufferString = getBufferString(filteredMessages);
const bufferString = getBufferString(messagesToProcess);
const bufferMessage = new HumanMessage(`# Current Chat:\n\n${bufferString}`);
return await this.processMemory([bufferMessage]);
} catch (error) {
@@ -830,51 +715,6 @@ class AgentClient extends BaseClient {
};
const toolSet = new Set((this.options.agent.tools ?? []).map((tool) => tool && tool.name));
if (
this.options.agent.provider === EModelEndpoint.anthropic &&
payload &&
Array.isArray(payload)
) {
let userMessageWithDocs = null;
if (this.userMessage?.documents) {
userMessageWithDocs = this.userMessage;
} else if (this.currentMessages?.length > 0) {
const lastMessage = this.currentMessages[this.currentMessages.length - 1];
if (lastMessage.documents?.length > 0) {
userMessageWithDocs = lastMessage;
}
} else if (this.messages?.length > 0) {
const lastMessage = this.messages[this.messages.length - 1];
if (lastMessage.documents?.length > 0) {
userMessageWithDocs = lastMessage;
}
}
if (userMessageWithDocs) {
for (const payloadMessage of payload) {
if (
payloadMessage.role === 'user' &&
userMessageWithDocs.text === payloadMessage.content
) {
if (typeof payloadMessage.content === 'string') {
payloadMessage.content = [
...userMessageWithDocs.documents,
{ type: 'text', text: payloadMessage.content },
];
} else if (Array.isArray(payloadMessage.content)) {
payloadMessage.content = [
...userMessageWithDocs.documents,
...payloadMessage.content,
];
}
break;
}
}
}
}
let { messages: initialMessages, indexTokenCountMap } = formatAgentMessages(
payload,
this.indexTokenCountMap,
@@ -1128,9 +968,11 @@ class AgentClient extends BaseClient {
});
try {
const attachments = await this.awaitMemoryWithTimeout(memoryPromise);
if (attachments && attachments.length > 0) {
this.artifactPromises.push(...attachments);
if (memoryPromise) {
const attachments = await memoryPromise;
if (attachments && attachments.length > 0) {
this.artifactPromises.push(...attachments);
}
}
await this.recordCollectedUsage({ context: 'message' });
} catch (err) {
@@ -1140,9 +982,11 @@ class AgentClient extends BaseClient {
);
}
} catch (err) {
const attachments = await this.awaitMemoryWithTimeout(memoryPromise);
if (attachments && attachments.length > 0) {
this.artifactPromises.push(...attachments);
if (memoryPromise) {
const attachments = await memoryPromise;
if (attachments && attachments.length > 0) {
this.artifactPromises.push(...attachments);
}
}
logger.error(
'[api/server/controllers/agents/client.js #sendCompletion] Operation aborted',
@@ -1244,16 +1088,11 @@ class AgentClient extends BaseClient {
clientOptions.configuration = options.configOptions;
}
const shouldRemoveMaxTokens = /\b(o\d|gpt-[5-9])\b/i.test(clientOptions.model);
if (shouldRemoveMaxTokens && clientOptions.maxTokens != null) {
delete clientOptions.maxTokens;
} else if (!shouldRemoveMaxTokens && !clientOptions.maxTokens) {
// Ensure maxTokens is set for non-o1 models
if (!/\b(o\d)\b/i.test(clientOptions.model) && !clientOptions.maxTokens) {
clientOptions.maxTokens = 75;
}
if (shouldRemoveMaxTokens && clientOptions?.modelKwargs?.max_completion_tokens != null) {
delete clientOptions.modelKwargs.max_completion_tokens;
} else if (shouldRemoveMaxTokens && clientOptions?.modelKwargs?.max_output_tokens != null) {
delete clientOptions.modelKwargs.max_output_tokens;
} else if (/\b(o\d)\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
delete clientOptions.maxTokens;
}
clientOptions = Object.assign(
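
The final hunk of this file is the crux of its options handling: one variant tests for o-series and gpt-5 through gpt-9 models with a single regex, strips maxTokens for them, and also clears any max_completion_tokens or max_output_tokens already sitting in modelKwargs, while keeping a small default for other models; the other variant only handles the o-series case. Restated as a standalone helper (a sketch; in the real code this runs inline in getOptions):

// Restatement of the regex-gated token-parameter cleanup from the hunk above.
function normalizeTokenParams(clientOptions) {
  const shouldRemoveMaxTokens = /\b(o\d|gpt-[5-9])\b/i.test(clientOptions.model);
  if (shouldRemoveMaxTokens && clientOptions.maxTokens != null) {
    delete clientOptions.maxTokens;
  } else if (!shouldRemoveMaxTokens && !clientOptions.maxTokens) {
    clientOptions.maxTokens = 75; // small default for non-reasoning models
  }
  if (shouldRemoveMaxTokens && clientOptions.modelKwargs?.max_completion_tokens != null) {
    delete clientOptions.modelKwargs.max_completion_tokens;
  } else if (shouldRemoveMaxTokens && clientOptions.modelKwargs?.max_output_tokens != null) {
    delete clientOptions.modelKwargs.max_output_tokens;
  }
  return clientOptions;
}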

View File

@@ -727,464 +727,4 @@ describe('AgentClient - titleConvo', () => {
});
});
});
describe('getOptions method - GPT-5+ model handling', () => {
let mockReq;
let mockRes;
let mockAgent;
let mockOptions;
beforeEach(() => {
jest.clearAllMocks();
mockAgent = {
id: 'agent-123',
endpoint: EModelEndpoint.openAI,
provider: EModelEndpoint.openAI,
model_parameters: {
model: 'gpt-5',
},
};
mockReq = {
app: {
locals: {},
},
user: {
id: 'user-123',
},
};
mockRes = {};
mockOptions = {
req: mockReq,
res: mockRes,
agent: mockAgent,
};
client = new AgentClient(mockOptions);
});
it('should move maxTokens to modelKwargs.max_completion_tokens for GPT-5 models', () => {
const clientOptions = {
model: 'gpt-5',
maxTokens: 2048,
temperature: 0.7,
};
// Simulate the getOptions logic that handles GPT-5+ models
if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
delete clientOptions.maxTokens;
}
expect(clientOptions.maxTokens).toBeUndefined();
expect(clientOptions.modelKwargs).toBeDefined();
expect(clientOptions.modelKwargs.max_completion_tokens).toBe(2048);
expect(clientOptions.temperature).toBe(0.7); // Other options should remain
});
it('should move maxTokens to modelKwargs.max_output_tokens for GPT-5 models with useResponsesApi', () => {
const clientOptions = {
model: 'gpt-5',
maxTokens: 2048,
temperature: 0.7,
useResponsesApi: true,
};
if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
const paramName =
clientOptions.useResponsesApi === true ? 'max_output_tokens' : 'max_completion_tokens';
clientOptions.modelKwargs[paramName] = clientOptions.maxTokens;
delete clientOptions.maxTokens;
}
expect(clientOptions.maxTokens).toBeUndefined();
expect(clientOptions.modelKwargs).toBeDefined();
expect(clientOptions.modelKwargs.max_output_tokens).toBe(2048);
expect(clientOptions.temperature).toBe(0.7); // Other options should remain
});
it('should handle GPT-5+ models with existing modelKwargs', () => {
const clientOptions = {
model: 'gpt-6',
maxTokens: 1500,
temperature: 0.8,
modelKwargs: {
customParam: 'value',
},
};
// Simulate the getOptions logic
if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
delete clientOptions.maxTokens;
}
expect(clientOptions.maxTokens).toBeUndefined();
expect(clientOptions.modelKwargs).toEqual({
customParam: 'value',
max_completion_tokens: 1500,
});
});
it('should not modify maxTokens for non-GPT-5+ models', () => {
const clientOptions = {
model: 'gpt-4',
maxTokens: 2048,
temperature: 0.7,
};
// Simulate the getOptions logic
if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
delete clientOptions.maxTokens;
}
// Should not be modified since it's GPT-4
expect(clientOptions.maxTokens).toBe(2048);
expect(clientOptions.modelKwargs).toBeUndefined();
});
it('should handle various GPT-5+ model formats', () => {
const testCases = [
{ model: 'gpt-5', shouldTransform: true },
{ model: 'gpt-5-turbo', shouldTransform: true },
{ model: 'gpt-6', shouldTransform: true },
{ model: 'gpt-7-preview', shouldTransform: true },
{ model: 'gpt-8', shouldTransform: true },
{ model: 'gpt-9-mini', shouldTransform: true },
{ model: 'gpt-4', shouldTransform: false },
{ model: 'gpt-4o', shouldTransform: false },
{ model: 'gpt-3.5-turbo', shouldTransform: false },
{ model: 'claude-3', shouldTransform: false },
];
testCases.forEach(({ model, shouldTransform }) => {
const clientOptions = {
model,
maxTokens: 1000,
};
// Simulate the getOptions logic
if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
delete clientOptions.maxTokens;
}
if (shouldTransform) {
expect(clientOptions.maxTokens).toBeUndefined();
expect(clientOptions.modelKwargs?.max_completion_tokens).toBe(1000);
} else {
expect(clientOptions.maxTokens).toBe(1000);
expect(clientOptions.modelKwargs).toBeUndefined();
}
});
});
it('should not swap max token param for older models when using useResponsesApi', () => {
const testCases = [
{ model: 'gpt-5', shouldTransform: true },
{ model: 'gpt-5-turbo', shouldTransform: true },
{ model: 'gpt-6', shouldTransform: true },
{ model: 'gpt-7-preview', shouldTransform: true },
{ model: 'gpt-8', shouldTransform: true },
{ model: 'gpt-9-mini', shouldTransform: true },
{ model: 'gpt-4', shouldTransform: false },
{ model: 'gpt-4o', shouldTransform: false },
{ model: 'gpt-3.5-turbo', shouldTransform: false },
{ model: 'claude-3', shouldTransform: false },
];
testCases.forEach(({ model, shouldTransform }) => {
const clientOptions = {
model,
maxTokens: 1000,
useResponsesApi: true,
};
if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
const paramName =
clientOptions.useResponsesApi === true ? 'max_output_tokens' : 'max_completion_tokens';
clientOptions.modelKwargs[paramName] = clientOptions.maxTokens;
delete clientOptions.maxTokens;
}
if (shouldTransform) {
expect(clientOptions.maxTokens).toBeUndefined();
expect(clientOptions.modelKwargs?.max_output_tokens).toBe(1000);
} else {
expect(clientOptions.maxTokens).toBe(1000);
expect(clientOptions.modelKwargs).toBeUndefined();
}
});
});
it('should not transform if maxTokens is null or undefined', () => {
const testCases = [
{ model: 'gpt-5', maxTokens: null },
{ model: 'gpt-5', maxTokens: undefined },
{ model: 'gpt-6', maxTokens: 0 }, // Should transform even if 0
];
testCases.forEach(({ model, maxTokens }, index) => {
const clientOptions = {
model,
maxTokens,
temperature: 0.7,
};
// Simulate the getOptions logic
if (/\bgpt-[5-9]\b/i.test(clientOptions.model) && clientOptions.maxTokens != null) {
clientOptions.modelKwargs = clientOptions.modelKwargs ?? {};
clientOptions.modelKwargs.max_completion_tokens = clientOptions.maxTokens;
delete clientOptions.maxTokens;
}
if (index < 2) {
// null or undefined cases
expect(clientOptions.maxTokens).toBe(maxTokens);
expect(clientOptions.modelKwargs).toBeUndefined();
} else {
// 0 case - should transform
expect(clientOptions.maxTokens).toBeUndefined();
expect(clientOptions.modelKwargs?.max_completion_tokens).toBe(0);
}
});
});
});
describe('runMemory method', () => {
let client;
let mockReq;
let mockRes;
let mockAgent;
let mockOptions;
let mockProcessMemory;
beforeEach(() => {
jest.clearAllMocks();
mockAgent = {
id: 'agent-123',
endpoint: EModelEndpoint.openAI,
provider: EModelEndpoint.openAI,
model_parameters: {
model: 'gpt-4',
},
};
mockReq = {
app: {
locals: {
memory: {
messageWindowSize: 3,
},
},
},
user: {
id: 'user-123',
personalization: {
memories: true,
},
},
};
mockRes = {};
mockOptions = {
req: mockReq,
res: mockRes,
agent: mockAgent,
};
mockProcessMemory = jest.fn().mockResolvedValue([]);
client = new AgentClient(mockOptions);
client.processMemory = mockProcessMemory;
client.conversationId = 'convo-123';
client.responseMessageId = 'response-123';
});
it('should filter out image URLs from message content', async () => {
const { HumanMessage, AIMessage } = require('@langchain/core/messages');
const messages = [
new HumanMessage({
content: [
{
type: 'text',
text: 'What is in this image?',
},
{
type: 'image_url',
image_url: {
url: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==',
detail: 'auto',
},
},
],
}),
new AIMessage('I can see a small red pixel in the image.'),
new HumanMessage({
content: [
{
type: 'text',
text: 'What about this one?',
},
{
type: 'image_url',
image_url: {
url: 'data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAYABgAAD/',
detail: 'high',
},
},
],
}),
];
await client.runMemory(messages);
expect(mockProcessMemory).toHaveBeenCalledTimes(1);
const processedMessage = mockProcessMemory.mock.calls[0][0][0];
// Verify the buffer message was created
expect(processedMessage.constructor.name).toBe('HumanMessage');
expect(processedMessage.content).toContain('# Current Chat:');
// Verify that image URLs are not in the buffer string
expect(processedMessage.content).not.toContain('image_url');
expect(processedMessage.content).not.toContain('data:image');
expect(processedMessage.content).not.toContain('base64');
// Verify text content is preserved
expect(processedMessage.content).toContain('What is in this image?');
expect(processedMessage.content).toContain('I can see a small red pixel in the image.');
expect(processedMessage.content).toContain('What about this one?');
});
it('should handle messages with only text content', async () => {
const { HumanMessage, AIMessage } = require('@langchain/core/messages');
const messages = [
new HumanMessage('Hello, how are you?'),
new AIMessage('I am doing well, thank you!'),
new HumanMessage('That is great to hear.'),
];
await client.runMemory(messages);
expect(mockProcessMemory).toHaveBeenCalledTimes(1);
const processedMessage = mockProcessMemory.mock.calls[0][0][0];
expect(processedMessage.content).toContain('Hello, how are you?');
expect(processedMessage.content).toContain('I am doing well, thank you!');
expect(processedMessage.content).toContain('That is great to hear.');
});
it('should handle mixed content types correctly', async () => {
const { HumanMessage } = require('@langchain/core/messages');
const { ContentTypes } = require('librechat-data-provider');
const messages = [
new HumanMessage({
content: [
{
type: 'text',
text: 'Here is some text',
},
{
type: ContentTypes.IMAGE_URL,
image_url: {
url: 'https://example.com/image.png',
},
},
{
type: 'text',
text: ' and more text',
},
],
}),
];
await client.runMemory(messages);
expect(mockProcessMemory).toHaveBeenCalledTimes(1);
const processedMessage = mockProcessMemory.mock.calls[0][0][0];
// Should contain text parts but not image URLs
expect(processedMessage.content).toContain('Here is some text');
expect(processedMessage.content).toContain('and more text');
expect(processedMessage.content).not.toContain('example.com/image.png');
expect(processedMessage.content).not.toContain('IMAGE_URL');
});
it('should preserve original messages without mutation', async () => {
const { HumanMessage } = require('@langchain/core/messages');
const originalContent = [
{
type: 'text',
text: 'Original text',
},
{
type: 'image_url',
image_url: {
url: 'data:image/png;base64,ABC123',
},
},
];
const messages = [
new HumanMessage({
content: [...originalContent],
}),
];
await client.runMemory(messages);
// Verify original message wasn't mutated
expect(messages[0].content).toHaveLength(2);
expect(messages[0].content[1].type).toBe('image_url');
expect(messages[0].content[1].image_url.url).toBe('data:image/png;base64,ABC123');
});
it('should handle message window size correctly', async () => {
const { HumanMessage, AIMessage } = require('@langchain/core/messages');
const messages = [
new HumanMessage('Message 1'),
new AIMessage('Response 1'),
new HumanMessage('Message 2'),
new AIMessage('Response 2'),
new HumanMessage('Message 3'),
new AIMessage('Response 3'),
];
// Window size is set to 3 in mockReq
await client.runMemory(messages);
expect(mockProcessMemory).toHaveBeenCalledTimes(1);
const processedMessage = mockProcessMemory.mock.calls[0][0][0];
// Should only include last 3 messages due to window size
expect(processedMessage.content).toContain('Message 3');
expect(processedMessage.content).toContain('Response 3');
expect(processedMessage.content).not.toContain('Message 1');
expect(processedMessage.content).not.toContain('Response 1');
});
it('should return early if processMemory is not set', async () => {
const { HumanMessage } = require('@langchain/core/messages');
client.processMemory = null;
const result = await client.runMemory([new HumanMessage('Test')]);
expect(result).toBeUndefined();
expect(mockProcessMemory).not.toHaveBeenCalled();
});
});
});
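
The runMemory tests above hinge on one behavior of LangChain's getBufferString: string content passes through as-is, while array content is serialized wholesale, so an unfiltered image_url part would dump its base64 payload into the memory prompt. A quick illustration (output shape approximate):

const { HumanMessage, getBufferString } = require('@langchain/core/messages');

const plain = new HumanMessage('hello');
const withImage = new HumanMessage({
  content: [
    { type: 'text', text: 'hello' },
    { type: 'image_url', image_url: { url: 'data:image/png;base64,AAAA' } },
  ],
});

console.log(getBufferString([plain]));     // Human: hello
console.log(getBufferString([withImage])); // Human: [{"type":"text",...base64...}]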

View File

@@ -105,6 +105,8 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
return res.end();
}
await cache.delete(cacheKey);
// const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
// logger.debug(`[${originPath}] Cancelled run:`, cancelledRun);
} catch (error) {
logger.error(`[${originPath}] Error cancelling run`, error);
}
@@ -113,6 +115,7 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
let run;
try {
// run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
await recordUsage({
...run.usage,
model: run.model,
@@ -125,9 +128,18 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
let finalEvent;
try {
// const errorContentPart = {
// text: {
// value:
// error?.message ?? 'There was an error processing your request. Please try again later.',
// },
// type: ContentTypes.ERROR,
// };
finalEvent = {
final: true,
conversation: await getConvo(req.user.id, conversationId),
// runMessages,
};
} catch (error) {
logger.error(`[${originPath}] Error finalizing error process`, error);

View File

@@ -233,26 +233,6 @@ const AgentController = async (req, res, next, initializeClient, addTitle) => {
);
}
}
// Edge case: sendMessage completed but abort happened during sendCompletion
// We need to ensure a final event is sent
else if (!res.headersSent && !res.finished) {
logger.debug(
'[AgentController] Handling edge case: `sendMessage` completed but aborted during `sendCompletion`',
);
const finalResponse = { ...response };
finalResponse.error = true;
sendEvent(res, {
final: true,
conversation,
title: conversation.title,
requestMessage: userMessage,
responseMessage: finalResponse,
error: { message: 'Request was aborted during completion' },
});
res.end();
}
// Save user message if needed
if (!client.skipSaveUserMessage) {

View File

@@ -194,9 +194,6 @@ const updateAgentHandler = async (req, res) => {
});
}
// Add version count to the response
updatedAgent.version = updatedAgent.versions ? updatedAgent.versions.length : 0;
if (updatedAgent.author) {
updatedAgent.author = updatedAgent.author.toString();
}

View File

@@ -498,28 +498,6 @@ describe('Agent Controllers - Mass Assignment Protection', () => {
expect(mockRes.json).toHaveBeenCalledWith({ error: 'Agent not found' });
});
test('should include version field in update response', async () => {
mockReq.user.id = existingAgentAuthorId.toString();
mockReq.params.id = existingAgentId;
mockReq.body = {
name: 'Updated with Version Check',
};
await updateAgentHandler(mockReq, mockRes);
expect(mockRes.json).toHaveBeenCalled();
const updatedAgent = mockRes.json.mock.calls[0][0];
// Verify version field is included and is a number
expect(updatedAgent).toHaveProperty('version');
expect(typeof updatedAgent.version).toBe('number');
expect(updatedAgent.version).toBeGreaterThanOrEqual(1);
// Verify in database
const agentInDb = await Agent.findOne({ id: existingAgentId });
expect(updatedAgent.version).toBe(agentInDb.versions.length);
});
test('should handle validation errors properly', async () => {
mockReq.user.id = existingAgentAuthorId.toString();
mockReq.params.id = existingAgentId;

View File

@@ -152,7 +152,7 @@ const chatV1 = async (req, res) => {
return res.end();
}
await cache.delete(cacheKey);
const cancelledRun = await openai.beta.threads.runs.cancel(run_id, { thread_id });
const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
logger.debug('[/assistants/chat/] Cancelled run:', cancelledRun);
} catch (error) {
logger.error('[/assistants/chat/] Error cancelling run', error);
@@ -162,7 +162,7 @@ const chatV1 = async (req, res) => {
let run;
try {
run = await openai.beta.threads.runs.retrieve(run_id, { thread_id });
run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
await recordUsage({
...run.usage,
model: run.model,
@@ -623,7 +623,7 @@ const chatV1 = async (req, res) => {
if (!response.run.usage) {
await sleep(3000);
completedRun = await openai.beta.threads.runs.retrieve(response.run.id, { thread_id });
completedRun = await openai.beta.threads.runs.retrieve(thread_id, response.run.id);
if (completedRun.usage) {
await recordUsage({
...completedRun.usage,
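
The cancel/retrieve swaps in this file (and the matching ones in the assistant routes below) read as an SDK-version artifact rather than a behavior change: newer openai-node releases moved thread-scoped run calls from positional (threadId, runId) to (runId, { thread_id }). Side by side, under that assumption:

// Positional style (second line of each pair above):
await openai.beta.threads.runs.retrieve(thread_id, run_id);
await openai.beta.threads.runs.cancel(thread_id, run_id);

// Newer style (first line of each pair): resource id first, parent id in params.
await openai.beta.threads.runs.retrieve(run_id, { thread_id });
await openai.beta.threads.runs.cancel(run_id, { thread_id });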

View File

@@ -467,7 +467,7 @@ const chatV2 = async (req, res) => {
if (!response.run.usage) {
await sleep(3000);
completedRun = await openai.beta.threads.runs.retrieve(response.run.id, { thread_id });
completedRun = await openai.beta.threads.runs.retrieve(thread_id, response.run.id);
if (completedRun.usage) {
await recordUsage({
...completedRun.usage,

View File

@@ -108,7 +108,7 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
return res.end();
}
await cache.delete(cacheKey);
const cancelledRun = await openai.beta.threads.runs.cancel(run_id, { thread_id });
const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
logger.debug(`[${originPath}] Cancelled run:`, cancelledRun);
} catch (error) {
logger.error(`[${originPath}] Error cancelling run`, error);
@@ -118,7 +118,7 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
let run;
try {
run = await openai.beta.threads.runs.retrieve(run_id, { thread_id });
run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
await recordUsage({
...run.usage,
model: run.model,

View File

@@ -173,16 +173,6 @@ const listAssistantsForAzure = async ({ req, res, version, azureConfig = {}, que
};
};
/**
* Initializes the OpenAI client.
* @param {object} params - The parameters object.
* @param {ServerRequest} params.req - The request object.
* @param {ServerResponse} params.res - The response object.
* @param {TEndpointOption} params.endpointOption - The endpoint options.
* @param {boolean} params.initAppClient - Whether to initialize the app client.
* @param {string} params.overrideEndpoint - The endpoint to override.
* @returns {Promise<{ openai: OpenAIClient, openAIApiKey: string; client: import('~/app/clients/OpenAIClient') }>} - The initialized OpenAI client.
*/
async function getOpenAIClient({ req, res, endpointOption, initAppClient, overrideEndpoint }) {
let endpoint = overrideEndpoint ?? req.body.endpoint ?? req.query.endpoint;
const version = await getCurrentVersion(req, endpoint);

View File

@@ -197,7 +197,7 @@ const deleteAssistant = async (req, res) => {
await validateAuthor({ req, openai });
const assistant_id = req.params.id;
const deletionStatus = await openai.beta.assistants.delete(assistant_id);
const deletionStatus = await openai.beta.assistants.del(assistant_id);
if (deletionStatus?.deleted) {
await deleteAssistantActions({ req, assistant_id });
}
@@ -365,7 +365,7 @@ const uploadAssistantAvatar = async (req, res) => {
try {
await fs.unlink(req.file.path);
logger.debug('[/:agent_id/avatar] Temp. image upload file deleted');
} catch {
} catch (error) {
logger.debug('[/:agent_id/avatar] Temp. image upload file already deleted');
}
}
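
Two unrelated cleanups meet in this file: assistants.delete(...) versus the older assistants.del(...), presumably the same SDK rename tracked by the run-call changes above, and a bare catch { versus catch (error) {. The bare form is ES2019 optional catch binding, valid whenever the error value goes unused:

const fs = require('fs').promises;

async function cleanupTempUpload(path) {
  try {
    await fs.unlink(path);
  } catch {
    // ES2019 optional catch binding: no unused `error` identifier to carry
  }
}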

View File

@@ -8,12 +8,14 @@ const express = require('express');
const passport = require('passport');
const compression = require('compression');
const cookieParser = require('cookie-parser');
const { isEnabled } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const mongoSanitize = require('express-mongo-sanitize');
const { isEnabled, ErrorController } = require('@librechat/api');
const { connectDb, indexSync } = require('~/db');
const validateImageRequest = require('./middleware/validateImageRequest');
const { jwtLogin, ldapLogin, passportLogin } = require('~/strategies');
const errorController = require('./controllers/ErrorController');
const initializeMCPs = require('./services/initializeMCPs');
const configureSocialLogins = require('./socialLogins');
const AppService = require('./services/AppService');
@@ -118,7 +120,8 @@ const startServer = async () => {
app.use('/api/tags', routes.tags);
app.use('/api/mcp', routes.mcp);
app.use(ErrorController);
// Add the error controller one more time after all routes
app.use(errorController);
app.use((req, res) => {
res.set({

View File

@@ -92,7 +92,7 @@ async function healthCheckPoll(app, retries = 0) {
if (response.status === 200) {
return; // App is healthy
}
} catch {
} catch (error) {
// Ignore connection errors during polling
}

View File

@@ -47,7 +47,7 @@ async function abortRun(req, res) {
try {
await cache.set(cacheKey, 'cancelled', three_minutes);
const cancelledRun = await openai.beta.threads.runs.cancel(run_id, { thread_id });
const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
logger.debug('[abortRun] Cancelled run:', cancelledRun);
} catch (error) {
logger.error('[abortRun] Error cancelling run', error);
@@ -60,7 +60,7 @@ async function abortRun(req, res) {
}
try {
const run = await openai.beta.threads.runs.retrieve(run_id, { thread_id });
const run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
await recordUsage({
...run.usage,
model: run.model,

View File

@@ -33,7 +33,7 @@ const validateModel = async (req, res, next) => {
return next();
}
const { ILLEGAL_MODEL_REQ_SCORE: score = 1 } = process.env ?? {};
const { ILLEGAL_MODEL_REQ_SCORE: score = 5 } = process.env ?? {};
const type = ViolationTypes.ILLEGAL_MODEL_REQUEST;
const errorMessage = {
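
Only the default severity differs here (1 versus 5). One property of this destructuring pattern worth keeping in mind: the default applies only when the variable is undefined, and any configured env value arrives as a string:

process.env.ILLEGAL_MODEL_REQ_SCORE = '2';
const { ILLEGAL_MODEL_REQ_SCORE: score = 1 } = process.env ?? {};
// score === '2' (a string); with the variable unset, score === 1 (a number)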

File diff suppressed because it is too large

View File

@@ -1,11 +1,10 @@
const express = require('express');
const { isEnabled } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { CacheKeys, defaultSocialLogins, Constants } = require('librechat-data-provider');
const { getCustomConfig } = require('~/server/services/Config/getCustomConfig');
const { getLdapConfig } = require('~/server/services/Config/ldap');
const { getProjectByName } = require('~/models/Project');
const { getMCPManager } = require('~/config');
const { isEnabled } = require('~/server/utils');
const { getLogStores } = require('~/cache');
const router = express.Router();
@@ -103,16 +102,11 @@ router.get('/', async function (req, res) {
payload.mcpServers = {};
const config = await getCustomConfig();
if (config?.mcpServers != null) {
const mcpManager = getMCPManager();
const oauthServers = mcpManager.getOAuthServers();
for (const serverName in config.mcpServers) {
const serverConfig = config.mcpServers[serverName];
payload.mcpServers[serverName] = {
customUserVars: serverConfig?.customUserVars || {},
chatMenu: serverConfig?.chatMenu,
isOAuth: oauthServers.has(serverName),
startup: serverConfig?.startup,
};
}
}

View File

@@ -111,7 +111,7 @@ router.delete('/', async (req, res) => {
/** @type {{ openai: OpenAI }} */
const { openai } = await assistantClients[endpoint].initializeClient({ req, res });
try {
const response = await openai.beta.threads.delete(thread_id);
const response = await openai.beta.threads.del(thread_id);
logger.debug('Deleted OpenAI thread:', response);
} catch (error) {
logger.error('Error deleting OpenAI thread:', error);

View File

@@ -413,15 +413,13 @@ router.post('/', async (req, res) => {
logger.error('[/files] Error deleting file:', error);
}
res.status(500).json({ message });
} finally {
if (cleanup) {
try {
await fs.unlink(req.file.path);
} catch (error) {
logger.error('[/files] Error deleting file after file processing:', error);
}
} else {
logger.debug('[/files] File processing completed without cleanup');
}
if (cleanup) {
try {
await fs.unlink(req.file.path);
} catch (error) {
logger.error('[/files] Error deleting file after file processing:', error);
}
}
});
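
One variant of this upload handler moves temp-file cleanup into finally, which runs after both the success path and the res.status(500) path; the other repeats the unlink after the try/catch instead. Reduced to a sketch of the route body (processFile is a hypothetical stand-in for the upload logic):

try {
  await processFile(req.file); // hypothetical stand-in for the upload logic
} catch (error) {
  res.status(500).json({ message: 'Error processing file' });
} finally {
  if (cleanup) {
    try {
      await fs.unlink(req.file.path);
    } catch (error) {
      logger.error('[/files] Error deleting file after file processing:', error);
    }
  } else {
    logger.debug('[/files] File processing completed without cleanup');
  }
}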

View File

@@ -4,7 +4,6 @@ const { MCPOAuthHandler } = require('@librechat/api');
const { CacheKeys, Constants } = require('librechat-data-provider');
const { findToken, updateToken, createToken, deleteTokens } = require('~/models');
const { setCachedTools, getCachedTools, loadCustomConfig } = require('~/server/services/Config');
const { getMCPSetupData, getServerConnectionStatus } = require('~/server/services/MCP');
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const { getMCPManager, getFlowStateManager } = require('~/config');
const { requireJwtAuth } = require('~/server/middleware');
@@ -93,6 +92,7 @@ router.get('/:serverName/oauth/callback', async (req, res) => {
return res.redirect('/oauth/error?error=missing_state');
}
// Extract flow ID from state
const flowId = state;
logger.debug('[MCP OAuth] Using flow ID from state', { flowId });
@@ -115,17 +115,22 @@ router.get('/:serverName/oauth/callback', async (req, res) => {
hasCodeVerifier: !!flowState.codeVerifier,
});
// Complete the OAuth flow
logger.debug('[MCP OAuth] Completing OAuth flow');
const tokens = await MCPOAuthHandler.completeOAuthFlow(flowId, code, flowManager);
logger.info('[MCP OAuth] OAuth flow completed, tokens received in callback route');
// Try to establish the MCP connection with the new tokens
try {
const mcpManager = getMCPManager(flowState.userId);
logger.debug(`[MCP OAuth] Attempting to reconnect ${serverName} with new OAuth tokens`);
// For user-level OAuth, try to establish the connection
if (flowState.userId !== 'system') {
// We need to get the user object - in this case we'll need to reconstruct it
const user = { id: flowState.userId };
// Try to establish connection with the new tokens
const userConnection = await mcpManager.getUserConnection({
user,
serverName,
@@ -142,8 +147,10 @@ router.get('/:serverName/oauth/callback', async (req, res) => {
`[MCP OAuth] Successfully reconnected ${serverName} for user ${flowState.userId}`,
);
// Fetch and cache tools now that we have a successful connection
const userTools = (await getCachedTools({ userId: flowState.userId })) || {};
// Remove any old tools from this server in the user's cache
const mcpDelimiter = Constants.mcp_delimiter;
for (const key of Object.keys(userTools)) {
if (key.endsWith(`${mcpDelimiter}${serverName}`)) {
@@ -151,6 +158,7 @@ router.get('/:serverName/oauth/callback', async (req, res) => {
}
}
// Add the new tools from this server
const tools = await userConnection.fetchTools();
for (const tool of tools) {
const name = `${tool.name}${Constants.mcp_delimiter}${serverName}`;
@@ -164,6 +172,7 @@ router.get('/:serverName/oauth/callback', async (req, res) => {
};
}
// Save the updated user tool cache
await setCachedTools(userTools, { userId: flowState.userId });
logger.debug(
@@ -173,6 +182,7 @@ router.get('/:serverName/oauth/callback', async (req, res) => {
logger.debug(`[MCP OAuth] System-level OAuth completed for ${serverName}`);
}
} catch (error) {
// Don't fail the OAuth callback if reconnection fails - the tokens are still saved
logger.warn(
`[MCP OAuth] Failed to reconnect ${serverName} after OAuth, but tokens are saved:`,
error,
@@ -208,6 +218,7 @@ router.get('/oauth/tokens/:flowId', requireJwtAuth, async (req, res) => {
return res.status(401).json({ error: 'User not authenticated' });
}
// Allow system flows or user-owned flows
if (!flowId.startsWith(`${user.id}:`) && !flowId.startsWith('system:')) {
return res.status(403).json({ error: 'Access denied' });
}
@@ -275,7 +286,11 @@ router.post('/oauth/cancel/:serverName', requireJwtAuth, async (req, res) => {
const flowsCache = getLogStores(CacheKeys.FLOWS);
const flowManager = getFlowStateManager(flowsCache);
// Generate the flow ID for this user/server combination
const flowId = MCPOAuthHandler.generateFlowId(user.id, serverName);
// Check if flow exists
const flowState = await flowManager.getFlowState(flowId, 'mcp_oauth');
if (!flowState) {
@@ -286,7 +301,8 @@ router.post('/oauth/cancel/:serverName', requireJwtAuth, async (req, res) => {
});
}
await flowManager.failFlow(flowId, 'mcp_oauth', 'User cancelled OAuth flow');
// Cancel the flow by marking it as failed
await flowManager.completeFlow(flowId, 'mcp_oauth', null, 'User cancelled OAuth flow');
logger.info(`[MCP OAuth Cancel] Successfully cancelled OAuth flow for ${serverName}`);
@@ -337,7 +353,9 @@ router.post('/:serverName/reinitialize', requireJwtAuth, async (req, res) => {
for (const varName of Object.keys(serverConfig.customUserVars)) {
try {
const value = await getUserPluginAuthValue(user.id, varName, false);
customUserVars[varName] = value;
if (value) {
customUserVars[varName] = value;
}
} catch (err) {
logger.error(`[MCP Reinitialize] Error fetching ${varName} for user ${user.id}:`, err);
}
@@ -360,7 +378,8 @@ router.post('/:serverName/reinitialize', requireJwtAuth, async (req, res) => {
createToken,
deleteTokens,
},
returnOnOAuth: true,
returnOnOAuth: true, // Return immediately when OAuth is initiated
// Add OAuth handlers to capture the OAuth URL when needed
oauthStart: async (authURL) => {
logger.info(`[MCP Reinitialize] OAuth URL received: ${authURL}`);
oauthUrl = authURL;
@@ -375,6 +394,7 @@ router.post('/:serverName/reinitialize', requireJwtAuth, async (req, res) => {
`[MCP Reinitialize] OAuth state - oauthRequired: ${oauthRequired}, oauthUrl: ${oauthUrl ? 'present' : 'null'}`,
);
// Check if this is an OAuth error - if so, the flow state should be set up now
const isOAuthError =
err.message?.includes('OAuth') ||
err.message?.includes('authentication') ||
@@ -387,6 +407,7 @@ router.post('/:serverName/reinitialize', requireJwtAuth, async (req, res) => {
`[MCP Reinitialize] OAuth required for ${serverName} (isOAuthError: ${isOAuthError}, oauthRequired: ${oauthRequired}, isOAuthFlowInitiated: ${isOAuthFlowInitiated})`,
);
oauthRequired = true;
// Don't return error - continue so frontend can handle OAuth
} else {
logger.error(
`[MCP Reinitialize] Error initializing MCP server ${serverName} for user:`,
@@ -396,9 +417,11 @@ router.post('/:serverName/reinitialize', requireJwtAuth, async (req, res) => {
}
}
// Only fetch and cache tools if we successfully connected (no OAuth required)
if (userConnection && !oauthRequired) {
const userTools = (await getCachedTools({ userId: user.id })) || {};
// Remove any old tools from this server in the user's cache
const mcpDelimiter = Constants.mcp_delimiter;
for (const key of Object.keys(userTools)) {
if (key.endsWith(`${mcpDelimiter}${serverName}`)) {
@@ -406,6 +429,7 @@ router.post('/:serverName/reinitialize', requireJwtAuth, async (req, res) => {
}
}
// Add the new tools from this server
const tools = await userConnection.fetchTools();
for (const tool of tools) {
const name = `${tool.name}${Constants.mcp_delimiter}${serverName}`;
@@ -419,6 +443,7 @@ router.post('/:serverName/reinitialize', requireJwtAuth, async (req, res) => {
};
}
// Save the updated user tool cache
await setCachedTools(userTools, { userId: user.id });
}
@@ -426,19 +451,11 @@ router.post('/:serverName/reinitialize', requireJwtAuth, async (req, res) => {
`[MCP Reinitialize] Sending response for ${serverName} - oauthRequired: ${oauthRequired}, oauthUrl: ${oauthUrl ? 'present' : 'null'}`,
);
const getResponseMessage = () => {
if (oauthRequired) {
return `MCP server '${serverName}' ready for OAuth authentication`;
}
if (userConnection) {
return `MCP server '${serverName}' reinitialized successfully`;
}
return `Failed to reinitialize MCP server '${serverName}'`;
};
res.json({
success: (userConnection && !oauthRequired) || (oauthRequired && oauthUrl),
message: getResponseMessage(),
success: true,
message: oauthRequired
? `MCP server '${serverName}' ready for OAuth authentication`
: `MCP server '${serverName}' reinitialized successfully`,
serverName,
oauthRequired,
oauthUrl,
@@ -451,7 +468,7 @@ router.post('/:serverName/reinitialize', requireJwtAuth, async (req, res) => {
/**
* Get connection status for all MCP servers
* This endpoint returns all app level and user-scoped connection statuses from MCPManager without disconnecting idle connections
* This endpoint returns the actual connection status from MCPManager without disconnecting idle connections
*/
router.get('/connection/status', requireJwtAuth, async (req, res) => {
try {
@@ -461,19 +478,84 @@ router.get('/connection/status', requireJwtAuth, async (req, res) => {
return res.status(401).json({ error: 'User not authenticated' });
}
const { mcpConfig, appConnections, userConnections, oauthServers } = await getMCPSetupData(
user.id,
);
const mcpManager = getMCPManager(user.id);
const connectionStatus = {};
const printConfig = false;
const config = await loadCustomConfig(printConfig);
const mcpConfig = config?.mcpServers;
const appConnections = mcpManager.getAllConnections() || new Map();
const userConnections = mcpManager.getUserConnections(user.id) || new Map();
const oauthServers = mcpManager.getOAuthServers() || new Set();
if (!mcpConfig) {
return res.status(404).json({ error: 'MCP config not found' });
}
// Get flow manager to check for active/timed-out OAuth flows
const flowsCache = getLogStores(CacheKeys.FLOWS);
const flowManager = getFlowStateManager(flowsCache);
for (const [serverName] of Object.entries(mcpConfig)) {
connectionStatus[serverName] = await getServerConnectionStatus(
user.id,
serverName,
appConnections,
userConnections,
oauthServers,
);
const getConnectionState = (serverName) =>
appConnections.get(serverName)?.connectionState ??
userConnections.get(serverName)?.connectionState ??
'disconnected';
const baseConnectionState = getConnectionState(serverName);
let hasActiveOAuthFlow = false;
let hasFailedOAuthFlow = false;
if (baseConnectionState === 'disconnected' && oauthServers.has(serverName)) {
try {
// Check for user-specific OAuth flows
const flowId = MCPOAuthHandler.generateFlowId(user.id, serverName);
const flowState = await flowManager.getFlowState(flowId, 'mcp_oauth');
if (flowState) {
// Check if flow failed or timed out
const flowAge = Date.now() - flowState.createdAt;
const flowTTL = flowState.ttl || 180000; // Default 3 minutes
if (flowState.status === 'FAILED' || flowAge > flowTTL) {
hasFailedOAuthFlow = true;
logger.debug(`[MCP Connection Status] Found failed OAuth flow for ${serverName}`, {
flowId,
status: flowState.status,
flowAge,
flowTTL,
timedOut: flowAge > flowTTL,
});
} else if (flowState.status === 'PENDING') {
hasActiveOAuthFlow = true;
logger.debug(`[MCP Connection Status] Found active OAuth flow for ${serverName}`, {
flowId,
flowAge,
flowTTL,
});
}
}
} catch (error) {
logger.error(
`[MCP Connection Status] Error checking OAuth flows for ${serverName}:`,
error,
);
}
}
// Determine the final connection state
let finalConnectionState = baseConnectionState;
if (hasFailedOAuthFlow) {
finalConnectionState = 'error'; // Report as error if OAuth failed
} else if (hasActiveOAuthFlow && baseConnectionState === 'disconnected') {
finalConnectionState = 'connecting'; // Still waiting for OAuth
}
connectionStatus[serverName] = {
requiresOAuth: oauthServers.has(serverName),
connectionState: finalConnectionState,
};
}
res.json({
@@ -481,63 +563,11 @@ router.get('/connection/status', requireJwtAuth, async (req, res) => {
connectionStatus,
});
} catch (error) {
if (error.message === 'MCP config not found') {
return res.status(404).json({ error: error.message });
}
logger.error('[MCP Connection Status] Failed to get connection status', error);
res.status(500).json({ error: 'Failed to get connection status' });
}
});
/**
* Get connection status for a single MCP server
* This endpoint returns the connection status for a specific server for a given user
*/
router.get('/connection/status/:serverName', requireJwtAuth, async (req, res) => {
try {
const user = req.user;
const { serverName } = req.params;
if (!user?.id) {
return res.status(401).json({ error: 'User not authenticated' });
}
const { mcpConfig, appConnections, userConnections, oauthServers } = await getMCPSetupData(
user.id,
);
if (!mcpConfig[serverName]) {
return res
.status(404)
.json({ error: `MCP server '${serverName}' not found in configuration` });
}
const serverStatus = await getServerConnectionStatus(
user.id,
serverName,
appConnections,
userConnections,
oauthServers,
);
res.json({
success: true,
serverName,
connectionStatus: serverStatus.connectionState,
requiresOAuth: serverStatus.requiresOAuth,
});
} catch (error) {
if (error.message === 'MCP config not found') {
return res.status(404).json({ error: error.message });
}
logger.error(
`[MCP Per-Server Status] Failed to get connection status for ${req.params.serverName}`,
error,
);
res.status(500).json({ error: 'Failed to get connection status' });
}
});
/**
* Check which authentication values exist for a specific MCP server
* This endpoint returns only boolean flags indicating if values are set, not the actual values
@@ -563,16 +593,19 @@ router.get('/:serverName/auth-values', requireJwtAuth, async (req, res) => {
const pluginKey = `${Constants.mcp_prefix}${serverName}`;
const authValueFlags = {};
// Check existence of saved values for each custom user variable (don't fetch actual values)
if (serverConfig.customUserVars && typeof serverConfig.customUserVars === 'object') {
for (const varName of Object.keys(serverConfig.customUserVars)) {
try {
const value = await getUserPluginAuthValue(user.id, varName, false, pluginKey);
// Only store boolean flag indicating if value exists
authValueFlags[varName] = !!(value && value.length > 0);
} catch (err) {
logger.error(
`[MCP Auth Value Flags] Error checking ${varName} for user ${user.id}:`,
err,
);
// Default to false if we can't check
authValueFlags[varName] = false;
}
}
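
The connection-status rewrite in this file inlines what the other variant factors into getMCPSetupData / getServerConnectionStatus. Its core heuristic, condensed: a disconnected OAuth server reports as connecting while a PENDING flow is inside its TTL, and as error once the flow has FAILED or outlived it.

// Condensed restatement of the staleness check from the hunk above (sketch).
function classifyOAuthState(flowState, baseConnectionState) {
  if (!flowState || baseConnectionState !== 'disconnected') {
    return baseConnectionState;
  }
  const flowAge = Date.now() - flowState.createdAt;
  const flowTTL = flowState.ttl || 180000; // default 3 minutes
  if (flowState.status === 'FAILED' || flowAge > flowTTL) {
    return 'error'; // OAuth failed or timed out
  }
  if (flowState.status === 'PENDING') {
    return 'connecting'; // still waiting on the user to finish OAuth
  }
  return baseConnectionState;
}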

View File

@@ -13,8 +13,6 @@ const { getRoleByName } = require('~/models/Role');
const router = express.Router();
const memoryPayloadLimit = express.json({ limit: '100kb' });
const checkMemoryRead = generateCheckAccess({
permissionType: PermissionTypes.MEMORIES,
permissions: [Permissions.USE, Permissions.READ],
@@ -62,7 +60,6 @@ router.get('/', checkMemoryRead, async (req, res) => {
const memoryConfig = req.app.locals?.memory;
const tokenLimit = memoryConfig?.tokenLimit;
const charLimit = memoryConfig?.charLimit || 10000;
let usagePercentage = null;
if (tokenLimit && tokenLimit > 0) {
@@ -73,7 +70,6 @@ router.get('/', checkMemoryRead, async (req, res) => {
memories: sortedMemories,
totalTokens,
tokenLimit: tokenLimit || null,
charLimit,
usagePercentage,
});
} catch (error) {
@@ -87,7 +83,7 @@ router.get('/', checkMemoryRead, async (req, res) => {
* Body: { key: string, value: string }
* Returns 201 and { created: true, memory: <createdDoc> } when successful.
*/
router.post('/', memoryPayloadLimit, checkMemoryCreate, async (req, res) => {
router.post('/', checkMemoryCreate, async (req, res) => {
const { key, value } = req.body;
if (typeof key !== 'string' || key.trim() === '') {
@@ -98,25 +94,13 @@ router.post('/', memoryPayloadLimit, checkMemoryCreate, async (req, res) => {
return res.status(400).json({ error: 'Value is required and must be a non-empty string.' });
}
const memoryConfig = req.app.locals?.memory;
const charLimit = memoryConfig?.charLimit || 10000;
if (key.length > 1000) {
return res.status(400).json({
error: `Key exceeds maximum length of 1000 characters. Current length: ${key.length} characters.`,
});
}
if (value.length > charLimit) {
return res.status(400).json({
error: `Value exceeds maximum length of ${charLimit} characters. Current length: ${value.length} characters.`,
});
}
try {
const tokenCount = Tokenizer.getTokenCount(value, 'o200k_base');
const memories = await getAllUserMemories(req.user.id);
// Check token limit
const memoryConfig = req.app.locals?.memory;
const tokenLimit = memoryConfig?.tokenLimit;
if (tokenLimit) {
@@ -191,7 +175,7 @@ router.patch('/preferences', checkMemoryOptOut, async (req, res) => {
* Body: { key?: string, value: string }
* Returns 200 and { updated: true, memory: <updatedDoc> } when successful.
*/
router.patch('/:key', memoryPayloadLimit, checkMemoryUpdate, async (req, res) => {
router.patch('/:key', checkMemoryUpdate, async (req, res) => {
const { key: urlKey } = req.params;
const { key: bodyKey, value } = req.body || {};
@@ -199,23 +183,9 @@ router.patch('/:key', memoryPayloadLimit, checkMemoryUpdate, async (req, res) =>
return res.status(400).json({ error: 'Value is required and must be a non-empty string.' });
}
// Use the key from the body if provided, otherwise use the key from the URL
const newKey = bodyKey || urlKey;
const memoryConfig = req.app.locals?.memory;
const charLimit = memoryConfig?.charLimit || 10000;
if (newKey.length > 1000) {
return res.status(400).json({
error: `Key exceeds maximum length of 1000 characters. Current length: ${newKey.length} characters.`,
});
}
if (value.length > charLimit) {
return res.status(400).json({
error: `Value exceeds maximum length of ${charLimit} characters. Current length: ${value.length} characters.`,
});
}
try {
const tokenCount = Tokenizer.getTokenCount(value, 'o200k_base');
@@ -226,6 +196,7 @@ router.patch('/:key', memoryPayloadLimit, checkMemoryUpdate, async (req, res) =>
return res.status(404).json({ error: 'Memory not found.' });
}
// If the key is changing, we need to handle it specially
if (newKey !== urlKey) {
const keyExists = memories.find((m) => m.key === newKey);
if (keyExists) {
@@ -248,6 +219,7 @@ router.patch('/:key', memoryPayloadLimit, checkMemoryUpdate, async (req, res) =>
return res.status(500).json({ error: 'Failed to delete old memory.' });
}
} else {
// Key is not changing, just update the value
const result = await setMemory({
userId: req.user.id,
key: newKey,

View File

@@ -1,10 +1,7 @@
// file deepcode ignore NoRateLimitingForLogin: Rate limiting is handled by the `loginLimiter` middleware
const express = require('express');
const passport = require('passport');
const { isEnabled } = require('@librechat/api');
const { randomState } = require('openid-client');
const { logger } = require('@librechat/data-schemas');
const { ErrorTypes } = require('librechat-data-provider');
const {
checkBan,
logHeaders,
@@ -13,6 +10,8 @@ const {
checkDomainAllowed,
} = require('~/server/middleware');
const { setAuthTokens, setOpenIDAuthTokens } = require('~/server/services/AuthService');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');
const router = express.Router();
@@ -47,13 +46,13 @@ const oauthHandler = async (req, res) => {
};
router.get('/error', (req, res) => {
/** A single error message is pushed by passport when authentication fails. */
const errorMessage = req.session?.messages?.pop() || 'Unknown error';
// A single error message is pushed by passport when authentication fails.
logger.error('Error in OAuth authentication:', {
message: errorMessage,
message: req.session?.messages?.pop() || 'Unknown error',
});
res.redirect(`${domains.client}/login?redirect=false&error=${ErrorTypes.AUTH_FAILED}`);
// Redirect to login page with auth_failed parameter to prevent infinite redirect loops
res.redirect(`${domains.client}/login?redirect=false`);
});
/**

View File

@@ -1,8 +1,9 @@
const { loadMemoryConfig, agentsConfigSetup, loadWebSearchConfig } = require('@librechat/api');
const { agentsConfigSetup, loadWebSearchConfig } = require('@librechat/api');
const {
FileSources,
loadOCRConfig,
EModelEndpoint,
loadMemoryConfig,
getConfigDefaults,
} = require('librechat-data-provider');
const {

View File

@@ -60,14 +60,7 @@ const replaceArtifactContent = (originalText, artifact, original, updated) => {
// Find boundaries between ARTIFACT_START and ARTIFACT_END
const contentStart = artifactContent.indexOf('\n', artifactContent.indexOf(ARTIFACT_START)) + 1;
let contentEnd = artifactContent.lastIndexOf(ARTIFACT_END);
// Special case: if contentEnd is 0, it means the only ::: found is at the start of :::artifact
// This indicates an incomplete artifact (no closing :::)
// We need to check that it's exactly at position 0 (the beginning of artifactContent)
if (contentEnd === 0 && artifactContent.indexOf(ARTIFACT_START) === 0) {
contentEnd = artifactContent.length;
}
const contentEnd = artifactContent.lastIndexOf(ARTIFACT_END);
if (contentStart === -1 || contentEnd === -1) {
return null;
@@ -79,20 +72,12 @@ const replaceArtifactContent = (originalText, artifact, original, updated) => {
// Determine where to look for the original content
let searchStart, searchEnd;
if (codeBlockStart !== -1) {
// Code block starts
if (codeBlockStart !== -1 && codeBlockEnd !== -1) {
// If code blocks exist, search between them
searchStart = codeBlockStart + 4; // after ```\n
if (codeBlockEnd !== -1 && codeBlockEnd > codeBlockStart) {
// Code block has proper ending
searchEnd = codeBlockEnd;
} else {
// No closing backticks found or they're before the opening (shouldn't happen)
// This might be an incomplete artifact - search to contentEnd
searchEnd = contentEnd;
}
searchEnd = codeBlockEnd;
} else {
// No code blocks at all
// Otherwise search in the whole artifact content
searchStart = contentStart;
searchEnd = contentEnd;
}
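
The incomplete-artifact handling hinges on one subtlety: lastIndexOf(ARTIFACT_END) returning 0 does not mean a closing ::: exists; it means the only ::: found is the one opening :::artifact itself. The guard, in isolation:

// When the only ':::' is at position 0 (the ':::artifact' opener), the
// artifact was cut off mid-stream, so treat everything to the end as content.
let contentEnd = artifactContent.lastIndexOf(ARTIFACT_END);
if (contentEnd === 0 && artifactContent.indexOf(ARTIFACT_START) === 0) {
  contentEnd = artifactContent.length;
}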

View File

@@ -89,9 +89,9 @@ describe('replaceArtifactContent', () => {
};
test('should replace content within artifact boundaries', () => {
const original = "console.log('hello')";
const original = 'console.log(\'hello\')';
const artifact = createTestArtifact(original);
const updated = "console.log('updated')";
const updated = 'console.log(\'updated\')';
const result = replaceArtifactContent(artifact.text, artifact, original, updated);
expect(result).toContain(updated);
@@ -317,182 +317,4 @@ console.log(greeting);`;
expect(result).not.toContain('\n\n```');
expect(result).not.toContain('```\n\n');
});
describe('incomplete artifacts', () => {
test('should handle incomplete artifacts (missing closing ::: and ```)', () => {
const original = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>Pomodoro</title>
<meta name="description" content="A single-file Pomodoro timer with logs, charts, sounds, and dark mode." />
<style>
:root{`;
const prefix = `Awesome idea! I'll deliver a complete single-file HTML app called "Pomodoro" with:
- Custom session/break durations
You can save this as pomodoro.html and open it directly in your browser.
`;
// This simulates the real incomplete artifact case - no closing ``` or :::
const incompleteArtifact = `${ARTIFACT_START}{identifier="pomodoro-single-file-app" type="text/html" title="Pomodoro — Single File App"}
\`\`\`
${original}`;
const fullText = prefix + incompleteArtifact;
const message = { text: fullText };
const artifacts = findAllArtifacts(message);
expect(artifacts).toHaveLength(1);
expect(artifacts[0].end).toBe(fullText.length);
const updated = original.replace('Pomodoro</title>', 'Pomodoro</title>UPDATED');
const result = replaceArtifactContent(fullText, artifacts[0], original, updated);
expect(result).not.toBeNull();
expect(result).toContain('UPDATED');
expect(result).toContain(prefix);
// Should not have added closing markers
expect(result).not.toMatch(/:::\s*$/);
});
test('should handle incomplete artifacts with only opening code block', () => {
const original = 'function hello() { console.log("world"); }';
const incompleteArtifact = `${ARTIFACT_START}{id="test"}\n\`\`\`\n${original}`;
const message = { text: incompleteArtifact };
const artifacts = findAllArtifacts(message);
expect(artifacts).toHaveLength(1);
const updated = 'function hello() { console.log("UPDATED"); }';
const result = replaceArtifactContent(incompleteArtifact, artifacts[0], original, updated);
expect(result).not.toBeNull();
expect(result).toContain('UPDATED');
});
test('should handle incomplete artifacts without code blocks', () => {
const original = 'Some plain text content';
const incompleteArtifact = `${ARTIFACT_START}{id="test"}\n${original}`;
const message = { text: incompleteArtifact };
const artifacts = findAllArtifacts(message);
expect(artifacts).toHaveLength(1);
const updated = 'Some UPDATED text content';
const result = replaceArtifactContent(incompleteArtifact, artifacts[0], original, updated);
expect(result).not.toBeNull();
expect(result).toContain('UPDATED');
});
});
describe('regression tests for edge cases', () => {
test('should still handle complete artifacts correctly', () => {
// Ensure we didn't break normal artifact handling
const original = 'console.log("test");';
const artifact = createArtifactText({ content: original });
const message = { text: artifact };
const artifacts = findAllArtifacts(message);
expect(artifacts).toHaveLength(1);
const updated = 'console.log("updated");';
const result = replaceArtifactContent(artifact, artifacts[0], original, updated);
expect(result).not.toBeNull();
expect(result).toContain(updated);
expect(result).toContain(ARTIFACT_END);
expect(result).toMatch(/```\nconsole\.log\("updated"\);\n```/);
});
test('should handle multiple complete artifacts', () => {
// Ensure multiple artifacts still work
const content1 = 'First artifact';
const content2 = 'Second artifact';
const text = `${createArtifactText({ content: content1 })}\n\n${createArtifactText({ content: content2 })}`;
const message = { text };
const artifacts = findAllArtifacts(message);
expect(artifacts).toHaveLength(2);
// Update first artifact
const result1 = replaceArtifactContent(text, artifacts[0], content1, 'First UPDATED');
expect(result1).not.toBeNull();
expect(result1).toContain('First UPDATED');
expect(result1).toContain(content2);
// Update second artifact
const result2 = replaceArtifactContent(text, artifacts[1], content2, 'Second UPDATED');
expect(result2).not.toBeNull();
expect(result2).toContain(content1);
expect(result2).toContain('Second UPDATED');
});
test('should not mistake ::: at position 0 for artifact end in complete artifacts', () => {
// This tests the specific fix - ensuring contentEnd=0 doesn't break complete artifacts
const original = 'test content';
// Create an artifact that will have ::: at position 0 when substring'd
const artifact = `${ARTIFACT_START}\n\`\`\`\n${original}\n\`\`\`\n${ARTIFACT_END}`;
const message = { text: artifact };
const artifacts = findAllArtifacts(message);
expect(artifacts).toHaveLength(1);
const updated = 'updated content';
const result = replaceArtifactContent(artifact, artifacts[0], original, updated);
expect(result).not.toBeNull();
expect(result).toContain(updated);
expect(result).toContain(ARTIFACT_END);
});
test('should handle empty artifacts', () => {
// Edge case: empty artifact
const artifact = `${ARTIFACT_START}\n${ARTIFACT_END}`;
const message = { text: artifact };
const artifacts = findAllArtifacts(message);
expect(artifacts).toHaveLength(1);
// Trying to replace non-existent content should return null
const result = replaceArtifactContent(artifact, artifacts[0], 'something', 'updated');
expect(result).toBeNull();
});
test('should preserve whitespace and formatting in complete artifacts', () => {
const original = ` function test() {
return {
value: 42
};
}`;
const artifact = createArtifactText({ content: original });
const message = { text: artifact };
const artifacts = findAllArtifacts(message);
const updated = ` function test() {
return {
value: 100
};
}`;
const result = replaceArtifactContent(artifact, artifacts[0], original, updated);
expect(result).not.toBeNull();
expect(result).toContain('value: 100');
// Should preserve exact formatting
expect(result).toMatch(
/```\n {2}function test\(\) \{\n {4}return \{\n {6}value: 100\n {4}\};\n {2}\}\n```/,
);
});
});
});
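The incomplete-artifact cases above all rest on one boundary rule: a closing ':::' only terminates an artifact when it is found at or after the content start, so a match at position 0 of a searched substring must not be read as the artifact's end. A minimal sketch of that guard, with assumed names (the project's real logic lives in the findAllArtifacts / replaceArtifactContent utilities under test):
function findContentEnd(text, searchStart) {
  // Searching from `searchStart` (just past the opening marker) means a ':::'
  // at position 0 of `text` can never be returned as the artifact's end.
  const idx = text.indexOf(':::', searchStart);
  // -1 means no closing marker: an incomplete artifact whose content runs to the end.
  return idx === -1 ? text.length : idx;
}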

View File

@@ -281,7 +281,7 @@ function createInProgressHandler(openai, thread_id, messages) {
openai.seenCompletedMessages.add(message_id);
const message = await openai.beta.threads.messages.retrieve(message_id, { thread_id });
const message = await openai.beta.threads.messages.retrieve(thread_id, message_id);
if (!message?.content?.length) {
return;
}
@@ -435,11 +435,9 @@ async function runAssistant({
};
});
const tool_outputs = await processRequiredActions(openai, actions);
const toolRun = await openai.beta.threads.runs.submitToolOutputs(run.id, {
thread_id: run.thread_id,
tool_outputs,
});
const outputs = await processRequiredActions(openai, actions);
const toolRun = await openai.beta.threads.runs.submitToolOutputs(run.thread_id, run.id, outputs);
// Recursive call with accumulated steps and messages
return await runAssistant({

View File

@@ -45,7 +45,9 @@ module.exports = {
EModelEndpoint.azureAssistants,
),
[EModelEndpoint.bedrock]: generateConfig(
process.env.BEDROCK_AWS_SECRET_ACCESS_KEY ?? process.env.BEDROCK_AWS_DEFAULT_REGION,
process.env.BEDROCK_AWS_SECRET_ACCESS_KEY ??
process.env.BEDROCK_AWS_BEARER_TOKEN ??
process.env.BEDROCK_AWS_DEFAULT_REGION,
),
/* key will be part of separate config */
[EModelEndpoint.agents]: generateConfig('true', undefined, EModelEndpoint.agents),

View File

@@ -74,6 +74,23 @@ async function getEndpointsConfig(req) {
};
}
// Add individual credential flags for Bedrock
if (mergedConfig[EModelEndpoint.bedrock]) {
const userProvideAccessKeyId = process.env.BEDROCK_AWS_ACCESS_KEY_ID === 'user_provided';
const userProvideSecretAccessKey =
process.env.BEDROCK_AWS_SECRET_ACCESS_KEY === 'user_provided';
const userProvideSessionToken = process.env.BEDROCK_AWS_SESSION_TOKEN === 'user_provided';
const userProvideBearerToken = process.env.BEDROCK_AWS_BEARER_TOKEN === 'user_provided';
mergedConfig[EModelEndpoint.bedrock] = {
...mergedConfig[EModelEndpoint.bedrock],
userProvideAccessKeyId,
userProvideSecretAccessKey,
userProvideSessionToken,
userProvideBearerToken,
};
}
const endpointsConfig = orderEndpointsConfig(mergedConfig);
await cache.set(CacheKeys.ENDPOINT_CONFIG, endpointsConfig);
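Downstream consumers can branch on these per-credential flags to decide which Bedrock inputs to render. A hedged sketch of such a consumer (only the flag names come from the hunk above; everything else is illustrative):
// Assumes an `endpointsConfig` object shaped like the merged config above.
const bedrockConfig = endpointsConfig[EModelEndpoint.bedrock] ?? {};
const promptFor = [];
if (bedrockConfig.userProvideAccessKeyId) promptFor.push('accessKeyId');
if (bedrockConfig.userProvideSecretAccessKey) promptFor.push('secretAccessKey');
if (bedrockConfig.userProvideSessionToken) promptFor.push('sessionToken');
if (bedrockConfig.userProvideBearerToken) promptFor.push('bearerToken');
// `promptFor` now lists exactly the credential fields the UI should request.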

View File

@@ -1,11 +1,12 @@
const { logger } = require('@librechat/data-schemas');
const { EModelEndpoint } = require('librechat-data-provider');
const { useAzurePlugins } = require('~/server/services/Config/EndpointService').config;
const {
getAnthropicModels,
getBedrockModels,
getOpenAIModels,
getGoogleModels,
getBedrockModels,
getAnthropicModels,
} = require('~/server/services/ModelService');
const { logger } = require('~/config');
/**
* Loads the default models for the application.
@@ -15,42 +16,58 @@ const {
*/
async function loadDefaultModels(req) {
try {
const [openAI, anthropic, azureOpenAI, assistants, azureAssistants, google, bedrock] =
await Promise.all([
getOpenAIModels({ user: req.user.id }).catch((error) => {
logger.error('Error fetching OpenAI models:', error);
const [
openAI,
anthropic,
azureOpenAI,
gptPlugins,
assistants,
azureAssistants,
google,
bedrock,
] = await Promise.all([
getOpenAIModels({ user: req.user.id }).catch((error) => {
logger.error('Error fetching OpenAI models:', error);
return [];
}),
getAnthropicModels({ user: req.user.id }).catch((error) => {
logger.error('Error fetching Anthropic models:', error);
return [];
}),
getOpenAIModels({ user: req.user.id, azure: true }).catch((error) => {
logger.error('Error fetching Azure OpenAI models:', error);
return [];
}),
getOpenAIModels({ user: req.user.id, azure: useAzurePlugins, plugins: true }).catch(
(error) => {
logger.error('Error fetching Plugin models:', error);
return [];
}),
getAnthropicModels({ user: req.user.id }).catch((error) => {
logger.error('Error fetching Anthropic models:', error);
return [];
}),
getOpenAIModels({ user: req.user.id, azure: true }).catch((error) => {
logger.error('Error fetching Azure OpenAI models:', error);
return [];
}),
getOpenAIModels({ assistants: true }).catch((error) => {
logger.error('Error fetching OpenAI Assistants API models:', error);
return [];
}),
getOpenAIModels({ azureAssistants: true }).catch((error) => {
logger.error('Error fetching Azure OpenAI Assistants API models:', error);
return [];
}),
Promise.resolve(getGoogleModels()).catch((error) => {
logger.error('Error getting Google models:', error);
return [];
}),
Promise.resolve(getBedrockModels()).catch((error) => {
logger.error('Error getting Bedrock models:', error);
return [];
}),
]);
},
),
getOpenAIModels({ assistants: true }).catch((error) => {
logger.error('Error fetching OpenAI Assistants API models:', error);
return [];
}),
getOpenAIModels({ azureAssistants: true }).catch((error) => {
logger.error('Error fetching Azure OpenAI Assistants API models:', error);
return [];
}),
Promise.resolve(getGoogleModels()).catch((error) => {
logger.error('Error getting Google models:', error);
return [];
}),
Promise.resolve(getBedrockModels()).catch((error) => {
logger.error('Error getting Bedrock models:', error);
return [];
}),
]);
return {
[EModelEndpoint.openAI]: openAI,
[EModelEndpoint.agents]: openAI,
[EModelEndpoint.google]: google,
[EModelEndpoint.anthropic]: anthropic,
[EModelEndpoint.gptPlugins]: gptPlugins,
[EModelEndpoint.azureOpenAI]: azureOpenAI,
[EModelEndpoint.assistants]: assistants,
[EModelEndpoint.azureAssistants]: azureAssistants,

View File

@@ -1,6 +1,6 @@
const { logger } = require('@librechat/data-schemas');
const { isAgentsEndpoint, removeNullishValues, Constants } = require('librechat-data-provider');
const { loadAgent } = require('~/models/Agent');
const { logger } = require('~/config');
const buildOptions = (req, endpoint, parsedBody, endpointType) => {
const { spec, iconURL, agent_id, instructions, ...model_parameters } = parsedBody;

View File

@@ -1,5 +1,4 @@
const { logger } = require('@librechat/data-schemas');
const { validateAgentModel } = require('@librechat/api');
const { createContentAggregator } = require('@librechat/agents');
const {
Constants,
@@ -12,12 +11,10 @@ const {
getDefaultHandlers,
} = require('~/server/controllers/agents/callbacks');
const { initializeAgent } = require('~/server/services/Endpoints/agents/agent');
const { getModelsConfig } = require('~/server/controllers/ModelController');
const { getCustomEndpointConfig } = require('~/server/services/Config');
const { loadAgentTools } = require('~/server/services/ToolService');
const AgentClient = require('~/server/controllers/agents/client');
const { getAgent } = require('~/models/Agent');
const { logViolation } = require('~/cache');
function createToolLoader() {
/**
@@ -75,19 +72,6 @@ const initializeClient = async ({ req, res, endpointOption }) => {
throw new Error('Agent not found');
}
const modelsConfig = await getModelsConfig(req);
const validationResult = await validateAgentModel({
req,
res,
modelsConfig,
logViolation,
agent: primaryAgent,
});
if (!validationResult.isValid) {
throw new Error(validationResult.error?.message);
}
const agentConfigs = new Map();
/** @type {Set<string>} */
const allowedProviders = new Set(req?.app?.locals?.[EModelEndpoint.agents]?.allowedProviders);
@@ -117,19 +101,6 @@ const initializeClient = async ({ req, res, endpointOption }) => {
if (!agent) {
throw new Error(`Agent ${agentId} not found`);
}
const validationResult = await validateAgentModel({
req,
res,
agent,
modelsConfig,
logViolation,
});
if (!validationResult.isValid) {
throw new Error(validationResult.error?.message);
}
const config = await initializeAgent({
req,
res,

View File

@@ -6,7 +6,7 @@ const {
getUserKeyExpiry,
checkUserKeyExpiry,
} = require('~/server/services/UserService');
const OAIClient = require('~/app/clients/OpenAIClient');
const OpenAIClient = require('~/app/clients/OpenAIClient');
const { isUserProvided } = require('~/server/utils');
const initializeClient = async ({ req, res, endpointOption, version, initAppClient = false }) => {
@@ -79,7 +79,7 @@ const initializeClient = async ({ req, res, endpointOption, version, initAppClie
openai.res = res;
if (endpointOption && initAppClient) {
const client = new OAIClient(apiKey, clientOptions);
const client = new OpenAIClient(apiKey, clientOptions);
return {
client,
openai,

View File

@@ -3,11 +3,11 @@ const { ProxyAgent } = require('undici');
const { constructAzureURL, isUserProvided, resolveHeaders } = require('@librechat/api');
const { ErrorTypes, EModelEndpoint, mapModelToAzureConfig } = require('librechat-data-provider');
const {
checkUserKeyExpiry,
getUserKeyValues,
getUserKeyExpiry,
checkUserKeyExpiry,
} = require('~/server/services/UserService');
const OAIClient = require('~/app/clients/OpenAIClient');
const OpenAIClient = require('~/app/clients/OpenAIClient');
class Files {
constructor(client) {
@@ -184,7 +184,7 @@ const initializeClient = async ({ req, res, version, endpointOption, initAppClie
}
if (endpointOption && initAppClient) {
const client = new OAIClient(apiKey, clientOptions);
const client = new OpenAIClient(apiKey, clientOptions);
return {
client,
openai,

View File

@@ -8,27 +8,43 @@ const {
bedrockOutputParser,
removeNullishValues,
} = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const { getUserKeyValues, checkUserKeyExpiry } = require('~/server/services/UserService');
const getOptions = async ({ req, overrideModel, endpointOption }) => {
const {
BEDROCK_AWS_SECRET_ACCESS_KEY,
BEDROCK_AWS_ACCESS_KEY_ID,
BEDROCK_AWS_SESSION_TOKEN,
BEDROCK_AWS_BEARER_TOKEN,
BEDROCK_REVERSE_PROXY,
BEDROCK_AWS_DEFAULT_REGION,
PROXY,
} = process.env;
const expiresAt = req.body.key;
const isUserProvided = BEDROCK_AWS_SECRET_ACCESS_KEY === AuthType.USER_PROVIDED;
const isUserProvided =
BEDROCK_AWS_SECRET_ACCESS_KEY === AuthType.USER_PROVIDED ||
BEDROCK_AWS_BEARER_TOKEN === AuthType.USER_PROVIDED;
let credentials = isUserProvided
? await getUserKey({ userId: req.user.id, name: EModelEndpoint.bedrock })
: {
accessKeyId: BEDROCK_AWS_ACCESS_KEY_ID,
secretAccessKey: BEDROCK_AWS_SECRET_ACCESS_KEY,
...(BEDROCK_AWS_SESSION_TOKEN && { sessionToken: BEDROCK_AWS_SESSION_TOKEN }),
};
let userValues = null;
if (isUserProvided) {
if (expiresAt) {
checkUserKeyExpiry(expiresAt, EModelEndpoint.bedrock);
}
userValues = await getUserKeyValues({ userId: req.user.id, name: EModelEndpoint.bedrock });
}
let credentials;
if (isUserProvided) {
credentials = JSON.parse(userValues.apiKey);
} else if (BEDROCK_AWS_BEARER_TOKEN) {
credentials = { bearerToken: BEDROCK_AWS_BEARER_TOKEN };
} else {
credentials = {
accessKeyId: BEDROCK_AWS_ACCESS_KEY_ID,
secretAccessKey: BEDROCK_AWS_SECRET_ACCESS_KEY,
...(BEDROCK_AWS_SESSION_TOKEN && { sessionToken: BEDROCK_AWS_SESSION_TOKEN }),
};
}
if (!credentials) {
throw new Error('Bedrock credentials not provided. Please provide them again.');
@@ -36,6 +52,7 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {
if (
!isUserProvided &&
!credentials.bearerToken &&
(credentials.accessKeyId === undefined || credentials.accessKeyId === '') &&
(credentials.secretAccessKey === undefined || credentials.secretAccessKey === '')
) {

View File

@@ -1,166 +0,0 @@
const { EModelEndpoint } = require('librechat-data-provider');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { validateAnthropicPdf } = require('../validation/pdfValidator');
/**
* Converts a readable stream to a buffer.
*
* @param {NodeJS.ReadableStream} stream - The readable stream to convert.
* @returns {Promise<Buffer>} - Promise resolving to the buffer.
*/
async function streamToBuffer(stream) {
return new Promise((resolve, reject) => {
const chunks = [];
stream.on('data', (chunk) => {
chunks.push(chunk);
});
stream.on('end', () => {
try {
const buffer = Buffer.concat(chunks);
chunks.length = 0; // Clear the array
resolve(buffer);
} catch (err) {
reject(err);
}
});
stream.on('error', (error) => {
chunks.length = 0;
reject(error);
});
}).finally(() => {
// Clean up the stream if required
if (stream.destroy && typeof stream.destroy === 'function') {
stream.destroy();
}
});
}
/**
* Processes and encodes document files for various endpoints
*
* @param {Express.Request} req - Express request object
* @param {MongoFile[]} files - Array of file objects to process
* @param {string} endpoint - The endpoint identifier (e.g., EModelEndpoint.anthropic)
* @returns {Promise<{documents: MessageContentDocument[], files: MongoFile[]}>}
*/
async function encodeAndFormatDocuments(req, files, endpoint) {
const promises = [];
/** @type {Record<FileSources, Pick<ReturnType<typeof getStrategyFunctions>, 'prepareDocumentPayload' | 'getDownloadStream'>>} */
const encodingMethods = {};
/** @type {{ documents: MessageContentDocument[]; files: MongoFile[] }} */
const result = {
documents: [],
files: [],
};
if (!files || !files.length) {
return result;
}
// Filter for document files only
const documentFiles = files.filter(
(file) => file.type === 'application/pdf' || file.type?.startsWith('application/'), // Future: support for other document types
);
if (!documentFiles.length) {
return result;
}
for (let file of documentFiles) {
/** @type {FileSources} */
const source = file.source ?? 'local';
// Only process PDFs for Anthropic for now
if (file.type !== 'application/pdf' || endpoint !== EModelEndpoint.anthropic) {
continue;
}
if (!encodingMethods[source]) {
encodingMethods[source] = getStrategyFunctions(source);
}
// Prepare file metadata
const fileMetadata = {
file_id: file.file_id || file._id,
temp_file_id: file.temp_file_id,
filepath: file.filepath,
source: file.source,
filename: file.filename,
type: file.type,
};
promises.push([file, fileMetadata]);
}
const results = await Promise.allSettled(
promises.map(async ([file, fileMetadata]) => {
if (!file || !fileMetadata) {
return { file: null, content: null, metadata: fileMetadata };
}
try {
const source = file.source ?? 'local';
const { getDownloadStream } = encodingMethods[source];
const stream = await getDownloadStream(req, file.filepath);
const buffer = await streamToBuffer(stream);
const documentContent = buffer.toString('base64');
return {
file,
content: documentContent,
metadata: fileMetadata,
};
} catch (error) {
console.error(`Error processing document ${file.filename}:`, error);
return { file, content: null, metadata: fileMetadata };
}
}),
);
for (const settledResult of results) {
if (settledResult.status === 'rejected') {
console.error('Document processing failed:', settledResult.reason);
continue;
}
const { file, content, metadata } = settledResult.value;
if (!content || !file) {
if (metadata) {
result.files.push(metadata);
}
continue;
}
if (file.type === 'application/pdf' && endpoint === EModelEndpoint.anthropic) {
const pdfBuffer = Buffer.from(content, 'base64');
const validation = await validateAnthropicPdf(pdfBuffer, pdfBuffer.length);
if (!validation.isValid) {
throw new Error(`PDF validation failed: ${validation.error}`);
}
const documentPart = {
type: 'document',
source: {
type: 'base64',
media_type: 'application/pdf',
data: content,
},
};
result.documents.push(documentPart);
result.files.push(metadata);
}
}
return result;
}
module.exports = {
encodeAndFormatDocuments,
};
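For reference, a minimal usage sketch of the module removed above (the call site is an assumption; the (req, files, endpoint) signature and result shape come from the code itself):
const { encodeAndFormatDocuments } = require('./encode');
// Inside an async handler, with `message.files` holding MongoFile records:
const { documents, files } = await encodeAndFormatDocuments(
  req,
  message.files,
  EModelEndpoint.anthropic,
);
// `documents` holds base64 'document' content parts ready for an Anthropic
// payload; `files` echoes the metadata of every file that was processed.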

View File

@@ -1,5 +0,0 @@
const { encodeAndFormatDocuments } = require('./encode');
module.exports = {
encodeAndFormatDocuments,
};

View File

@@ -391,17 +391,7 @@ const processFileUpload = async ({ req, res, metadata }) => {
const isAssistantUpload = isAssistantsEndpoint(metadata.endpoint);
const assistantSource =
metadata.endpoint === EModelEndpoint.azureAssistants ? FileSources.azure : FileSources.openai;
// Use local storage for Anthropic native PDF support, vectordb for others
const isAnthropicUpload = metadata.endpoint === EModelEndpoint.anthropic;
let source;
if (isAssistantUpload) {
source = assistantSource;
} else if (isAnthropicUpload) {
source = FileSources.local;
} else {
source = FileSources.vectordb;
}
const source = isAssistantUpload ? assistantSource : FileSources.vectordb;
const { handleFileUpload } = getStrategyFunctions(source);
const { file_id, temp_file_id } = metadata;

View File

@@ -1,77 +0,0 @@
const { logger } = require('~/config');
const { anthropicPdfSizeLimit } = require('librechat-data-provider');
/**
* Validates if a PDF meets Anthropic's requirements
* @param {Buffer} pdfBuffer - The PDF file as a buffer
* @param {number} fileSize - The file size in bytes
* @returns {Promise<{isValid: boolean, error?: string}>}
*/
async function validateAnthropicPdf(pdfBuffer, fileSize) {
try {
// Check file size (32MB limit)
if (fileSize > anthropicPdfSizeLimit) {
return {
isValid: false,
error: `PDF file size (${Math.round(fileSize / (1024 * 1024))}MB) exceeds Anthropic's 32MB limit`,
};
}
// Basic PDF header validation
if (!pdfBuffer || pdfBuffer.length < 5) {
return {
isValid: false,
error: 'Invalid PDF file: too small or corrupted',
};
}
// Check PDF magic bytes
const pdfHeader = pdfBuffer.subarray(0, 5).toString();
if (!pdfHeader.startsWith('%PDF-')) {
return {
isValid: false,
error: 'Invalid PDF file: missing PDF header',
};
}
// Check for password protection/encryption
const pdfContent = pdfBuffer.toString('binary');
if (
pdfContent.includes('/Encrypt ') ||
pdfContent.includes('/U (') ||
pdfContent.includes('/O (')
) {
return {
isValid: false,
error: 'PDF is password-protected or encrypted. Anthropic requires unencrypted PDFs.',
};
}
// Estimate the page count with a rough heuristic: count /Type /Page objects
const pageMatches = pdfContent.match(/\/Type[\s]*\/Page[^s]/g);
const estimatedPages = pageMatches ? pageMatches.length : 1;
if (estimatedPages > 100) {
return {
isValid: false,
error: `PDF has approximately ${estimatedPages} pages, exceeding Anthropic's 100-page limit`,
};
}
logger.debug(
`PDF validation passed: ${Math.round(fileSize / 1024)}KB, ~${estimatedPages} pages`,
);
return { isValid: true };
} catch (error) {
logger.error('PDF validation error:', error);
return {
isValid: false,
error: 'Failed to validate PDF file',
};
}
}
module.exports = {
validateAnthropicPdf,
};
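A usage sketch for the removed validator (the call site is hypothetical; the signature and result shape are taken from the code above):
const { validateAnthropicPdf } = require('./pdfValidator');
// Inside an async upload path, after reading the PDF into a buffer:
const { isValid, error } = await validateAnthropicPdf(pdfBuffer, pdfBuffer.length);
if (!isValid) {
  throw new Error(`PDF validation failed: ${error}`);
}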

View File

@@ -12,7 +12,7 @@ const {
} = require('@librechat/api');
const { findToken, createToken, updateToken } = require('~/models');
const { getMCPManager, getFlowStateManager } = require('~/config');
const { getCachedTools, loadCustomConfig } = require('./Config');
const { getCachedTools } = require('./Config');
const { getLogStores } = require('~/cache');
/**
@@ -239,135 +239,6 @@ async function createMCPTool({ req, res, toolKey, provider: _provider }) {
return toolInstance;
}
/**
* Get MCP setup data including config, connections, and OAuth servers
* @param {string} userId - The user ID
* @returns {Object} Object containing mcpConfig, appConnections, userConnections, and oauthServers
*/
async function getMCPSetupData(userId) {
const printConfig = false;
const config = await loadCustomConfig(printConfig);
const mcpConfig = config?.mcpServers;
if (!mcpConfig) {
throw new Error('MCP config not found');
}
const mcpManager = getMCPManager(userId);
const appConnections = mcpManager.getAllConnections() || new Map();
const userConnections = mcpManager.getUserConnections(userId) || new Map();
const oauthServers = mcpManager.getOAuthServers() || new Set();
return {
mcpConfig,
appConnections,
userConnections,
oauthServers,
};
}
/**
* Check OAuth flow status for a user and server
* @param {string} userId - The user ID
* @param {string} serverName - The server name
* @returns {Object} Object containing hasActiveFlow and hasFailedFlow flags
*/
async function checkOAuthFlowStatus(userId, serverName) {
const flowsCache = getLogStores(CacheKeys.FLOWS);
const flowManager = getFlowStateManager(flowsCache);
const flowId = MCPOAuthHandler.generateFlowId(userId, serverName);
try {
const flowState = await flowManager.getFlowState(flowId, 'mcp_oauth');
if (!flowState) {
return { hasActiveFlow: false, hasFailedFlow: false };
}
const flowAge = Date.now() - flowState.createdAt;
const flowTTL = flowState.ttl || 180000; // Default 3 minutes
if (flowState.status === 'FAILED' || flowAge > flowTTL) {
const wasCancelled = flowState.error && flowState.error.includes('cancelled');
if (wasCancelled) {
logger.debug(`[MCP Connection Status] Found cancelled OAuth flow for ${serverName}`, {
flowId,
status: flowState.status,
error: flowState.error,
});
return { hasActiveFlow: false, hasFailedFlow: false };
} else {
logger.debug(`[MCP Connection Status] Found failed OAuth flow for ${serverName}`, {
flowId,
status: flowState.status,
flowAge,
flowTTL,
timedOut: flowAge > flowTTL,
error: flowState.error,
});
return { hasActiveFlow: false, hasFailedFlow: true };
}
}
if (flowState.status === 'PENDING') {
logger.debug(`[MCP Connection Status] Found active OAuth flow for ${serverName}`, {
flowId,
flowAge,
flowTTL,
});
return { hasActiveFlow: true, hasFailedFlow: false };
}
return { hasActiveFlow: false, hasFailedFlow: false };
} catch (error) {
logger.error(`[MCP Connection Status] Error checking OAuth flows for ${serverName}:`, error);
return { hasActiveFlow: false, hasFailedFlow: false };
}
}
/**
* Get connection status for a specific MCP server
* @param {string} userId - The user ID
* @param {string} serverName - The server name
* @param {Map} appConnections - App-level connections
* @param {Map} userConnections - User-level connections
* @param {Set} oauthServers - Set of OAuth servers
* @returns {Object} Object containing requiresOAuth and connectionState
*/
async function getServerConnectionStatus(
userId,
serverName,
appConnections,
userConnections,
oauthServers,
) {
const getConnectionState = () =>
appConnections.get(serverName)?.connectionState ??
userConnections.get(serverName)?.connectionState ??
'disconnected';
const baseConnectionState = getConnectionState();
let finalConnectionState = baseConnectionState;
if (baseConnectionState === 'disconnected' && oauthServers.has(serverName)) {
const { hasActiveFlow, hasFailedFlow } = await checkOAuthFlowStatus(userId, serverName);
if (hasFailedFlow) {
finalConnectionState = 'error';
} else if (hasActiveFlow) {
finalConnectionState = 'connecting';
}
}
return {
requiresOAuth: oauthServers.has(serverName),
connectionState: finalConnectionState,
};
}
module.exports = {
createMCPTool,
getMCPSetupData,
checkOAuthFlowStatus,
getServerConnectionStatus,
};
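The connection-status endpoints presumably composed these helpers roughly as follows (a hedged sketch; the route handler itself is not shown in this compare):
// Inside an async route handler:
const { mcpConfig, appConnections, userConnections, oauthServers } =
  await getMCPSetupData(req.user.id);
const statuses = {};
for (const serverName of Object.keys(mcpConfig)) {
  statuses[serverName] = await getServerConnectionStatus(
    req.user.id,
    serverName,
    appConnections,
    userConnections,
    oauthServers,
  );
}
// statuses[serverName] → { requiresOAuth, connectionState }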

View File

@@ -1,510 +0,0 @@
const { logger } = require('@librechat/data-schemas');
const { MCPOAuthHandler } = require('@librechat/api');
const { CacheKeys } = require('librechat-data-provider');
const { getMCPSetupData, checkOAuthFlowStatus, getServerConnectionStatus } = require('./MCP');
// Mock all dependencies
jest.mock('@librechat/data-schemas', () => ({
logger: {
debug: jest.fn(),
error: jest.fn(),
},
}));
jest.mock('@librechat/api', () => ({
MCPOAuthHandler: {
generateFlowId: jest.fn(),
},
}));
jest.mock('librechat-data-provider', () => ({
CacheKeys: {
FLOWS: 'flows',
},
}));
jest.mock('./Config', () => ({
loadCustomConfig: jest.fn(),
}));
jest.mock('~/config', () => ({
getMCPManager: jest.fn(),
getFlowStateManager: jest.fn(),
}));
jest.mock('~/cache', () => ({
getLogStores: jest.fn(),
}));
jest.mock('~/models', () => ({
findToken: jest.fn(),
createToken: jest.fn(),
updateToken: jest.fn(),
}));
describe('tests for the new helper functions used by the MCP connection status endpoints', () => {
let mockLoadCustomConfig;
let mockGetMCPManager;
let mockGetFlowStateManager;
let mockGetLogStores;
beforeEach(() => {
jest.clearAllMocks();
mockLoadCustomConfig = require('./Config').loadCustomConfig;
mockGetMCPManager = require('~/config').getMCPManager;
mockGetFlowStateManager = require('~/config').getFlowStateManager;
mockGetLogStores = require('~/cache').getLogStores;
});
describe('getMCPSetupData', () => {
const mockUserId = 'user-123';
const mockConfig = {
mcpServers: {
server1: { type: 'stdio' },
server2: { type: 'http' },
},
};
beforeEach(() => {
mockGetMCPManager.mockReturnValue({
getAllConnections: jest.fn(() => new Map()),
getUserConnections: jest.fn(() => new Map()),
getOAuthServers: jest.fn(() => new Set()),
});
});
it('should successfully return MCP setup data', async () => {
mockLoadCustomConfig.mockResolvedValue(mockConfig);
const mockAppConnections = new Map([['server1', { status: 'connected' }]]);
const mockUserConnections = new Map([['server2', { status: 'disconnected' }]]);
const mockOAuthServers = new Set(['server2']);
const mockMCPManager = {
getAllConnections: jest.fn(() => mockAppConnections),
getUserConnections: jest.fn(() => mockUserConnections),
getOAuthServers: jest.fn(() => mockOAuthServers),
};
mockGetMCPManager.mockReturnValue(mockMCPManager);
const result = await getMCPSetupData(mockUserId);
expect(mockLoadCustomConfig).toHaveBeenCalledWith(false);
expect(mockGetMCPManager).toHaveBeenCalledWith(mockUserId);
expect(mockMCPManager.getAllConnections).toHaveBeenCalled();
expect(mockMCPManager.getUserConnections).toHaveBeenCalledWith(mockUserId);
expect(mockMCPManager.getOAuthServers).toHaveBeenCalled();
expect(result).toEqual({
mcpConfig: mockConfig.mcpServers,
appConnections: mockAppConnections,
userConnections: mockUserConnections,
oauthServers: mockOAuthServers,
});
});
it('should throw error when MCP config not found', async () => {
mockLoadCustomConfig.mockResolvedValue({});
await expect(getMCPSetupData(mockUserId)).rejects.toThrow('MCP config not found');
});
it('should handle null values from MCP manager gracefully', async () => {
mockLoadCustomConfig.mockResolvedValue(mockConfig);
const mockMCPManager = {
getAllConnections: jest.fn(() => null),
getUserConnections: jest.fn(() => null),
getOAuthServers: jest.fn(() => null),
};
mockGetMCPManager.mockReturnValue(mockMCPManager);
const result = await getMCPSetupData(mockUserId);
expect(result).toEqual({
mcpConfig: mockConfig.mcpServers,
appConnections: new Map(),
userConnections: new Map(),
oauthServers: new Set(),
});
});
});
describe('checkOAuthFlowStatus', () => {
const mockUserId = 'user-123';
const mockServerName = 'test-server';
const mockFlowId = 'flow-123';
beforeEach(() => {
const mockFlowsCache = {};
const mockFlowManager = {
getFlowState: jest.fn(),
};
mockGetLogStores.mockReturnValue(mockFlowsCache);
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
MCPOAuthHandler.generateFlowId.mockReturnValue(mockFlowId);
});
it('should return false flags when no flow state exists', async () => {
const mockFlowManager = { getFlowState: jest.fn(() => null) };
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
const result = await checkOAuthFlowStatus(mockUserId, mockServerName);
expect(mockGetLogStores).toHaveBeenCalledWith(CacheKeys.FLOWS);
expect(MCPOAuthHandler.generateFlowId).toHaveBeenCalledWith(mockUserId, mockServerName);
expect(mockFlowManager.getFlowState).toHaveBeenCalledWith(mockFlowId, 'mcp_oauth');
expect(result).toEqual({ hasActiveFlow: false, hasFailedFlow: false });
});
it('should detect failed flow when status is FAILED', async () => {
const mockFlowState = {
status: 'FAILED',
createdAt: Date.now() - 60000, // 1 minute ago
ttl: 180000,
};
const mockFlowManager = { getFlowState: jest.fn(() => mockFlowState) };
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
const result = await checkOAuthFlowStatus(mockUserId, mockServerName);
expect(result).toEqual({ hasActiveFlow: false, hasFailedFlow: true });
expect(logger.debug).toHaveBeenCalledWith(
expect.stringContaining('Found failed OAuth flow'),
expect.objectContaining({
flowId: mockFlowId,
status: 'FAILED',
}),
);
});
it('should detect failed flow when flow has timed out', async () => {
const mockFlowState = {
status: 'PENDING',
createdAt: Date.now() - 200000, // 200 seconds ago (> 180s TTL)
ttl: 180000,
};
const mockFlowManager = { getFlowState: jest.fn(() => mockFlowState) };
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
const result = await checkOAuthFlowStatus(mockUserId, mockServerName);
expect(result).toEqual({ hasActiveFlow: false, hasFailedFlow: true });
expect(logger.debug).toHaveBeenCalledWith(
expect.stringContaining('Found failed OAuth flow'),
expect.objectContaining({
timedOut: true,
}),
);
});
it('should detect failed flow when TTL not specified and flow exceeds default TTL', async () => {
const mockFlowState = {
status: 'PENDING',
createdAt: Date.now() - 200000, // 200 seconds ago (> 180s default TTL)
// ttl not specified, should use 180000 default
};
const mockFlowManager = { getFlowState: jest.fn(() => mockFlowState) };
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
const result = await checkOAuthFlowStatus(mockUserId, mockServerName);
expect(result).toEqual({ hasActiveFlow: false, hasFailedFlow: true });
});
it('should detect active flow when status is PENDING and within TTL', async () => {
const mockFlowState = {
status: 'PENDING',
createdAt: Date.now() - 60000, // 1 minute ago (< 180s TTL)
ttl: 180000,
};
const mockFlowManager = { getFlowState: jest.fn(() => mockFlowState) };
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
const result = await checkOAuthFlowStatus(mockUserId, mockServerName);
expect(result).toEqual({ hasActiveFlow: true, hasFailedFlow: false });
expect(logger.debug).toHaveBeenCalledWith(
expect.stringContaining('Found active OAuth flow'),
expect.objectContaining({
flowId: mockFlowId,
}),
);
});
it('should return false flags for other statuses', async () => {
const mockFlowState = {
status: 'COMPLETED',
createdAt: Date.now() - 60000,
ttl: 180000,
};
const mockFlowManager = { getFlowState: jest.fn(() => mockFlowState) };
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
const result = await checkOAuthFlowStatus(mockUserId, mockServerName);
expect(result).toEqual({ hasActiveFlow: false, hasFailedFlow: false });
});
it('should handle errors gracefully', async () => {
const mockError = new Error('Flow state error');
const mockFlowManager = {
getFlowState: jest.fn(() => {
throw mockError;
}),
};
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
const result = await checkOAuthFlowStatus(mockUserId, mockServerName);
expect(result).toEqual({ hasActiveFlow: false, hasFailedFlow: false });
expect(logger.error).toHaveBeenCalledWith(
expect.stringContaining('Error checking OAuth flows'),
mockError,
);
});
});
describe('getServerConnectionStatus', () => {
const mockUserId = 'user-123';
const mockServerName = 'test-server';
it('should return app connection state when available', async () => {
const appConnections = new Map([[mockServerName, { connectionState: 'connected' }]]);
const userConnections = new Map();
const oauthServers = new Set();
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: false,
connectionState: 'connected',
});
});
it('should fallback to user connection state when app connection not available', async () => {
const appConnections = new Map();
const userConnections = new Map([[mockServerName, { connectionState: 'connecting' }]]);
const oauthServers = new Set();
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: false,
connectionState: 'connecting',
});
});
it('should default to disconnected when no connections exist', async () => {
const appConnections = new Map();
const userConnections = new Map();
const oauthServers = new Set();
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: false,
connectionState: 'disconnected',
});
});
it('should prioritize app connection over user connection', async () => {
const appConnections = new Map([[mockServerName, { connectionState: 'connected' }]]);
const userConnections = new Map([[mockServerName, { connectionState: 'disconnected' }]]);
const oauthServers = new Set();
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: false,
connectionState: 'connected',
});
});
it('should indicate OAuth requirement when server is in OAuth servers set', async () => {
const appConnections = new Map();
const userConnections = new Map();
const oauthServers = new Set([mockServerName]);
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
appConnections,
userConnections,
oauthServers,
);
expect(result.requiresOAuth).toBe(true);
});
it('should handle OAuth flow status when disconnected and requires OAuth with failed flow', async () => {
const appConnections = new Map();
const userConnections = new Map();
const oauthServers = new Set([mockServerName]);
// Mock flow state to return failed flow
const mockFlowManager = {
getFlowState: jest.fn(() => ({
status: 'FAILED',
createdAt: Date.now() - 60000,
ttl: 180000,
})),
};
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
mockGetLogStores.mockReturnValue({});
MCPOAuthHandler.generateFlowId.mockReturnValue('test-flow-id');
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: true,
connectionState: 'error',
});
});
it('should handle OAuth flow status when disconnected and requires OAuth with active flow', async () => {
const appConnections = new Map();
const userConnections = new Map();
const oauthServers = new Set([mockServerName]);
// Mock flow state to return active flow
const mockFlowManager = {
getFlowState: jest.fn(() => ({
status: 'PENDING',
createdAt: Date.now() - 60000, // 1 minute ago
ttl: 180000, // 3 minutes TTL
})),
};
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
mockGetLogStores.mockReturnValue({});
MCPOAuthHandler.generateFlowId.mockReturnValue('test-flow-id');
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: true,
connectionState: 'connecting',
});
});
it('should handle OAuth flow status when disconnected and requires OAuth with no flow', async () => {
const appConnections = new Map();
const userConnections = new Map();
const oauthServers = new Set([mockServerName]);
// Mock flow state to return no flow
const mockFlowManager = {
getFlowState: jest.fn(() => null),
};
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
mockGetLogStores.mockReturnValue({});
MCPOAuthHandler.generateFlowId.mockReturnValue('test-flow-id');
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: true,
connectionState: 'disconnected',
});
});
it('should not check OAuth flow status when server is connected', async () => {
const mockFlowManager = {
getFlowState: jest.fn(),
};
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
mockGetLogStores.mockReturnValue({});
const appConnections = new Map([[mockServerName, { connectionState: 'connected' }]]);
const userConnections = new Map();
const oauthServers = new Set([mockServerName]);
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: true,
connectionState: 'connected',
});
// Should not call flow manager since server is connected
expect(mockFlowManager.getFlowState).not.toHaveBeenCalled();
});
it('should not check OAuth flow status when server does not require OAuth', async () => {
const mockFlowManager = {
getFlowState: jest.fn(),
};
mockGetFlowStateManager.mockReturnValue(mockFlowManager);
mockGetLogStores.mockReturnValue({});
const appConnections = new Map();
const userConnections = new Map();
const oauthServers = new Set(); // Server not in OAuth servers
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: false,
connectionState: 'disconnected',
});
// Should not call flow manager since server doesn't require OAuth
expect(mockFlowManager.getFlowState).not.toHaveBeenCalled();
});
});
});

View File

@@ -91,10 +91,11 @@ class RunManager {
* @param {boolean} [params.final] - The end of the run polling loop, due to `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or `expired` statuses.
*/
async fetchRunSteps({ openai, thread_id, run_id, runStatus, final = false }) {
// const { data: steps, first_id, last_id, has_more } = await openai.beta.threads.runs.steps.list(run_id, { thread_id });
// const { data: steps, first_id, last_id, has_more } = await openai.beta.threads.runs.steps.list(thread_id, run_id);
const { data: _steps } = await openai.beta.threads.runs.steps.list(
thread_id,
run_id,
{ thread_id },
{},
{
timeout: 3000,
maxRetries: 5,

View File

@@ -573,9 +573,9 @@ class StreamRunManager {
let toolRun;
try {
toolRun = this.openai.beta.threads.runs.submitToolOutputsStream(
run.thread_id,
run.id,
{
thread_id: run.thread_id,
tool_outputs,
stream: true,
},

View File

@@ -33,7 +33,7 @@ async function withTimeout(promise, timeoutMs, timeoutMessage) {
* @param {string} [params.body.model] - Optional. The ID of the model to be used for this run.
* @param {string} [params.body.instructions] - Optional. Override the default system message of the assistant.
* @param {string} [params.body.additional_instructions] - Optional. Appends additional instructions
* at the end of the instructions for the run. This is useful for modifying
* the behavior on a per-run basis without overriding other instructions.
* @param {Object[]} [params.body.tools] - Optional. Override the tools the assistant can use for this run.
* @param {string[]} [params.body.file_ids] - Optional.
@@ -179,7 +179,7 @@ async function waitForRun({
* @return {Promise<RunStep[]>} A promise that resolves to an array of RunStep objects.
*/
async function _retrieveRunSteps({ openai, thread_id, run_id }) {
const runSteps = await openai.beta.threads.runs.steps.list(run_id, { thread_id });
const runSteps = await openai.beta.threads.runs.steps.list(thread_id, run_id);
return runSteps;
}
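The same mechanical swap recurs across these hunks: the newer openai-node convention takes the run id plus a params object carrying the thread id, while the older convention takes positional thread and run ids. Side by side (method name from the hunk above; tying each style to an exact SDK version is an assumption):
// newer convention (removed in this compare):
await openai.beta.threads.runs.steps.list(run_id, { thread_id });
// older positional convention (restored in this compare):
await openai.beta.threads.runs.steps.list(thread_id, run_id);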

View File

@@ -192,8 +192,7 @@ async function addThreadMetadata({ openai, thread_id, messageId, messages }) {
const promises = [];
for (const message of messages) {
promises.push(
openai.beta.threads.messages.update(message.id, {
thread_id,
openai.beta.threads.messages.update(thread_id, message.id, {
metadata: {
messageId,
},
@@ -264,8 +263,7 @@ async function syncMessages({
}
modifyPromises.push(
openai.beta.threads.messages.update(apiMessage.id, {
thread_id,
openai.beta.threads.messages.update(thread_id, apiMessage.id, {
metadata: {
messageId: dbMessage.messageId,
},
@@ -415,7 +413,7 @@ async function checkMessageGaps({
}) {
const promises = [];
promises.push(openai.beta.threads.messages.list(thread_id, defaultOrderQuery));
promises.push(openai.beta.threads.runs.steps.list(run_id, { thread_id }));
promises.push(openai.beta.threads.runs.steps.list(thread_id, run_id));
/** @type {[{ data: ThreadMessage[] }, { data: RunStep[] }]} */
const [response, stepsResponse] = await Promise.all(promises);

View File

@@ -1,7 +1,6 @@
const fs = require('fs');
const path = require('path');
const { sleep } = require('@librechat/agents');
const { getToolkitKey } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { zodToJsonSchema } = require('zod-to-json-schema');
const { Calculator } = require('@langchain/community/tools/calculator');
@@ -12,6 +11,7 @@ const {
ErrorTypes,
ContentTypes,
imageGenTools,
EToolResources,
EModelEndpoint,
actionDelimiter,
ImageVisionTool,
@@ -40,6 +40,30 @@ const { recordUsage } = require('~/server/services/Threads');
const { loadTools } = require('~/app/clients/tools/util');
const { redactMessage } = require('~/config/parsers');
/**
* @param {string} toolName
* @returns {string | undefined} toolKey
*/
function getToolkitKey(toolName) {
/** @type {string|undefined} */
let toolkitKey;
for (const toolkit of toolkits) {
if (toolName.startsWith(EToolResources.image_edit)) {
const splitMatches = toolkit.pluginKey.split('_');
const suffix = splitMatches[splitMatches.length - 1];
if (toolName.endsWith(suffix)) {
toolkitKey = toolkit.pluginKey;
break;
}
}
if (toolName.startsWith(toolkit.pluginKey)) {
toolkitKey = toolkit.pluginKey;
break;
}
}
return toolkitKey;
}
/**
* Loads and formats tools from the specified tool directory.
*
@@ -121,7 +145,7 @@ function loadAndFormatTools({ directory, adminFilter = [], adminIncluded = [] })
for (const toolInstance of basicToolInstances) {
const formattedTool = formatToOpenAIAssistantTool(toolInstance);
let toolName = formattedTool[Tools.function].name;
toolName = getToolkitKey({ toolkits, toolName }) ?? toolName;
toolName = getToolkitKey(toolName) ?? toolName;
if (filter.has(toolName) && included.size === 0) {
continue;
}
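For a sense of how the reintroduced getToolkitKey resolves names, a worked example with hypothetical data (neither the toolkit nor the tool names below are claimed to exist in the codebase, and toolkits is the module-scoped list the function closes over):
// Suppose toolkits = [{ pluginKey: 'image_gen_oai' }].
getToolkitKey('image_edit_oai'); // → 'image_gen_oai' (image_edit branch: suffix 'oai' matches)
getToolkitKey('image_gen_oai_hd'); // → 'image_gen_oai' (plain prefix match)
getToolkitKey('calculator'); // → undefined (no toolkit owns it)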

View File

@@ -2,11 +2,11 @@ const {
SystemRoles,
Permissions,
PermissionTypes,
isMemoryEnabled,
removeNullishValues,
} = require('librechat-data-provider');
const { logger } = require('@librechat/data-schemas');
const { isMemoryEnabled } = require('@librechat/api');
const { updateAccessPermissions } = require('~/models/Role');
const { logger } = require('~/config');
/**
* Loads the default interface object.

View File

@@ -1,10 +1,10 @@
const fs = require('fs');
const { isEnabled } = require('@librechat/api');
const LdapStrategy = require('passport-ldapauth');
const { SystemRoles } = require('librechat-data-provider');
const { logger } = require('@librechat/data-schemas');
const { SystemRoles, ErrorTypes } = require('librechat-data-provider');
const { createUser, findUser, updateUser, countUsers } = require('~/models');
const { getBalanceConfig } = require('~/server/services/Config');
const { isEnabled } = require('~/server/utils');
const {
LDAP_URL,
@@ -90,14 +90,6 @@ const ldapLogin = new LdapStrategy(ldapOptions, async (userinfo, done) => {
(LDAP_ID && userinfo[LDAP_ID]) || userinfo.uid || userinfo.sAMAccountName || userinfo.mail;
let user = await findUser({ ldapId });
if (user && user.provider !== 'ldap') {
logger.info(
`[ldapStrategy] User ${user.email} already exists with provider ${user.provider}`,
);
return done(null, false, {
message: ErrorTypes.AUTH_FAILED,
});
}
const fullNameAttributes = LDAP_FULL_NAME && LDAP_FULL_NAME.split(',');
const fullName =

View File

@@ -3,9 +3,9 @@ const fetch = require('node-fetch');
const passport = require('passport');
const client = require('openid-client');
const jwtDecode = require('jsonwebtoken/decode');
const { CacheKeys } = require('librechat-data-provider');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { hashToken, logger } = require('@librechat/data-schemas');
const { CacheKeys, ErrorTypes } = require('librechat-data-provider');
const { Strategy: OpenIDStrategy } = require('openid-client/passport');
const { isEnabled, safeStringify, logHeaders } = require('@librechat/api');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
@@ -104,14 +104,6 @@ class CustomOpenIDStrategy extends OpenIDStrategy {
if (options?.state && !params.has('state')) {
params.set('state', options.state);
}
if (process.env.OPENID_AUDIENCE) {
params.set('audience', process.env.OPENID_AUDIENCE);
logger.debug(
`[openidStrategy] Adding audience to authorization request: ${process.env.OPENID_AUDIENCE}`,
);
}
return params;
}
}
@@ -320,14 +312,6 @@ async function setupOpenId() {
} for openidId: ${claims.sub}`,
);
}
if (user != null && user.provider !== 'openid') {
logger.info(
`[openidStrategy] Attempted OpenID login by user ${user.email}, was registered with "${user.provider}" provider`,
);
return done(null, false, {
message: ErrorTypes.AUTH_FAILED,
});
}
const userinfo = {
...claims,
...(await getUserInfo(openidConfig, tokenset.access_token, claims.sub)),
@@ -369,7 +353,7 @@ async function setupOpenId() {
username = userinfo[process.env.OPENID_USERNAME_CLAIM];
} else {
username = convertToUsername(
userinfo.preferred_username || userinfo.username || userinfo.email,
userinfo.username || userinfo.given_name || userinfo.email,
);
}

View File

@@ -1,8 +1,7 @@
const fetch = require('node-fetch');
const jwtDecode = require('jsonwebtoken/decode');
const { ErrorTypes } = require('librechat-data-provider');
const { findUser, createUser, updateUser } = require('~/models');
const { setupOpenId } = require('./openidStrategy');
const { findUser, createUser, updateUser } = require('~/models');
// --- Mocks ---
jest.mock('node-fetch');
@@ -51,9 +50,11 @@ jest.mock('openid-client', () => {
issuer: 'https://fake-issuer.com',
// Add any other properties needed by the implementation
}),
fetchUserInfo: jest.fn().mockImplementation(() => {
fetchUserInfo: jest.fn().mockImplementation((config, accessToken, sub) => {
// Only return additional properties, but don't override any claims
return Promise.resolve({});
return Promise.resolve({
preferred_username: 'preferred_username',
});
}),
customFetch: Symbol('customFetch'),
};
@@ -103,7 +104,6 @@ describe('setupOpenId', () => {
given_name: 'First',
family_name: 'Last',
name: 'My Full',
preferred_username: 'testusername',
username: 'flast',
picture: 'https://example.com/avatar.png',
}),
@@ -156,20 +156,20 @@ describe('setupOpenId', () => {
verifyCallback = require('openid-client/passport').__getVerifyCallback();
});
it('should create a new user with correct username when preferred_username claim exists', async () => {
// Arrange: our userinfo already has preferred_username 'testusername'
it('should create a new user with correct username when username claim exists', async () => {
// Arrange: our userinfo already has username 'flast'
const userinfo = tokenset.claims();
// Act
const { user } = await validate(tokenset);
// Assert
expect(user.username).toBe(userinfo.preferred_username);
expect(user.username).toBe(userinfo.username);
expect(createUser).toHaveBeenCalledWith(
expect.objectContaining({
provider: 'openid',
openidId: userinfo.sub,
username: userinfo.preferred_username,
username: userinfo.username,
email: userinfo.email,
name: `${userinfo.given_name} ${userinfo.family_name}`,
}),
@@ -179,12 +179,12 @@ describe('setupOpenId', () => {
);
});
it('should use username as username when preferred_username claim is missing', async () => {
// Arrange: remove preferred_username from userinfo
it('should use given_name as username when username claim is missing', async () => {
// Arrange: remove username from userinfo
const userinfo = { ...tokenset.claims() };
delete userinfo.preferred_username;
// Expect the username to be the "username"
const expectUsername = userinfo.username;
delete userinfo.username;
// Expect the username to be the given name (unchanged case)
const expectUsername = userinfo.given_name;
// Act
const { user } = await validate({ ...tokenset, claims: () => userinfo });
@@ -199,11 +199,11 @@ describe('setupOpenId', () => {
);
});
it('should use email as username when username and preferred_username are missing', async () => {
// Arrange: remove username and preferred_username
it('should use email as username when username and given_name are missing', async () => {
// Arrange: remove username and given_name
const userinfo = { ...tokenset.claims() };
delete userinfo.username;
delete userinfo.preferred_username;
delete userinfo.given_name;
const expectUsername = userinfo.email;
// Act
@@ -262,20 +262,17 @@ describe('setupOpenId', () => {
});
it('should update an existing user on login', async () => {
// Arrange: simulate that a user already exists with openid provider
// Arrange: simulate that a user already exists
const existingUser = {
_id: 'existingUserId',
provider: 'openid',
provider: 'local',
email: tokenset.claims().email,
openidId: '',
username: '',
name: '',
};
findUser.mockImplementation(async (query) => {
if (
query.openidId === tokenset.claims().sub ||
(query.email === tokenset.claims().email && query.provider === 'openid')
) {
if (query.openidId === tokenset.claims().sub || query.email === tokenset.claims().email) {
return existingUser;
}
return null;
@@ -292,44 +289,18 @@ describe('setupOpenId', () => {
expect.objectContaining({
provider: 'openid',
openidId: userinfo.sub,
username: userinfo.preferred_username,
username: userinfo.username,
name: `${userinfo.given_name} ${userinfo.family_name}`,
}),
);
});
it('should block login when email exists with different provider', async () => {
// Arrange: simulate that a user exists with same email but different provider
const existingUser = {
_id: 'existingUserId',
provider: 'google',
email: tokenset.claims().email,
googleId: 'some-google-id',
username: 'existinguser',
name: 'Existing User',
};
findUser.mockImplementation(async (query) => {
if (query.email === tokenset.claims().email && !query.provider) {
return existingUser;
}
return null;
});
// Act
const result = await validate(tokenset);
// Assert verify that the strategy rejects login
expect(result.user).toBe(false);
expect(result.details.message).toBe(ErrorTypes.AUTH_FAILED);
expect(createUser).not.toHaveBeenCalled();
expect(updateUser).not.toHaveBeenCalled();
});
it('should enforce the required role and reject login if missing', async () => {
// Arrange: simulate a token without the required role.
jwtDecode.mockReturnValue({
roles: ['SomeOtherRole'],
});
const userinfo = tokenset.claims();
// Act
const { user, details } = await validate(tokenset);
@@ -340,6 +311,9 @@ describe('setupOpenId', () => {
});
it('should attempt to download and save the avatar if picture is provided', async () => {
// Arrange: ensure userinfo contains a picture URL
const userinfo = tokenset.claims();
// Act
const { user } = await validate(tokenset);

View File

@@ -22,12 +22,9 @@ const handleExistingUser = async (oldUser, avatarUrl) => {
const isLocal = fileStrategy === FileSources.local;
let updatedAvatar = false;
const hasManualFlag =
typeof oldUser?.avatar === 'string' && oldUser.avatar.includes('?manual=true');
if (isLocal && (!oldUser?.avatar || !hasManualFlag)) {
if (isLocal && (oldUser.avatar === null || !oldUser.avatar.includes('?manual=true'))) {
updatedAvatar = avatarUrl;
} else if (!isLocal && (!oldUser?.avatar || !hasManualFlag)) {
} else if (!isLocal && (oldUser.avatar === null || !oldUser.avatar.includes('?manual=true'))) {
const userId = oldUser._id;
const resizedBuffer = await resizeAvatar({
userId,

View File

@@ -1,164 +0,0 @@
const { FileSources } = require('librechat-data-provider');
const { handleExistingUser } = require('./process');
jest.mock('~/server/services/Files/strategies', () => ({
getStrategyFunctions: jest.fn(),
}));
jest.mock('~/server/services/Files/images/avatar', () => ({
resizeAvatar: jest.fn(),
}));
jest.mock('~/models', () => ({
updateUser: jest.fn(),
createUser: jest.fn(),
getUserById: jest.fn(),
}));
jest.mock('~/server/services/Config', () => ({
getBalanceConfig: jest.fn(),
}));
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { resizeAvatar } = require('~/server/services/Files/images/avatar');
const { updateUser } = require('~/models');
describe('handleExistingUser', () => {
beforeEach(() => {
jest.clearAllMocks();
process.env.CDN_PROVIDER = FileSources.local;
});
it('should handle null avatar without throwing error', async () => {
const oldUser = {
_id: 'user123',
avatar: null,
};
const avatarUrl = 'https://example.com/avatar.png';
await handleExistingUser(oldUser, avatarUrl);
expect(updateUser).toHaveBeenCalledWith('user123', { avatar: avatarUrl });
});
it('should handle undefined avatar without throwing error', async () => {
const oldUser = {
_id: 'user123',
// avatar is undefined
};
const avatarUrl = 'https://example.com/avatar.png';
await handleExistingUser(oldUser, avatarUrl);
expect(updateUser).toHaveBeenCalledWith('user123', { avatar: avatarUrl });
});
it('should not update avatar if it has manual=true flag', async () => {
const oldUser = {
_id: 'user123',
avatar: 'https://example.com/avatar.png?manual=true',
};
const avatarUrl = 'https://example.com/new-avatar.png';
await handleExistingUser(oldUser, avatarUrl);
expect(updateUser).not.toHaveBeenCalled();
});
it('should update avatar for local storage when avatar has no manual flag', async () => {
const oldUser = {
_id: 'user123',
avatar: 'https://example.com/old-avatar.png',
};
const avatarUrl = 'https://example.com/new-avatar.png';
await handleExistingUser(oldUser, avatarUrl);
expect(updateUser).toHaveBeenCalledWith('user123', { avatar: avatarUrl });
});
it('should process avatar for non-local storage', async () => {
process.env.CDN_PROVIDER = 's3';
const mockProcessAvatar = jest.fn().mockResolvedValue('processed-avatar-url');
getStrategyFunctions.mockReturnValue({ processAvatar: mockProcessAvatar });
resizeAvatar.mockResolvedValue(Buffer.from('resized-image'));
const oldUser = {
_id: 'user123',
avatar: null,
};
const avatarUrl = 'https://example.com/avatar.png';
await handleExistingUser(oldUser, avatarUrl);
expect(resizeAvatar).toHaveBeenCalledWith({
userId: 'user123',
input: avatarUrl,
});
expect(mockProcessAvatar).toHaveBeenCalledWith({
buffer: Buffer.from('resized-image'),
userId: 'user123',
manual: 'false',
});
expect(updateUser).toHaveBeenCalledWith('user123', { avatar: 'processed-avatar-url' });
});
it('should not update if avatar already has manual flag in non-local storage', async () => {
process.env.CDN_PROVIDER = 's3';
const oldUser = {
_id: 'user123',
avatar: 'https://cdn.example.com/avatar.png?manual=true',
};
const avatarUrl = 'https://example.com/new-avatar.png';
await handleExistingUser(oldUser, avatarUrl);
expect(resizeAvatar).not.toHaveBeenCalled();
expect(updateUser).not.toHaveBeenCalled();
});
it('should handle avatar with query parameters but without manual flag', async () => {
const oldUser = {
_id: 'user123',
avatar: 'https://example.com/avatar.png?size=large&format=webp',
};
const avatarUrl = 'https://example.com/new-avatar.png';
await handleExistingUser(oldUser, avatarUrl);
expect(updateUser).toHaveBeenCalledWith('user123', { avatar: avatarUrl });
});
it('should handle empty string avatar', async () => {
const oldUser = {
_id: 'user123',
avatar: '',
};
const avatarUrl = 'https://example.com/avatar.png';
await handleExistingUser(oldUser, avatarUrl);
expect(updateUser).toHaveBeenCalledWith('user123', { avatar: avatarUrl });
});
it('should handle avatar with manual=false parameter', async () => {
const oldUser = {
_id: 'user123',
avatar: 'https://example.com/avatar.png?manual=false',
};
const avatarUrl = 'https://example.com/new-avatar.png';
await handleExistingUser(oldUser, avatarUrl);
expect(updateUser).toHaveBeenCalledWith('user123', { avatar: avatarUrl });
});
it('should handle oldUser being null gracefully', async () => {
const avatarUrl = 'https://example.com/avatar.png';
// This should throw an error when trying to access oldUser._id
await expect(handleExistingUser(null, avatarUrl)).rejects.toThrow();
});
});

View File

@@ -2,7 +2,6 @@ const fs = require('fs');
const path = require('path');
const fetch = require('node-fetch');
const passport = require('passport');
const { ErrorTypes } = require('librechat-data-provider');
const { hashToken, logger } = require('@librechat/data-schemas');
const { Strategy: SamlStrategy } = require('@node-saml/passport-saml');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
@@ -204,15 +203,6 @@ async function setupSaml() {
);
}
if (user && user.provider !== 'saml') {
logger.info(
`[samlStrategy] User ${user.email} already exists with provider ${user.provider}`,
);
return done(null, false, {
message: ErrorTypes.AUTH_FAILED,
});
}
const fullName = getFullName(profile);
const username = convertToUsername(

View File

@@ -378,11 +378,11 @@ u7wlOSk+oFzDIO/UILIA
});
it('should update an existing user on login', async () => {
// Set up findUser to return an existing user with saml provider
// Set up findUser to return an existing user
const { findUser } = require('~/models');
const existingUser = {
_id: 'existing-user-id',
provider: 'saml',
provider: 'local',
email: baseProfile.email,
samlId: '',
username: 'oldusername',
@@ -400,26 +400,6 @@ u7wlOSk+oFzDIO/UILIA
expect(user.email).toBe(baseProfile.email);
});
it('should block login when email exists with different provider', async () => {
// Set up findUser to return a user with different provider
const { findUser } = require('~/models');
const existingUser = {
_id: 'existing-user-id',
provider: 'google',
email: baseProfile.email,
googleId: 'some-google-id',
username: 'existinguser',
name: 'Existing User',
};
findUser.mockResolvedValue(existingUser);
const profile = { ...baseProfile };
const result = await validate(profile);
expect(result.user).toBe(false);
expect(result.details.message).toBe(require('librechat-data-provider').ErrorTypes.AUTH_FAILED);
});
it('should attempt to download and save the avatar if picture is provided', async () => {
const profile = { ...baseProfile };

View File

@@ -1,7 +1,6 @@
const { isEnabled } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { ErrorTypes } = require('librechat-data-provider');
const { createSocialUser, handleExistingUser } = require('./process');
const { isEnabled } = require('~/server/utils');
const { findUser } = require('~/models');
const socialLogin =
@@ -12,20 +11,12 @@ const socialLogin =
profile,
});
const existingUser = await findUser({ email: email.trim() });
const oldUser = await findUser({ email: email.trim() });
const ALLOW_SOCIAL_REGISTRATION = isEnabled(process.env.ALLOW_SOCIAL_REGISTRATION);
if (existingUser?.provider === provider) {
await handleExistingUser(existingUser, avatarUrl);
return cb(null, existingUser);
} else if (existingUser) {
logger.info(
`[${provider}Login] User ${email} already exists with provider ${existingUser.provider}`,
);
const error = new Error(ErrorTypes.AUTH_FAILED);
error.code = ErrorTypes.AUTH_FAILED;
error.provider = existingUser.provider;
return cb(error);
if (oldUser) {
await handleExistingUser(oldUser, avatarUrl);
return cb(null, oldUser);
}
if (ALLOW_SOCIAL_REGISTRATION) {
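The hunk above removes the provider-mismatch guard entirely: any account found by e-mail now logs in through handleExistingUser, whatever provider created it. A sketch of the resulting verify flow, assuming the truncated registration branch keeps the existing createSocialUser call (the field names passed to it are assumptions):

// Sketch of the post-change flow inside the passport verify callback;
// findUser, isEnabled, handleExistingUser, createSocialUser come from the imports in the hunk.
const oldUser = await findUser({ email: email.trim() });
const ALLOW_SOCIAL_REGISTRATION = isEnabled(process.env.ALLOW_SOCIAL_REGISTRATION);

if (oldUser) {
  // No provider check: an account created via one provider can complete a login via another.
  await handleExistingUser(oldUser, avatarUrl);
  return cb(null, oldUser);
}

if (ALLOW_SOCIAL_REGISTRATION) {
  // Assumed shape; the hunk truncates before this branch.
  const newUser = await createSocialUser({ email, avatarUrl, provider, username, name });
  return cb(null, newUser);
}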

View File

@@ -1370,7 +1370,7 @@
* @property {string} [model] - The model that the assistant used for this run.
* @property {string} [instructions] - The instructions that the assistant used for this run.
* @property {string} [additional_instructions] - Optional. Appends additional instructions
* at the end of the instructions for the run. This is useful for modifying
* at theend of the instructions for the run. This is useful for modifying
* @property {Tool[]} [tools] - The list of tools used for this run.
* @property {string[]} [file_ids] - The list of File IDs used for this run.
* @property {Object} [metadata] - Metadata associated with this run.

View File

@@ -19,9 +19,6 @@ const openAIModels = {
'gpt-4.1': 1047576,
'gpt-4.1-mini': 1047576,
'gpt-4.1-nano': 1047576,
'gpt-5': 400000,
'gpt-5-mini': 400000,
'gpt-5-nano': 400000,
'gpt-4o': 127500, // -500 from max
'gpt-4o-mini': 127500, // -500 from max
'gpt-4o-2024-05-13': 127500, // -500 from max
@@ -199,7 +196,6 @@ const amazonModels = {
'amazon.nova-micro-v1:0': 127000, // -1000 from max,
'amazon.nova-lite-v1:0': 295000, // -5000 from max,
'amazon.nova-pro-v1:0': 295000, // -5000 from max,
'amazon.nova-premier-v1:0': 995000, // -5000 from max,
};
const bedrockModels = {
@@ -237,9 +233,6 @@ const aggregateModels = {
...xAIModels,
// misc.
kimi: 131000,
// GPT-OSS
'gpt-oss-20b': 131000,
'gpt-oss-120b': 131000,
};
const maxTokensMap = {
@@ -256,11 +249,6 @@ const modelMaxOutputs = {
o1: 32268, // -500 from max: 32,768
'o1-mini': 65136, // -500 from max: 65,536
'o1-preview': 32268, // -500 from max: 32,768
'gpt-5': 128000,
'gpt-5-mini': 128000,
'gpt-5-nano': 128000,
'gpt-oss-20b': 131000,
'gpt-oss-120b': 131000,
system_default: 1024,
};
@@ -479,11 +467,10 @@ const tiktokenModels = new Set([
]);
module.exports = {
tiktokenModels,
maxTokensMap,
inputSchema,
modelSchema,
maxTokensMap,
tiktokenModels,
maxOutputTokensMap,
matchModelName,
processModelData,
getModelMaxTokens,

View File

@@ -1,11 +1,5 @@
const { EModelEndpoint } = require('librechat-data-provider');
const {
maxOutputTokensMap,
getModelMaxTokens,
processModelData,
matchModelName,
maxTokensMap,
} = require('./tokens');
const { getModelMaxTokens, processModelData, matchModelName, maxTokensMap } = require('./tokens');
describe('getModelMaxTokens', () => {
test('should return correct tokens for exact match', () => {
@@ -156,35 +150,6 @@ describe('getModelMaxTokens', () => {
);
});
test('should return correct tokens for gpt-5 matches', () => {
expect(getModelMaxTokens('gpt-5')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5']);
expect(getModelMaxTokens('gpt-5-preview')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5']);
expect(getModelMaxTokens('openai/gpt-5')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5']);
expect(getModelMaxTokens('gpt-5-2025-01-30')).toBe(
maxTokensMap[EModelEndpoint.openAI]['gpt-5'],
);
});
test('should return correct tokens for gpt-5-mini matches', () => {
expect(getModelMaxTokens('gpt-5-mini')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5-mini']);
expect(getModelMaxTokens('gpt-5-mini-preview')).toBe(
maxTokensMap[EModelEndpoint.openAI]['gpt-5-mini'],
);
expect(getModelMaxTokens('openai/gpt-5-mini')).toBe(
maxTokensMap[EModelEndpoint.openAI]['gpt-5-mini'],
);
});
test('should return correct tokens for gpt-5-nano matches', () => {
expect(getModelMaxTokens('gpt-5-nano')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5-nano']);
expect(getModelMaxTokens('gpt-5-nano-preview')).toBe(
maxTokensMap[EModelEndpoint.openAI]['gpt-5-nano'],
);
expect(getModelMaxTokens('openai/gpt-5-nano')).toBe(
maxTokensMap[EModelEndpoint.openAI]['gpt-5-nano'],
);
});
test('should return correct tokens for Anthropic models', () => {
const models = [
'claude-2.1',
@@ -384,39 +349,6 @@ describe('getModelMaxTokens', () => {
expect(getModelMaxTokens('o3')).toBe(o3Tokens);
expect(getModelMaxTokens('openai/o3')).toBe(o3Tokens);
});
test('should return correct tokens for GPT-OSS models', () => {
const expected = maxTokensMap[EModelEndpoint.openAI]['gpt-oss-20b'];
['gpt-oss-20b', 'gpt-oss-120b', 'openai/gpt-oss-20b', 'openai/gpt-oss-120b'].forEach((name) => {
expect(getModelMaxTokens(name)).toBe(expected);
});
});
test('should return correct max output tokens for GPT-5 models', () => {
const { getModelMaxOutputTokens } = require('./tokens');
['gpt-5', 'gpt-5-mini', 'gpt-5-nano'].forEach((model) => {
expect(getModelMaxOutputTokens(model)).toBe(maxOutputTokensMap[EModelEndpoint.openAI][model]);
expect(getModelMaxOutputTokens(model, EModelEndpoint.openAI)).toBe(
maxOutputTokensMap[EModelEndpoint.openAI][model],
);
expect(getModelMaxOutputTokens(model, EModelEndpoint.azureOpenAI)).toBe(
maxOutputTokensMap[EModelEndpoint.azureOpenAI][model],
);
});
});
test('should return correct max output tokens for GPT-OSS models', () => {
const { getModelMaxOutputTokens } = require('./tokens');
['gpt-oss-20b', 'gpt-oss-120b'].forEach((model) => {
expect(getModelMaxOutputTokens(model)).toBe(maxOutputTokensMap[EModelEndpoint.openAI][model]);
expect(getModelMaxOutputTokens(model, EModelEndpoint.openAI)).toBe(
maxOutputTokensMap[EModelEndpoint.openAI][model],
);
expect(getModelMaxOutputTokens(model, EModelEndpoint.azureOpenAI)).toBe(
maxOutputTokensMap[EModelEndpoint.azureOpenAI][model],
);
});
});
});
describe('matchModelName', () => {
@@ -488,25 +420,6 @@ describe('matchModelName', () => {
expect(matchModelName('gpt-4.1-nano-2024-08-06')).toBe('gpt-4.1-nano');
});
it('should return the closest matching key for gpt-5 matches', () => {
expect(matchModelName('openai/gpt-5')).toBe('gpt-5');
expect(matchModelName('gpt-5-preview')).toBe('gpt-5');
expect(matchModelName('gpt-5-2025-01-30')).toBe('gpt-5');
expect(matchModelName('gpt-5-2025-01-30-0130')).toBe('gpt-5');
});
it('should return the closest matching key for gpt-5-mini matches', () => {
expect(matchModelName('openai/gpt-5-mini')).toBe('gpt-5-mini');
expect(matchModelName('gpt-5-mini-preview')).toBe('gpt-5-mini');
expect(matchModelName('gpt-5-mini-2025-01-30')).toBe('gpt-5-mini');
});
it('should return the closest matching key for gpt-5-nano matches', () => {
expect(matchModelName('openai/gpt-5-nano')).toBe('gpt-5-nano');
expect(matchModelName('gpt-5-nano-preview')).toBe('gpt-5-nano');
expect(matchModelName('gpt-5-nano-2025-01-30')).toBe('gpt-5-nano');
});
// Tests for Google models
it('should return the exact model name if it exists in maxTokensMap - Google models', () => {
expect(matchModelName('text-bison-32k', EModelEndpoint.google)).toBe('text-bison-32k');

bun.lockb

Binary file not shown.

View File

@@ -1,6 +1,6 @@
{
"name": "@librechat/frontend",
"version": "v0.8.0-rc2",
"version": "v0.7.9",
"description": "",
"type": "module",
"scripts": {
@@ -34,7 +34,6 @@
"@dicebear/collection": "^9.2.2",
"@dicebear/core": "^9.2.2",
"@headlessui/react": "^2.1.2",
"@librechat/client": "*",
"@marsidev/react-turnstile": "^1.1.0",
"@radix-ui/react-accordion": "^1.1.2",
"@radix-ui/react-alert-dialog": "^1.0.2",
@@ -57,8 +56,8 @@
"@react-spring/web": "^9.7.5",
"@tanstack/react-query": "^4.28.0",
"@tanstack/react-table": "^8.11.7",
"class-variance-authority": "^0.7.1",
"clsx": "^2.1.1",
"class-variance-authority": "^0.6.0",
"clsx": "^1.2.1",
"copy-to-clipboard": "^3.3.3",
"cross-env": "^7.0.3",
"date-fns": "^3.3.1",
@@ -71,12 +70,11 @@
"i18next": "^24.2.2",
"i18next-browser-languagedetector": "^8.0.3",
"input-otp": "^1.4.2",
"jotai": "^2.12.5",
"js-cookie": "^3.0.5",
"librechat-data-provider": "*",
"lodash": "^4.17.21",
"lucide-react": "^0.394.0",
"match-sorter": "^8.1.0",
"match-sorter": "^6.3.4",
"micromark-extension-llm-math": "^3.1.0",
"qrcode.react": "^4.2.0",
"rc-input-number": "^7.4.2",

View File

@@ -4,10 +4,10 @@ import { RouterProvider } from 'react-router-dom';
import * as RadixToast from '@radix-ui/react-toast';
import { HTML5Backend } from 'react-dnd-html5-backend';
import { ReactQueryDevtools } from '@tanstack/react-query-devtools';
import { Toast, ThemeProvider, ToastProvider } from '@librechat/client';
import { QueryClient, QueryClientProvider, QueryCache } from '@tanstack/react-query';
import { ScreenshotProvider, useApiErrorBoundary } from './hooks';
import { getThemeFromEnv } from './utils/getThemeFromEnv';
import { ScreenshotProvider, ThemeProvider, useApiErrorBoundary } from './hooks';
import { ToastProvider } from './Providers';
import Toast from './components/ui/Toast';
import { LiveAnnouncer } from '~/a11y';
import { router } from './routes';
@@ -24,23 +24,11 @@ const App = () => {
}),
});
// Load theme from environment variables if available
const envTheme = getThemeFromEnv();
return (
<QueryClientProvider client={queryClient}>
<RecoilRoot>
<LiveAnnouncer>
<ThemeProvider
// Only pass initialTheme and themeRGB if environment theme exists
// This allows localStorage values to persist when no env theme is set
{...(envTheme && { initialTheme: 'system', themeRGB: envTheme })}
>
{/* The ThemeProvider will automatically:
1. Apply dark/light mode classes
2. Apply custom theme colors if envTheme is provided
3. Otherwise use stored theme preferences from localStorage
4. Fall back to default theme colors if nothing is stored */}
<ThemeProvider>
<RadixToast.Provider>
<ToastProvider>
<DndProvider backend={HTML5Backend}>

View File

@@ -1,46 +0,0 @@
import React, { createContext, useContext, useMemo } from 'react';
import type { TMessage } from 'librechat-data-provider';
import { useChatContext } from './ChatContext';
import { getLatestText } from '~/utils';
interface ArtifactsContextValue {
isSubmitting: boolean;
latestMessageId: string | null;
latestMessageText: string;
conversationId: string | null;
}
const ArtifactsContext = createContext<ArtifactsContextValue | undefined>(undefined);
export function ArtifactsProvider({ children }: { children: React.ReactNode }) {
const { isSubmitting, latestMessage, conversation } = useChatContext();
const latestMessageText = useMemo(() => {
return getLatestText({
messageId: latestMessage?.messageId ?? null,
text: latestMessage?.text ?? null,
content: latestMessage?.content ?? null,
} as TMessage);
}, [latestMessage?.messageId, latestMessage?.text, latestMessage?.content]);
/** Context value only created when relevant values change */
const contextValue = useMemo<ArtifactsContextValue>(
() => ({
isSubmitting,
latestMessageText,
latestMessageId: latestMessage?.messageId ?? null,
conversationId: conversation?.conversationId ?? null,
}),
[isSubmitting, latestMessage?.messageId, latestMessageText, conversation?.conversationId],
);
return <ArtifactsContext.Provider value={contextValue}>{children}</ArtifactsContext.Provider>;
}
export function useArtifactsContext() {
const context = useContext(ArtifactsContext);
if (!context) {
throw new Error('useArtifactsContext must be used within ArtifactsProvider');
}
return context;
}
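Since the entire provider file is deleted above, here is a minimal consumer of the removed API for anyone tracing call sites; the component name and import path are illustrative:

import { ArtifactsProvider, useArtifactsContext } from '~/Providers/ArtifactsContext';

function ArtifactStatus() {
  // Throws if rendered outside <ArtifactsProvider>, per the guard above.
  const { isSubmitting, latestMessageText } = useArtifactsContext();
  return <span>{isSubmitting ? 'Generating...' : latestMessageText}</span>;
}

// Must sit inside the chat tree so useChatContext() resolves:
// <ArtifactsProvider><ArtifactStatus /></ArtifactsProvider>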

View File

@@ -1,4 +1,4 @@
import { createContext, useContext, ReactNode } from 'react';
import { createContext, useContext } from 'react';
import type { TShowToast } from '~/common';
import useToast from '~/hooks/useToast';
@@ -14,7 +14,7 @@ export function useToastContext() {
return useContext(ToastContext);
}
export default function ToastProvider({ children }: { children: ReactNode }) {
export default function ToastProvider({ children }) {
const { showToast } = useToast();
return <ToastContext.Provider value={{ showToast }}>{children}</ToastContext.Provider>;
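A minimal consumer of the context typed above; the payload shape is assumed from TShowToast and the button is illustrative:

function SaveButton() {
  // useToastContext() returns the { showToast } value provided above.
  const { showToast } = useToastContext();
  return <button onClick={() => showToast({ message: 'Saved' })}>Save</button>;
}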

Some files were not shown because too many files have changed in this diff.