Compare commits


34 Commits

Author SHA1 Message Date
Dustin Healy
3fd84216cf ci: add unit tests for backend prompt file attachment code and for uploaded file processing 2025-09-11 00:53:00 -07:00
Dustin Healy
d8997fdf0e feat: capabilities filtering on AttachFileButton 2025-09-11 00:53:00 -07:00
Dustin Healy
bb6ee0dc58 feat: fix mismatched sizes for icons 2025-09-11 00:53:00 -07:00
Dustin Healy
c4e86539c6 chore: remove debugging logs and tidy up misc stuff 2025-09-11 00:53:00 -07:00
Dustin Healy
f1bc15b3d5 chore: remove unnecessary comments 2025-09-11 00:53:00 -07:00
Dustin Healy
58678be0f8 chore: clean up comments and remove debugging log statements 2025-09-11 00:53:00 -07:00
Dustin Healy
441e69181c chore: import order 2025-09-11 00:53:00 -07:00
Dustin Healy
2c1d7a6b71 refactor: remove unnecessary onFileChange, just handle onSave stuff in onFilesChange 2025-09-11 00:53:00 -07:00
Dustin Healy
384d6c870b chore: remove unused translation string 2025-09-11 00:53:00 -07:00
Dustin Healy
013c002cbb fix: bring back proper deletion handling we lost with refactor for onRemoveHandler 2025-09-11 00:53:00 -07:00
Dustin Healy
e062ed5832 fix: refactor to ifs rather than switch case to maintain codebase style 2025-09-11 00:53:00 -07:00
Dustin Healy
d07d05a8d0 feat: add localization strings for tool resource types in file preview 2025-09-11 00:53:00 -07:00
Dustin Healy
5b38ce8fd9 fix: type guard for compiler 2025-09-11 00:53:00 -07:00
Dustin Healy
0a61e3cb39 feat: remove prop drilling for custom onFileRemove handler and just make it default behavior for PromptFile rather than working around old deletion handlers 2025-09-11 00:53:00 -07:00
Dustin Healy
a52c37faad chore: revert unnecessary change to message file handling 2025-09-11 00:53:00 -07:00
Dustin Healy
1f49c569c3 chore: remove debugging logs 2025-09-11 00:53:00 -07:00
Dustin Healy
479ce5df48 fix: use proper enum for promptGroup in useResourcePermissions arg and remove console.logs
chore: remove debugging logs

chore: remove debugging logs

chore: remove unused component
2025-09-11 00:53:00 -07:00
Dustin Healy
c37e368d98 chore: remove unused component and translation strings 2025-09-11 00:53:00 -07:00
Dustin Healy
fd29cbed4f chore: remove debug logs 2025-09-11 00:53:00 -07:00
Dustin Healy
277a321155 fix: attachments go in new prompt so that sidenav bar updates without refresh 2025-09-11 00:53:00 -07:00
Dustin Healy
0dba5c6450 fix: paperclip was getting larger as title got longer 2025-09-11 00:53:00 -07:00
Dustin Healy
93490764e6 refactor: move attach button to bottom of div when no attachments present 2025-09-11 00:53:00 -07:00
Dustin Healy
094320fcd9 feat: auto send working (still needs clean up) 2025-09-11 00:53:00 -07:00
Dustin Healy
cee11d3353 chore: address ESLint comments 2025-09-11 00:53:00 -07:00
Dustin Healy
69772317b2 chore: clean up usePromptFileHandling 2025-09-11 00:53:00 -07:00
Dustin Healy
607a5a2fcf feat: chat ui and functionality for prompts (auto-send not working) 2025-09-11 00:53:00 -07:00
Dustin Healy
7c3356e10b fix: deletion doesn't cause reference loss in versioning anymore - file reference maintained in db 2025-09-11 00:53:00 -07:00
Dustin Healy
d4fd0047cb fix: deletion + version updates not working properly 2025-09-11 00:53:00 -07:00
Dustin Healy
797fdf4286 feat: add attach section to PromptForm 2025-09-11 00:53:00 -07:00
Dustin Healy
623dfa5b63 feat: add file attachment section PromptFiles, new file display: PromptFile (needed for deletion to work properly), and usePromptFileHandling hook 2025-09-11 00:53:00 -07:00
Dustin Healy
600641d02f feat: add SharePoint picker support 2025-09-11 00:53:00 -07:00
Dustin Healy
d65accddc1 feat: add AttachFileButton for uploading files from a prompt context rather than chat
This is essentially a stripped-down version of AttachFileMenu, so there is of course some duplication between this new component and AttachFileMenu. I think that duplication is preferable to the added complexity of handling both contexts (with respect to ephemeral agents and the file handling hooks) inside AttachFileMenu alone. We could probably refactor this without too much hassle later on in the file upload unification push once things are more settled (see the sketch after the commit list).
2025-09-11 00:53:00 -07:00
Dustin Healy
195d2e2014 feat: add tool_resources to the productionPrompt for making and getting groups 2025-09-11 00:53:00 -07:00
Dustin Healy
c0ae6f277f feat: add schemas and types 2025-09-11 00:53:00 -07:00
316 changed files with 6041 additions and 14158 deletions
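
The AttachFileButton commit above describes a stripped-down upload button for the prompt context. The following is only a rough illustration of that idea, not the actual LibreChat component: the component name, props, and markup are hypothetical, and the real implementation also wires in capability filtering and the prompt file-handling hook.

```tsx
// Hypothetical sketch only: a minimal attach button for a prompt context.
// It handles file selection and defers everything else (capability checks,
// uploads, ephemeral-agent concerns) to whatever the caller passes in.
import React, { useRef } from 'react';

type AttachFileButtonProps = {
  disabled?: boolean;
  onFilesSelected: (files: File[]) => void;
};

export function AttachFileButton({ disabled, onFilesSelected }: AttachFileButtonProps) {
  const inputRef = useRef<HTMLInputElement>(null);

  const handleChange = (event: React.ChangeEvent<HTMLInputElement>) => {
    const files = event.target.files ? Array.from(event.target.files) : [];
    if (files.length > 0) {
      onFilesSelected(files);
    }
    // Reset the input so selecting the same file again still fires onChange.
    event.target.value = '';
  };

  return (
    <>
      <input ref={inputRef} type="file" multiple hidden onChange={handleChange} />
      <button
        type="button"
        disabled={disabled}
        aria-label="Attach files"
        onClick={() => inputRef.current?.click()}
      >
        Attach
      </button>
    </>
  );
}
```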

View File

@@ -163,10 +163,10 @@ GOOGLE_KEY=user_provided
# GOOGLE_AUTH_HEADER=true
# Gemini API (AI Studio)
# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite,gemini-2.0-flash,gemini-2.0-flash-lite
# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite-preview-06-17,gemini-2.0-flash,gemini-2.0-flash-lite
# Vertex AI
# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite,gemini-2.0-flash-001,gemini-2.0-flash-lite-001
# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite-preview-06-17,gemini-2.0-flash-001,gemini-2.0-flash-lite-001
# GOOGLE_TITLE_MODEL=gemini-2.0-flash-lite-001

View File

@@ -1,4 +1,4 @@
# v0.8.0
# v0.8.0-rc3
# Base node image
FROM node:20-alpine AS node
@@ -30,7 +30,7 @@ RUN \
# Allow mounting of these files, which have no default
touch .env ; \
# Create directories for the volumes to inherit the correct permissions
mkdir -p /app/client/public/images /app/api/logs /app/uploads ; \
mkdir -p /app/client/public/images /app/api/logs ; \
npm config set fetch-retry-maxtimeout 600000 ; \
npm config set fetch-retries 5 ; \
npm config set fetch-retry-mintimeout 15000 ; \
@@ -44,6 +44,8 @@ RUN \
npm prune --production; \
npm cache clean --force
RUN mkdir -p /app/client/public/images /app/api/logs
# Node API setup
EXPOSE 3080
ENV HOST=0.0.0.0

View File

@@ -1,5 +1,5 @@
# Dockerfile.multi
# v0.8.0
# v0.8.0-rc3
# Base for all builds
FROM node:20-alpine AS base-min

View File

@@ -68,19 +68,19 @@ const primeFiles = async (options) => {
/**
*
* @param {Object} options
* @param {string} options.userId
* @param {ServerRequest} options.req
* @param {Array<{ file_id: string; filename: string }>} options.files
* @param {string} [options.entity_id]
* @param {boolean} [options.fileCitations=false] - Whether to include citation instructions
* @returns
*/
const createFileSearchTool = async ({ userId, files, entity_id, fileCitations = false }) => {
const createFileSearchTool = async ({ req, files, entity_id, fileCitations = false }) => {
return tool(
async ({ query }) => {
if (files.length === 0) {
return 'No files to search. Instruct the user to add files for the search.';
}
const jwtToken = generateShortLivedToken(userId);
const jwtToken = generateShortLivedToken(req.user.id);
if (!jwtToken) {
return 'There was an error authenticating the file search request.';
}

View File

@@ -1,13 +1,8 @@
const { logger } = require('@librechat/data-schemas');
const { SerpAPI } = require('@langchain/community/tools/serpapi');
const { Calculator } = require('@langchain/community/tools/calculator');
const { mcpToolPattern, loadWebSearchAuth, checkAccess } = require('@librechat/api');
const { EnvVar, createCodeExecutionTool, createSearchTool } = require('@librechat/agents');
const {
checkAccess,
createSafeUser,
mcpToolPattern,
loadWebSearchAuth,
} = require('@librechat/api');
const {
Tools,
Constants,
@@ -38,7 +33,7 @@ const { createFileSearchTool, primeFiles: primeSearchFiles } = require('./fileSe
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const { createMCPTool, createMCPTools } = require('~/server/services/MCP');
const { loadAuthValues } = require('~/server/services/Tools/credentials');
const { getMCPServerTools } = require('~/server/services/Config');
const { getCachedTools } = require('~/server/services/Config');
const { getRoleByName } = require('~/models/Role');
/**
@@ -255,6 +250,7 @@ const loadTools = async ({
/** @type {Record<string, string>} */
const toolContextMap = {};
const cachedTools = (await getCachedTools({ userId: user, includeGlobal: true })) ?? {};
const requestedMCPTools = {};
for (const tool of tools) {
@@ -311,7 +307,7 @@ const loadTools = async ({
}
return createFileSearchTool({
userId: user,
req: options.req,
files,
entity_id: agent?.id,
fileCitations,
@@ -344,7 +340,7 @@ Current Date & Time: ${replaceSpecialVars({ text: '{{iso_datetime}}' })}
});
};
continue;
} else if (tool && mcpToolPattern.test(tool)) {
} else if (tool && cachedTools && mcpToolPattern.test(tool)) {
const [toolName, serverName] = tool.split(Constants.mcp_delimiter);
if (toolName === Constants.mcp_server) {
/** Placeholder used for UI purposes */
@@ -357,21 +353,33 @@ Current Date & Time: ${replaceSpecialVars({ text: '{{iso_datetime}}' })}
continue;
}
if (toolName === Constants.mcp_all) {
requestedMCPTools[serverName] = [
{
type: 'all',
const currentMCPGenerator = async (index) =>
createMCPTools({
req: options.req,
res: options.res,
index,
serverName,
},
];
userMCPAuthMap,
model: agent?.model ?? model,
provider: agent?.provider ?? endpoint,
signal,
});
requestedMCPTools[serverName] = [currentMCPGenerator];
continue;
}
const currentMCPGenerator = async (index) =>
createMCPTool({
index,
req: options.req,
res: options.res,
toolKey: tool,
userMCPAuthMap,
model: agent?.model ?? model,
provider: agent?.provider ?? endpoint,
signal,
});
requestedMCPTools[serverName] = requestedMCPTools[serverName] || [];
requestedMCPTools[serverName].push({
type: 'single',
toolKey: tool,
serverName,
});
requestedMCPTools[serverName].push(currentMCPGenerator);
continue;
}
@@ -414,65 +422,24 @@ Current Date & Time: ${replaceSpecialVars({ text: '{{iso_datetime}}' })}
const mcpToolPromises = [];
/** MCP server tools are initialized sequentially by server */
let index = -1;
const failedMCPServers = new Set();
const safeUser = createSafeUser(options.req?.user);
for (const [serverName, toolConfigs] of Object.entries(requestedMCPTools)) {
for (const [serverName, generators] of Object.entries(requestedMCPTools)) {
index++;
/** @type {LCAvailableTools} */
let availableTools;
for (const config of toolConfigs) {
for (const generator of generators) {
try {
if (failedMCPServers.has(serverName)) {
continue;
}
const mcpParams = {
index,
signal,
user: safeUser,
userMCPAuthMap,
res: options.res,
model: agent?.model ?? model,
serverName: config.serverName,
provider: agent?.provider ?? endpoint,
};
if (config.type === 'all' && toolConfigs.length === 1) {
/** Handle async loading for single 'all' tool config */
if (generator && generators.length === 1) {
mcpToolPromises.push(
createMCPTools(mcpParams).catch((error) => {
generator(index).catch((error) => {
logger.error(`Error loading ${serverName} tools:`, error);
return null;
}),
);
continue;
}
if (!availableTools) {
try {
availableTools = await getMCPServerTools(serverName);
} catch (error) {
logger.error(`Error fetching available tools for MCP server ${serverName}:`, error);
}
}
/** Handle synchronous loading */
const mcpTool =
config.type === 'all'
? await createMCPTools(mcpParams)
: await createMCPTool({
...mcpParams,
availableTools,
toolKey: config.toolKey,
});
const mcpTool = await generator(index);
if (Array.isArray(mcpTool)) {
loadedTools.push(...mcpTool);
} else if (mcpTool) {
loadedTools.push(mcpTool);
} else {
failedMCPServers.add(serverName);
logger.warn(
`MCP tool creation failed for "${config.toolKey}", server may be unavailable or unauthenticated.`,
);
}
} catch (error) {
logger.error(`Error loading MCP tool for server ${serverName}:`, error);

View File

@@ -1,5 +1,4 @@
const fs = require('fs');
const { logger } = require('@librechat/data-schemas');
const { math, isEnabled } = require('@librechat/api');
const { CacheKeys } = require('librechat-data-provider');
@@ -35,35 +34,13 @@ if (FORCED_IN_MEMORY_CACHE_NAMESPACES.length > 0) {
}
}
/** Helper function to safely read Redis CA certificate from file
* @returns {string|null} The contents of the CA certificate file, or null if not set or on error
*/
const getRedisCA = () => {
const caPath = process.env.REDIS_CA;
if (!caPath) {
return null;
}
try {
if (fs.existsSync(caPath)) {
return fs.readFileSync(caPath, 'utf8');
} else {
logger.warn(`Redis CA certificate file not found: ${caPath}`);
return null;
}
} catch (error) {
logger.error(`Failed to read Redis CA certificate file '${caPath}':`, error);
return null;
}
};
const cacheConfig = {
FORCED_IN_MEMORY_CACHE_NAMESPACES,
USE_REDIS,
REDIS_URI: process.env.REDIS_URI,
REDIS_USERNAME: process.env.REDIS_USERNAME,
REDIS_PASSWORD: process.env.REDIS_PASSWORD,
REDIS_CA: getRedisCA(),
REDIS_CA: process.env.REDIS_CA ? fs.readFileSync(process.env.REDIS_CA, 'utf8') : null,
REDIS_KEY_PREFIX: process.env[REDIS_KEY_PREFIX_VAR] || REDIS_KEY_PREFIX || '',
REDIS_MAX_LISTENERS: math(process.env.REDIS_MAX_LISTENERS, 40),
REDIS_PING_INTERVAL: math(process.env.REDIS_PING_INTERVAL, 0),

View File

@@ -1,6 +1,6 @@
const { MCPManager, FlowStateManager } = require('@librechat/api');
const { EventSource } = require('eventsource');
const { Time } = require('librechat-data-provider');
const { MCPManager, FlowStateManager, OAuthReconnectionManager } = require('@librechat/api');
const logger = require('./winston');
global.EventSource = EventSource;
@@ -26,6 +26,4 @@ module.exports = {
createMCPManager: MCPManager.createInstance,
getMCPManager: MCPManager.getInstance,
getFlowStateManager,
createOAuthReconnectionManager: OAuthReconnectionManager.createInstance,
getOAuthReconnectionManager: OAuthReconnectionManager.getInstance,
};

View File

@@ -1,8 +1,10 @@
const mongoose = require('mongoose');
const { MeiliSearch } = require('meilisearch');
const { logger } = require('@librechat/data-schemas');
const { FlowStateManager } = require('@librechat/api');
const { CacheKeys } = require('librechat-data-provider');
const { isEnabled, FlowStateManager } = require('@librechat/api');
const { isEnabled } = require('~/server/utils');
const { getLogStores } = require('~/cache');
const Conversation = mongoose.models.Conversation;
@@ -29,81 +31,6 @@ class MeiliSearchClient {
}
}
/**
* Ensures indexes have proper filterable attributes configured and checks if documents have user field
* @param {MeiliSearch} client - MeiliSearch client instance
* @returns {Promise<boolean>} - true if configuration was updated or re-sync is needed
*/
async function ensureFilterableAttributes(client) {
try {
// Check and update messages index
try {
const messagesIndex = client.index('messages');
const settings = await messagesIndex.getSettings();
if (!settings.filterableAttributes || !settings.filterableAttributes.includes('user')) {
logger.info('[indexSync] Configuring messages index to filter by user...');
await messagesIndex.updateSettings({
filterableAttributes: ['user'],
});
logger.info('[indexSync] Messages index configured for user filtering');
logger.info('[indexSync] Index configuration updated. Full re-sync will be triggered.');
return true;
}
// Check if existing documents have user field indexed
try {
const searchResult = await messagesIndex.search('', { limit: 1 });
if (searchResult.hits.length > 0 && !searchResult.hits[0].user) {
logger.info('[indexSync] Existing messages missing user field, re-sync needed');
return true;
}
} catch (searchError) {
logger.debug('[indexSync] Could not check message documents:', searchError.message);
}
} catch (error) {
if (error.code !== 'index_not_found') {
logger.warn('[indexSync] Could not check/update messages index settings:', error.message);
}
}
// Check and update conversations index
try {
const convosIndex = client.index('convos');
const settings = await convosIndex.getSettings();
if (!settings.filterableAttributes || !settings.filterableAttributes.includes('user')) {
logger.info('[indexSync] Configuring convos index to filter by user...');
await convosIndex.updateSettings({
filterableAttributes: ['user'],
});
logger.info('[indexSync] Convos index configured for user filtering');
logger.info('[indexSync] Index configuration updated. Full re-sync will be triggered.');
return true;
}
// Check if existing documents have user field indexed
try {
const searchResult = await convosIndex.search('', { limit: 1 });
if (searchResult.hits.length > 0 && !searchResult.hits[0].user) {
logger.info('[indexSync] Existing conversations missing user field, re-sync needed');
return true;
}
} catch (searchError) {
logger.debug('[indexSync] Could not check conversation documents:', searchError.message);
}
} catch (error) {
if (error.code !== 'index_not_found') {
logger.warn('[indexSync] Could not check/update convos index settings:', error.message);
}
}
} catch (error) {
logger.error('[indexSync] Error ensuring filterable attributes:', error);
}
return false;
}
/**
* Performs the actual sync operations for messages and conversations
*/
@@ -120,27 +47,12 @@ async function performSync() {
return { messagesSync: false, convosSync: false };
}
/** Ensures indexes have proper filterable attributes configured */
const configUpdated = await ensureFilterableAttributes(client);
let messagesSync = false;
let convosSync = false;
// If configuration was just updated or documents are missing user field, force a full re-sync
if (configUpdated) {
logger.info('[indexSync] Forcing full re-sync to ensure user field is properly indexed...');
// Reset sync flags to force full re-sync
await Message.collection.updateMany({ _meiliIndex: true }, { $set: { _meiliIndex: false } });
await Conversation.collection.updateMany(
{ _meiliIndex: true },
{ $set: { _meiliIndex: false } },
);
}
// Check if we need to sync messages
const messageProgress = await Message.getSyncProgress();
if (!messageProgress.isComplete || configUpdated) {
if (!messageProgress.isComplete) {
logger.info(
`[indexSync] Messages need syncing: ${messageProgress.totalProcessed}/${messageProgress.totalDocuments} indexed`,
);
@@ -167,7 +79,7 @@ async function performSync() {
// Check if we need to sync conversations
const convoProgress = await Conversation.getSyncProgress();
if (!convoProgress.isComplete || configUpdated) {
if (!convoProgress.isComplete) {
logger.info(
`[indexSync] Conversations need syncing: ${convoProgress.totalProcessed}/${convoProgress.totalDocuments} indexed`,
);

View File

@@ -11,7 +11,7 @@ const {
getProjectByName,
} = require('./Project');
const { removeAllPermissions } = require('~/server/services/PermissionService');
const { getMCPServerTools } = require('~/server/services/Config');
const { getCachedTools } = require('~/server/services/Config');
const { getActions } = require('./Action');
const { Agent } = require('~/db/models');
@@ -49,14 +49,6 @@ const createAgent = async (agentData) => {
*/
const getAgent = async (searchParameter) => await Agent.findOne(searchParameter).lean();
/**
* Get multiple agent documents based on the provided search parameters.
*
* @param {Object} searchParameter - The search parameters to find agents.
* @returns {Promise<Agent[]>} Array of agent documents as plain objects.
*/
const getAgents = async (searchParameter) => await Agent.find(searchParameter).lean();
/**
* Load an agent based on the provided ID
*
@@ -69,6 +61,8 @@ const getAgents = async (searchParameter) => await Agent.find(searchParameter).l
*/
const loadEphemeralAgent = async ({ req, agent_id, endpoint, model_parameters: _m }) => {
const { model, ...model_parameters } = _m;
/** @type {Record<string, FunctionTool>} */
const availableTools = await getCachedTools({ userId: req.user.id, includeGlobal: true });
/** @type {TEphemeralAgent | null} */
const ephemeralAgent = req.body.ephemeralAgent;
const mcpServers = new Set(ephemeralAgent?.mcp);
@@ -86,18 +80,22 @@ const loadEphemeralAgent = async ({ req, agent_id, endpoint, model_parameters: _
const addedServers = new Set();
if (mcpServers.size > 0) {
for (const toolName of Object.keys(availableTools)) {
if (!toolName.includes(mcp_delimiter)) {
continue;
}
const mcpServer = toolName.split(mcp_delimiter)?.[1];
if (mcpServer && mcpServers.has(mcpServer)) {
addedServers.add(mcpServer);
tools.push(toolName);
}
}
for (const mcpServer of mcpServers) {
if (addedServers.has(mcpServer)) {
continue;
}
const serverTools = await getMCPServerTools(mcpServer);
if (!serverTools) {
tools.push(`${mcp_all}${mcp_delimiter}${mcpServer}`);
addedServers.add(mcpServer);
continue;
}
tools.push(...Object.keys(serverTools));
addedServers.add(mcpServer);
tools.push(`${mcp_all}${mcp_delimiter}${mcpServer}`);
}
}
@@ -837,7 +835,6 @@ const countPromotedAgents = async () => {
module.exports = {
getAgent,
getAgents,
loadAgent,
createAgent,
updateAgent,

View File

@@ -8,7 +8,6 @@ process.env.CREDS_IV = '0123456789abcdef';
jest.mock('~/server/services/Config', () => ({
getCachedTools: jest.fn(),
getMCPServerTools: jest.fn(),
}));
const mongoose = require('mongoose');
@@ -31,7 +30,7 @@ const {
generateActionMetadataHash,
} = require('./Agent');
const permissionService = require('~/server/services/PermissionService');
const { getCachedTools, getMCPServerTools } = require('~/server/services/Config');
const { getCachedTools } = require('~/server/services/Config');
const { AclEntry } = require('~/db/models');
/**
@@ -1930,16 +1929,6 @@ describe('models/Agent', () => {
another_tool: {},
});
// Mock getMCPServerTools to return tools for each server
getMCPServerTools.mockImplementation(async (server) => {
if (server === 'server1') {
return { tool1_mcp_server1: {} };
} else if (server === 'server2') {
return { tool2_mcp_server2: {} };
}
return null;
});
const mockReq = {
user: { id: 'user123' },
body: {
@@ -2124,14 +2113,6 @@ describe('models/Agent', () => {
getCachedTools.mockResolvedValue(availableTools);
// Mock getMCPServerTools to return all tools for server1
getMCPServerTools.mockImplementation(async (server) => {
if (server === 'server1') {
return availableTools; // All 100 tools belong to server1
}
return null;
});
const mockReq = {
user: { id: 'user123' },
body: {
@@ -2673,17 +2654,6 @@ describe('models/Agent', () => {
tool_mcp_server2: {}, // Different server
});
// Mock getMCPServerTools to return only tools matching the server
getMCPServerTools.mockImplementation(async (server) => {
if (server === 'server1') {
// Only return tool that correctly matches server1 format
return { tool_mcp_server1: {} };
} else if (server === 'server2') {
return { tool_mcp_server2: {} };
}
return null;
});
const mockReq = {
user: { id: 'user123' },
body: {

View File

@@ -174,7 +174,7 @@ module.exports = {
if (search) {
try {
const meiliResults = await Conversation.meiliSearch(search, { filter: `user = "${user}"` });
const meiliResults = await Conversation.meiliSearch(search);
const matchingIds = Array.isArray(meiliResults.hits)
? meiliResults.hits.map((result) => result.conversationId)
: [];

View File

@@ -239,46 +239,10 @@ const updateTagsForConversation = async (user, conversationId, tags) => {
}
};
/**
* Increments tag counts for existing tags only.
* @param {string} user - The user ID.
* @param {string[]} tags - Array of tag names to increment
* @returns {Promise<void>}
*/
const bulkIncrementTagCounts = async (user, tags) => {
if (!tags || tags.length === 0) {
return;
}
try {
const uniqueTags = [...new Set(tags.filter(Boolean))];
if (uniqueTags.length === 0) {
return;
}
const bulkOps = uniqueTags.map((tag) => ({
updateOne: {
filter: { user, tag },
update: { $inc: { count: 1 } },
},
}));
const result = await ConversationTag.bulkWrite(bulkOps);
if (result && result.modifiedCount > 0) {
logger.debug(
`user: ${user} | Incremented tag counts - modified ${result.modifiedCount} tags`,
);
}
} catch (error) {
logger.error('[bulkIncrementTagCounts] Error incrementing tag counts', error);
}
};
module.exports = {
getConversationTags,
createConversationTag,
updateConversationTag,
deleteConversationTag,
bulkIncrementTagCounts,
updateTagsForConversation,
};

View File

@@ -42,7 +42,7 @@ const getToolFilesByIds = async (fileIds, toolResourceSet) => {
$or: [],
};
if (toolResourceSet.has(EToolResources.context)) {
if (toolResourceSet.has(EToolResources.ocr)) {
filter.$or.push({ text: { $exists: true, $ne: null }, context: FileContext.agents });
}
if (toolResourceSet.has(EToolResources.file_search)) {

View File

@@ -51,6 +51,7 @@ const createGroupPipeline = (query, skip, limit) => {
createdAt: 1,
updatedAt: 1,
'productionPrompt.prompt': 1,
'productionPrompt.tool_resources': 1,
// 'productionPrompt._id': 1,
// 'productionPrompt.type': 1,
},
@@ -328,6 +329,7 @@ async function getListPromptGroupsByAccess({
createdAt: 1,
updatedAt: 1,
'productionPrompt.prompt': 1,
'productionPrompt.tool_resources': 1,
},
},
);
@@ -411,7 +413,10 @@ module.exports = {
prompt: newPrompt,
group: {
...newPromptGroup,
productionPrompt: { prompt: newPrompt.prompt },
productionPrompt: {
prompt: newPrompt.prompt,
tool_resources: newPrompt.tool_resources,
},
},
};
} catch (error) {

View File

@@ -562,3 +562,884 @@ describe('Prompt ACL Permissions', () => {
});
});
});
describe('Prompt Model - File Attachments', () => {
describe('Creating Prompts with tool_resources', () => {
it('should create a prompt with file attachments in tool_resources', async () => {
const testGroup = await PromptGroup.create({
name: 'Attachment Test Group',
category: 'testing',
author: testUsers.owner._id,
authorName: testUsers.owner.name,
productionId: new mongoose.Types.ObjectId(),
});
const promptData = {
prompt: {
prompt: 'Test prompt with file attachments',
type: 'text',
groupId: testGroup._id,
tool_resources: {
file_search: {
file_ids: ['file-1', 'file-2'],
},
execute_code: {
file_ids: ['file-3'],
},
image_edit: {
file_ids: ['file-4'],
},
},
},
author: testUsers.owner._id,
};
const result = await promptFns.savePrompt(promptData);
expect(result.prompt).toBeTruthy();
expect(result.prompt.tool_resources).toEqual({
file_search: {
file_ids: ['file-1', 'file-2'],
},
execute_code: {
file_ids: ['file-3'],
},
image_edit: {
file_ids: ['file-4'],
},
});
const savedPrompt = await Prompt.findById(result.prompt._id);
expect(savedPrompt.tool_resources).toEqual(promptData.prompt.tool_resources);
});
it('should create a prompt without tool_resources when none provided', async () => {
const testGroup = await PromptGroup.create({
name: 'No Attachment Test Group',
category: 'testing',
author: testUsers.owner._id,
authorName: testUsers.owner.name,
productionId: new mongoose.Types.ObjectId(),
});
const promptData = {
prompt: {
prompt: 'Test prompt without attachments',
type: 'text',
groupId: testGroup._id,
},
author: testUsers.owner._id,
};
const result = await promptFns.savePrompt(promptData);
expect(result.prompt).toBeTruthy();
expect(result.prompt.tool_resources).toEqual({});
const savedPrompt = await Prompt.findById(result.prompt._id);
expect(savedPrompt.tool_resources).toEqual({});
});
it('should create a prompt group with tool_resources', async () => {
const saveData = {
prompt: {
type: 'text',
prompt: 'Test prompt with file attachments',
tool_resources: {
file_search: {
file_ids: ['file-1', 'file-2'],
},
ocr: {
file_ids: ['file-3'],
},
},
},
group: {
name: 'Test Prompt Group with Attachments',
category: 'test-category',
oneliner: 'Test description',
},
author: testUsers.owner._id,
authorName: testUsers.owner.name,
};
const result = await promptFns.createPromptGroup(saveData);
expect(result.prompt).toBeTruthy();
expect(result.group).toBeTruthy();
expect(result.prompt.tool_resources).toEqual({
file_search: {
file_ids: ['file-1', 'file-2'],
},
ocr: {
file_ids: ['file-3'],
},
});
expect(result.group.productionPrompt.tool_resources).toEqual(result.prompt.tool_resources);
});
});
describe('Retrieving Prompts with tool_resources', () => {
let testGroup;
let testPrompt;
beforeEach(async () => {
testGroup = await PromptGroup.create({
name: 'Retrieval Test Group',
category: 'testing',
author: testUsers.owner._id,
authorName: testUsers.owner.name,
productionId: new mongoose.Types.ObjectId(),
});
testPrompt = await Prompt.create({
prompt: 'Test prompt with attachments for retrieval',
type: 'text',
author: testUsers.owner._id,
groupId: testGroup._id,
tool_resources: {
file_search: {
file_ids: ['file-1', 'file-2'],
},
execute_code: {
file_ids: ['file-3'],
},
},
});
});
afterEach(async () => {
await Prompt.deleteMany({});
await PromptGroup.deleteMany({});
});
it('should retrieve a prompt with tool_resources', async () => {
const result = await promptFns.getPrompt({ _id: testPrompt._id });
expect(result).toBeTruthy();
expect(result.tool_resources).toEqual({
file_search: {
file_ids: ['file-1', 'file-2'],
},
execute_code: {
file_ids: ['file-3'],
},
});
});
it('should retrieve prompts with tool_resources by groupId', async () => {
const result = await promptFns.getPrompts({ groupId: testGroup._id });
expect(result).toBeTruthy();
expect(Array.isArray(result)).toBe(true);
expect(result.length).toBe(1);
expect(result[0].tool_resources).toEqual({
file_search: {
file_ids: ['file-1', 'file-2'],
},
execute_code: {
file_ids: ['file-3'],
},
});
});
it('should handle prompts without tool_resources', async () => {
const promptWithoutAttachments = await Prompt.create({
prompt: 'Test prompt without attachments',
type: 'text',
author: testUsers.owner._id,
groupId: testGroup._id,
});
const result = await promptFns.getPrompt({ _id: promptWithoutAttachments._id });
expect(result).toBeTruthy();
expect(result.tool_resources).toBeUndefined();
});
});
describe('Updating Prompts with tool_resources', () => {
let testGroup;
beforeEach(async () => {
testGroup = await PromptGroup.create({
name: 'Update Test Group',
category: 'testing',
author: testUsers.owner._id,
authorName: testUsers.owner.name,
productionId: new mongoose.Types.ObjectId(),
});
await Prompt.create({
prompt: 'Original prompt',
type: 'text',
author: testUsers.owner._id,
groupId: testGroup._id,
tool_resources: {
file_search: {
file_ids: ['file-1'],
},
},
});
});
afterEach(async () => {
await Prompt.deleteMany({});
await PromptGroup.deleteMany({});
});
it('should update prompt with new tool_resources', async () => {
const updatedPromptData = {
prompt: {
prompt: 'Updated prompt with new attachments',
type: 'text',
groupId: testGroup._id,
tool_resources: {
file_search: {
file_ids: ['file-1', 'file-2'],
},
execute_code: {
file_ids: ['file-3'],
},
},
},
author: testUsers.owner._id,
};
const result = await promptFns.savePrompt(updatedPromptData);
expect(result.prompt).toBeTruthy();
expect(result.prompt.tool_resources).toEqual({
file_search: {
file_ids: ['file-1', 'file-2'],
},
execute_code: {
file_ids: ['file-3'],
},
});
});
it('should update prompt to remove tool_resources', async () => {
const updatedPromptData = {
prompt: {
prompt: 'Updated prompt without attachments',
type: 'text',
groupId: testGroup._id,
// No tool_resources field
},
author: testUsers.owner._id,
};
const result = await promptFns.savePrompt(updatedPromptData);
expect(result.prompt).toBeTruthy();
expect(result.prompt.tool_resources).toEqual({});
});
});
describe('Deleting Prompts with tool_resources', () => {
let testGroup;
let testPrompt;
beforeEach(async () => {
testGroup = await PromptGroup.create({
name: 'Deletion Test Group',
category: 'testing',
author: testUsers.owner._id,
authorName: testUsers.owner.name,
productionId: new mongoose.Types.ObjectId(),
});
testPrompt = await Prompt.create({
prompt: 'Prompt to be deleted',
type: 'text',
author: testUsers.owner._id,
groupId: testGroup._id,
tool_resources: {
file_search: {
file_ids: ['file-1', 'file-2'],
},
execute_code: {
file_ids: ['file-3'],
},
},
});
});
afterEach(async () => {
await Prompt.deleteMany({});
await PromptGroup.deleteMany({});
});
it('should delete a prompt with tool_resources', async () => {
const result = await promptFns.deletePrompt({
promptId: testPrompt._id,
groupId: testGroup._id,
author: testUsers.owner._id,
role: SystemRoles.USER,
});
expect(result.prompt).toBe('Prompt deleted successfully');
const deletedPrompt = await Prompt.findById(testPrompt._id);
expect(deletedPrompt).toBeNull();
});
it('should delete prompt group when last prompt with tool_resources is deleted', async () => {
const result = await promptFns.deletePrompt({
promptId: testPrompt._id,
groupId: testGroup._id,
author: testUsers.owner._id,
role: SystemRoles.USER,
});
expect(result.prompt).toBe('Prompt deleted successfully');
expect(result.promptGroup).toBeTruthy();
expect(result.promptGroup.message).toBe('Prompt group deleted successfully');
const deletedPrompt = await Prompt.findById(testPrompt._id);
const deletedGroup = await PromptGroup.findById(testGroup._id);
expect(deletedPrompt).toBeNull();
expect(deletedGroup).toBeNull();
});
});
describe('Making Prompts Production with tool_resources', () => {
let testGroup;
let testPrompt;
beforeEach(async () => {
testGroup = await PromptGroup.create({
name: 'Production Test Group',
category: 'testing',
author: testUsers.owner._id,
authorName: testUsers.owner.name,
productionId: new mongoose.Types.ObjectId(),
});
testPrompt = await Prompt.create({
prompt: 'Prompt to be made production',
type: 'text',
author: testUsers.owner._id,
groupId: testGroup._id,
tool_resources: {
file_search: {
file_ids: ['file-1', 'file-2'],
},
image_edit: {
file_ids: ['file-3'],
},
},
});
});
afterEach(async () => {
await Prompt.deleteMany({});
await PromptGroup.deleteMany({});
});
it('should make a prompt with tool_resources production', async () => {
const result = await promptFns.makePromptProduction(testPrompt._id.toString());
expect(result.message).toBe('Prompt production made successfully');
const updatedGroup = await PromptGroup.findById(testGroup._id);
expect(updatedGroup.productionId.toString()).toBe(testPrompt._id.toString());
});
it('should return error message when prompt not found', async () => {
const nonExistentId = new mongoose.Types.ObjectId().toString();
const result = await promptFns.makePromptProduction(nonExistentId);
expect(result.message).toBe('Error making prompt production');
});
});
describe('Prompt Groups with tool_resources projection', () => {
let testGroup;
let testPrompt;
beforeEach(async () => {
testGroup = await PromptGroup.create({
name: 'Projection Test Group',
category: 'testing',
author: testUsers.owner._id,
authorName: testUsers.owner.name,
productionId: new mongoose.Types.ObjectId(),
});
testPrompt = await Prompt.create({
prompt: 'Test prompt for projection',
type: 'text',
author: testUsers.owner._id,
groupId: testGroup._id,
tool_resources: {
file_search: {
file_ids: ['file-1'],
},
execute_code: {
file_ids: ['file-2', 'file-3'],
},
},
});
await PromptGroup.findByIdAndUpdate(testGroup._id, {
productionId: testPrompt._id,
});
});
afterEach(async () => {
await Prompt.deleteMany({});
await PromptGroup.deleteMany({});
});
it('should include tool_resources in prompt group projection', async () => {
const mockReq = { user: { id: testUsers.owner._id } };
const filter = {
pageNumber: 1,
pageSize: 10,
category: 'testing',
};
const result = await promptFns.getPromptGroups(mockReq, filter);
expect(result.promptGroups).toBeTruthy();
expect(Array.isArray(result.promptGroups)).toBe(true);
expect(result.promptGroups.length).toBeGreaterThan(0);
const foundGroup = result.promptGroups.find(
(group) => group._id.toString() === testGroup._id.toString(),
);
expect(foundGroup).toBeTruthy();
expect(foundGroup.productionPrompt.tool_resources).toEqual({
file_search: {
file_ids: ['file-1'],
},
execute_code: {
file_ids: ['file-2', 'file-3'],
},
});
});
});
describe('Error handling with tool_resources', () => {
it('should handle errors when creating prompt with tool_resources', async () => {
const invalidPromptData = {
prompt: {
prompt: 'Test prompt',
type: 'text',
groupId: 'invalid-id',
tool_resources: {
file_search: {
file_ids: ['file-1'],
},
},
},
author: testUsers.owner._id,
};
const result = await promptFns.savePrompt(invalidPromptData);
expect(result.message).toBe('Error saving prompt');
});
it('should handle errors when retrieving prompt with tool_resources', async () => {
const result = await promptFns.getPrompt({ _id: 'invalid-id' });
expect(result.message).toBe('Error getting prompt');
});
});
describe('Edge Cases - File Attachment Scenarios', () => {
let testGroup;
let testPrompt;
beforeEach(async () => {
testGroup = await PromptGroup.create({
name: 'Edge Case Test Group',
category: 'testing',
author: testUsers.owner._id,
authorName: testUsers.owner.name,
productionId: new mongoose.Types.ObjectId(),
});
testPrompt = await Prompt.create({
prompt: 'Test prompt with file attachments for edge cases',
type: 'text',
author: testUsers.owner._id,
groupId: testGroup._id,
tool_resources: {
file_search: {
file_ids: ['file-1', 'file-2', 'file-3'],
},
execute_code: {
file_ids: ['file-4'],
},
image_edit: {
file_ids: ['file-5', 'file-6'],
},
},
});
});
afterEach(async () => {
await Prompt.deleteMany({});
await PromptGroup.deleteMany({});
});
describe('Orphaned File References', () => {
it('should maintain prompt functionality when referenced files are deleted', async () => {
const result = await promptFns.getPrompt({ _id: testPrompt._id });
expect(result).toBeTruthy();
expect(result.tool_resources).toEqual({
file_search: {
file_ids: ['file-1', 'file-2', 'file-3'],
},
execute_code: {
file_ids: ['file-4'],
},
image_edit: {
file_ids: ['file-5', 'file-6'],
},
});
expect(result.prompt).toBe('Test prompt with file attachments for edge cases');
expect(result.type).toBe('text');
});
it('should handle prompts with empty file_ids arrays', async () => {
const promptWithEmptyFileIds = await Prompt.create({
prompt: 'Prompt with empty file_ids',
type: 'text',
author: testUsers.owner._id,
groupId: testGroup._id,
tool_resources: {
file_search: {
file_ids: [],
},
execute_code: {
file_ids: [],
},
},
});
const result = await promptFns.getPrompt({ _id: promptWithEmptyFileIds._id });
expect(result).toBeTruthy();
expect(result.tool_resources).toEqual({
file_search: {
file_ids: [],
},
execute_code: {
file_ids: [],
},
});
});
it('should handle prompts with null/undefined file_ids', async () => {
const promptWithNullFileIds = await Prompt.create({
prompt: 'Prompt with null file_ids',
type: 'text',
author: testUsers.owner._id,
groupId: testGroup._id,
tool_resources: {
file_search: {
file_ids: null,
},
execute_code: {
file_ids: undefined,
},
},
});
const result = await promptFns.getPrompt({ _id: promptWithNullFileIds._id });
expect(result).toBeTruthy();
expect(result.tool_resources).toEqual({
file_search: {
file_ids: null,
},
});
});
});
describe('Invalid File References', () => {
it('should handle prompts with malformed file_ids', async () => {
const promptWithMalformedIds = await Prompt.create({
prompt: 'Prompt with malformed file_ids',
type: 'text',
author: testUsers.owner._id,
groupId: testGroup._id,
tool_resources: {
file_search: {
file_ids: ['', null, undefined, 'invalid-id', 'file-valid'],
},
execute_code: {
file_ids: [123, {}, []],
},
},
});
const result = await promptFns.getPrompt({ _id: promptWithMalformedIds._id });
expect(result).toBeTruthy();
expect(result.tool_resources).toEqual({
file_search: {
file_ids: ['', null, null, 'invalid-id', 'file-valid'],
},
execute_code: {
file_ids: [123, {}, []],
},
});
});
it('should handle prompts with duplicate file_ids', async () => {
const promptWithDuplicates = await Prompt.create({
prompt: 'Prompt with duplicate file_ids',
type: 'text',
author: testUsers.owner._id,
groupId: testGroup._id,
tool_resources: {
file_search: {
file_ids: ['file-1', 'file-2', 'file-1', 'file-3', 'file-2'],
},
},
});
const result = await promptFns.getPrompt({ _id: promptWithDuplicates._id });
expect(result).toBeTruthy();
expect(result.tool_resources).toEqual({
file_search: {
file_ids: ['file-1', 'file-2', 'file-1', 'file-3', 'file-2'],
},
});
});
});
describe('Tool Resource Edge Cases', () => {
it('should handle prompts with unknown tool resource types', async () => {
const promptWithUnknownTools = await Prompt.create({
prompt: 'Prompt with unknown tool resources',
type: 'text',
author: testUsers.owner._id,
groupId: testGroup._id,
tool_resources: {
unknown_tool: {
file_ids: ['file-1'],
},
another_unknown: {
file_ids: ['file-2', 'file-3'],
},
file_search: {
file_ids: ['file-4'],
},
},
});
const result = await promptFns.getPrompt({ _id: promptWithUnknownTools._id });
expect(result).toBeTruthy();
expect(result.tool_resources).toEqual({
unknown_tool: {
file_ids: ['file-1'],
},
another_unknown: {
file_ids: ['file-2', 'file-3'],
},
file_search: {
file_ids: ['file-4'],
},
});
});
it('should handle prompts with malformed tool_resources structure', async () => {
const promptWithMalformedTools = await Prompt.create({
prompt: 'Prompt with malformed tool_resources',
type: 'text',
author: testUsers.owner._id,
groupId: testGroup._id,
tool_resources: {
file_search: 'not-an-object',
execute_code: {
file_ids: 'not-an-array',
},
image_edit: {
wrong_property: ['file-1'],
},
},
});
const result = await promptFns.getPrompt({ _id: promptWithMalformedTools._id });
expect(result).toBeTruthy();
expect(result.tool_resources).toEqual({
file_search: 'not-an-object',
execute_code: {
file_ids: 'not-an-array',
},
image_edit: {
wrong_property: ['file-1'],
},
});
});
});
describe('Prompt Deletion vs File Persistence', () => {
it('should delete prompt but preserve file references in tool_resources', async () => {
const beforeDelete = await promptFns.getPrompt({ _id: testPrompt._id });
expect(beforeDelete.tool_resources).toEqual({
file_search: {
file_ids: ['file-1', 'file-2', 'file-3'],
},
execute_code: {
file_ids: ['file-4'],
},
image_edit: {
file_ids: ['file-5', 'file-6'],
},
});
const result = await promptFns.deletePrompt({
promptId: testPrompt._id,
groupId: testGroup._id,
author: testUsers.owner._id,
role: SystemRoles.USER,
});
expect(result.prompt).toBe('Prompt deleted successfully');
const deletedPrompt = await Prompt.findById(testPrompt._id);
expect(deletedPrompt).toBeNull();
});
it('should handle prompt deletion when tool_resources contain non-existent files', async () => {
const promptWithNonExistentFiles = await Prompt.create({
prompt: 'Prompt with non-existent file references',
type: 'text',
author: testUsers.owner._id,
groupId: testGroup._id,
tool_resources: {
file_search: {
file_ids: ['non-existent-file-1', 'non-existent-file-2'],
},
},
});
const result = await promptFns.deletePrompt({
promptId: promptWithNonExistentFiles._id,
groupId: testGroup._id,
author: testUsers.owner._id,
role: SystemRoles.USER,
});
expect(result.prompt).toBe('Prompt deleted successfully');
const deletedPrompt = await Prompt.findById(promptWithNonExistentFiles._id);
expect(deletedPrompt).toBeNull();
});
});
describe('Large File Collections', () => {
it('should handle prompts with many file attachments', async () => {
const manyFileIds = Array.from({ length: 100 }, (_, i) => `file-${i + 1}`);
const promptWithManyFiles = await Prompt.create({
prompt: 'Prompt with many file attachments',
type: 'text',
author: testUsers.owner._id,
groupId: testGroup._id,
tool_resources: {
file_search: {
file_ids: manyFileIds.slice(0, 50),
},
execute_code: {
file_ids: manyFileIds.slice(50, 100),
},
},
});
const result = await promptFns.getPrompt({ _id: promptWithManyFiles._id });
expect(result).toBeTruthy();
expect(result.tool_resources.file_search.file_ids).toHaveLength(50);
expect(result.tool_resources.execute_code.file_ids).toHaveLength(50);
expect(result.tool_resources.file_search.file_ids[0]).toBe('file-1');
expect(result.tool_resources.execute_code.file_ids[49]).toBe('file-100');
});
it('should handle prompts with very long file_ids', async () => {
const longFileId = 'a'.repeat(1000);
const promptWithLongFileId = await Prompt.create({
prompt: 'Prompt with very long file ID',
type: 'text',
author: testUsers.owner._id,
groupId: testGroup._id,
tool_resources: {
file_search: {
file_ids: [longFileId],
},
},
});
const result = await promptFns.getPrompt({ _id: promptWithLongFileId._id });
expect(result).toBeTruthy();
expect(result.tool_resources.file_search.file_ids[0]).toBe(longFileId);
expect(result.tool_resources.file_search.file_ids[0].length).toBe(1000);
});
});
describe('Concurrent Operations', () => {
it('should handle concurrent updates to prompts with tool_resources', async () => {
const concurrentPrompts = await Promise.all([
Prompt.create({
prompt: 'Concurrent prompt 1',
type: 'text',
author: testUsers.owner._id,
groupId: testGroup._id,
tool_resources: {
file_search: {
file_ids: ['shared-file-1', 'unique-file-1'],
},
},
}),
Prompt.create({
prompt: 'Concurrent prompt 2',
type: 'text',
author: testUsers.owner._id,
groupId: testGroup._id,
tool_resources: {
file_search: {
file_ids: ['shared-file-1', 'unique-file-2'],
},
},
}),
Prompt.create({
prompt: 'Concurrent prompt 3',
type: 'text',
author: testUsers.owner._id,
groupId: testGroup._id,
tool_resources: {
file_search: {
file_ids: ['shared-file-1', 'unique-file-3'],
},
},
}),
]);
expect(concurrentPrompts).toHaveLength(3);
concurrentPrompts.forEach((prompt, index) => {
expect(prompt.tool_resources.file_search.file_ids).toContain('shared-file-1');
expect(prompt.tool_resources.file_search.file_ids).toContain(`unique-file-${index + 1}`);
});
const retrievedPrompts = await promptFns.getPrompts({ groupId: testGroup._id });
expect(retrievedPrompts.length).toBeGreaterThanOrEqual(3);
});
});
});
});
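
All of the fixtures in the tests above exercise the same prompt-level tool_resources shape. For quick reference, it looks roughly like this; the type name and file IDs are illustrative, assembled from the test fixtures rather than a published schema.

```ts
// Illustrative only: the tool_resources shape used by the prompt tests above.
type PromptToolResources = {
  file_search?: { file_ids: string[] };
  execute_code?: { file_ids: string[] };
  image_edit?: { file_ids: string[] };
  ocr?: { file_ids: string[] };
};

const exampleToolResources: PromptToolResources = {
  file_search: { file_ids: ['file-1', 'file-2'] },
  execute_code: { file_ids: ['file-3'] },
  image_edit: { file_ids: ['file-4'] },
};
```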

View File

@@ -111,8 +111,8 @@ const tokenValues = Object.assign(
'claude-': { prompt: 0.8, completion: 2.4 },
'command-r-plus': { prompt: 3, completion: 15 },
'command-r': { prompt: 0.5, completion: 1.5 },
'deepseek-reasoner': { prompt: 0.28, completion: 0.42 },
deepseek: { prompt: 0.28, completion: 0.42 },
'deepseek-reasoner': { prompt: 0.55, completion: 2.19 },
deepseek: { prompt: 0.14, completion: 0.28 },
/* cohere doesn't have rates for the older command models,
so this was from https://artificialanalysis.ai/models/command-light/providers */
command: { prompt: 0.38, completion: 0.38 },
@@ -124,8 +124,7 @@ const tokenValues = Object.assign(
'gemini-2.0-flash': { prompt: 0.1, completion: 0.4 },
'gemini-2.0': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
'gemini-2.5-pro': { prompt: 1.25, completion: 10 },
'gemini-2.5-flash': { prompt: 0.3, completion: 2.5 },
'gemini-2.5-flash-lite': { prompt: 0.075, completion: 0.4 },
'gemini-2.5-flash': { prompt: 0.15, completion: 3.5 },
'gemini-2.5': { prompt: 0, completion: 0 }, // Free for a period of time
'gemini-1.5-flash-8b': { prompt: 0.075, completion: 0.3 },
'gemini-1.5-flash': { prompt: 0.15, completion: 0.6 },

View File

@@ -571,9 +571,6 @@ describe('getCacheMultiplier', () => {
describe('Google Model Tests', () => {
const googleModels = [
'gemini-2.5-pro',
'gemini-2.5-flash',
'gemini-2.5-flash-lite',
'gemini-2.5-pro-preview-05-06',
'gemini-2.5-flash-preview-04-17',
'gemini-2.5-exp',
@@ -614,9 +611,6 @@ describe('Google Model Tests', () => {
it('should map to the correct model keys', () => {
const expected = {
'gemini-2.5-pro': 'gemini-2.5-pro',
'gemini-2.5-flash': 'gemini-2.5-flash',
'gemini-2.5-flash-lite': 'gemini-2.5-flash-lite',
'gemini-2.5-pro-preview-05-06': 'gemini-2.5-pro',
'gemini-2.5-flash-preview-04-17': 'gemini-2.5-flash',
'gemini-2.5-exp': 'gemini-2.5',

View File

@@ -1,6 +1,6 @@
{
"name": "@librechat/backend",
"version": "v0.8.0",
"version": "v0.8.0-rc3",
"description": "",
"scripts": {
"start": "echo 'please run this from the root directory'",
@@ -49,14 +49,14 @@
"@langchain/google-vertexai": "^0.2.13",
"@langchain/openai": "^0.5.18",
"@langchain/textsplitters": "^0.1.0",
"@librechat/agents": "^2.4.82",
"@librechat/agents": "^2.4.79",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@microsoft/microsoft-graph-client": "^3.0.7",
"@modelcontextprotocol/sdk": "^1.17.1",
"@node-saml/passport-saml": "^5.1.0",
"@waylaidwanderer/fetch-event-source": "^3.0.1",
"axios": "^1.12.1",
"axios": "^1.8.2",
"bcryptjs": "^2.4.3",
"compression": "^1.8.1",
"connect-redis": "^8.1.0",

View File

@@ -1,8 +1,8 @@
const cookies = require('cookie');
const jwt = require('jsonwebtoken');
const openIdClient = require('openid-client');
const { isEnabled } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { isEnabled, findOpenIDUser } = require('@librechat/api');
const {
requestPasswordReset,
setOpenIDAuthTokens,
@@ -11,9 +11,8 @@ const {
registerUser,
} = require('~/server/services/AuthService');
const { findUser, getUserById, deleteAllUserSessions, findSession } = require('~/models');
const { getGraphApiToken } = require('~/server/services/GraphTokenService');
const { getOAuthReconnectionManager } = require('~/config');
const { getOpenIdConfig } = require('~/strategies');
const { getGraphApiToken } = require('~/server/services/GraphTokenService');
const registrationController = async (req, res) => {
try {
@@ -72,14 +71,8 @@ const refreshController = async (req, res) => {
const openIdConfig = getOpenIdConfig();
const tokenset = await openIdClient.refreshTokenGrant(openIdConfig, refreshToken);
const claims = tokenset.claims();
const { user, error } = await findOpenIDUser({
findUser,
email: claims.email,
openidId: claims.sub,
idOnTheSource: claims.oid,
strategyName: 'refreshController',
});
if (error || !user) {
const user = await findUser({ email: claims.email });
if (!user) {
return res.status(401).redirect('/login');
}
const token = setOpenIDAuthTokens(tokenset, res, user._id.toString());
@@ -103,25 +96,14 @@ const refreshController = async (req, res) => {
return res.status(200).send({ token, user });
}
/** Session with the hashed refresh token */
const session = await findSession(
{
userId: userId,
refreshToken: refreshToken,
},
{ lean: false },
);
// Find the session with the hashed refresh token
const session = await findSession({
userId: userId,
refreshToken: refreshToken,
});
if (session && session.expiration > new Date()) {
const token = await setAuthTokens(userId, res, session);
// trigger OAuth MCP server reconnection asynchronously (best effort)
void getOAuthReconnectionManager()
.reconnectServers(userId)
.catch((err) => {
logger.error('Error reconnecting OAuth MCP servers:', err);
});
const token = await setAuthTokens(userId, res, session._id);
res.status(200).send({ token, user });
} else if (req?.query?.retry) {
// Retrying from a refresh token request that failed (401)
@@ -132,7 +114,7 @@ const refreshController = async (req, res) => {
res.status(401).send('Refresh token expired or not found for this user');
}
} catch (err) {
logger.error(`[refreshController] Invalid refresh token:`, err);
logger.error(`[refreshController] Refresh token: ${refreshToken}`, err);
res.status(403).send('Invalid refresh token');
}
};

View File

@@ -1,9 +1,16 @@
const { logger } = require('@librechat/data-schemas');
const { CacheKeys } = require('librechat-data-provider');
const { getToolkitKey, checkPluginAuth, filterUniquePlugins } = require('@librechat/api');
const { getCachedTools, setCachedTools } = require('~/server/services/Config');
const { CacheKeys, Constants } = require('librechat-data-provider');
const {
getToolkitKey,
checkPluginAuth,
filterUniquePlugins,
convertMCPToolToPlugin,
convertMCPToolsToPlugins,
} = require('@librechat/api');
const { getCachedTools, setCachedTools, mergeUserTools } = require('~/server/services/Config');
const { availableTools, toolkits } = require('~/app/clients/tools');
const { getAppConfig } = require('~/server/services/Config');
const { getMCPManager } = require('~/config');
const { getLogStores } = require('~/cache');
const getAvailablePluginsController = async (req, res) => {
@@ -65,27 +72,63 @@ const getAvailableTools = async (req, res) => {
}
const cache = getLogStores(CacheKeys.CONFIG_STORE);
const cachedToolsArray = await cache.get(CacheKeys.TOOLS);
const cachedUserTools = await getCachedTools({ userId });
const appConfig = req.config ?? (await getAppConfig({ role: req.user?.role }));
// Return early if we have cached tools
if (cachedToolsArray != null) {
res.status(200).json(cachedToolsArray);
/** @type {TPlugin[]} */
let mcpPlugins;
if (appConfig?.mcpConfig) {
const mcpManager = getMCPManager();
mcpPlugins =
cachedUserTools != null
? convertMCPToolsToPlugins({ functionTools: cachedUserTools, mcpManager })
: undefined;
}
if (
cachedToolsArray != null &&
(appConfig?.mcpConfig != null ? mcpPlugins != null && mcpPlugins.length > 0 : true)
) {
const dedupedTools = filterUniquePlugins([...(mcpPlugins ?? []), ...cachedToolsArray]);
res.status(200).json(dedupedTools);
return;
}
/** @type {Record<string, FunctionTool> | null} Get tool definitions to filter which tools are actually available */
let toolDefinitions = await getCachedTools();
if (toolDefinitions == null && appConfig?.availableTools != null) {
logger.warn('[getAvailableTools] Tool cache was empty, re-initializing from app config');
await setCachedTools(appConfig.availableTools);
toolDefinitions = appConfig.availableTools;
}
let toolDefinitions = await getCachedTools({ includeGlobal: true });
let prelimCachedTools;
/** @type {import('@librechat/api').LCManifestTool[]} */
let pluginManifest = availableTools;
if (appConfig?.mcpConfig != null) {
try {
const mcpManager = getMCPManager();
const mcpTools = await mcpManager.getAllToolFunctions(userId);
prelimCachedTools = prelimCachedTools ?? {};
for (const [toolKey, toolData] of Object.entries(mcpTools)) {
const plugin = convertMCPToolToPlugin({
toolKey,
toolData,
mcpManager,
});
if (plugin) {
pluginManifest.push(plugin);
}
prelimCachedTools[toolKey] = toolData;
}
await mergeUserTools({ userId, cachedUserTools, userTools: prelimCachedTools });
} catch (error) {
logger.error(
'[getAvailableTools] Error loading MCP Tools, servers may still be initializing:',
error,
);
}
} else if (prelimCachedTools != null) {
await setCachedTools(prelimCachedTools, { isGlobal: true });
}
/** @type {TPlugin[]} Deduplicate and authenticate plugins */
const uniquePlugins = filterUniquePlugins(pluginManifest);
const authenticatedPlugins = uniquePlugins.map((plugin) => {
@@ -96,13 +139,13 @@ const getAvailableTools = async (req, res) => {
}
});
/** Filter plugins based on availability */
/** Filter plugins based on availability and add MCP-specific auth config */
const toolsOutput = [];
for (const plugin of authenticatedPlugins) {
const isToolDefined = toolDefinitions?.[plugin.pluginKey] !== undefined;
const isToolDefined = toolDefinitions[plugin.pluginKey] !== undefined;
const isToolkit =
plugin.toolkit === true &&
Object.keys(toolDefinitions ?? {}).some(
Object.keys(toolDefinitions).some(
(key) => getToolkitKey({ toolkits, toolName: key }) === plugin.pluginKey,
);
@@ -110,13 +153,39 @@ const getAvailableTools = async (req, res) => {
continue;
}
toolsOutput.push(plugin);
const toolToAdd = { ...plugin };
if (plugin.pluginKey.includes(Constants.mcp_delimiter)) {
const parts = plugin.pluginKey.split(Constants.mcp_delimiter);
const serverName = parts[parts.length - 1];
const serverConfig = appConfig?.mcpConfig?.[serverName];
if (serverConfig?.customUserVars) {
const customVarKeys = Object.keys(serverConfig.customUserVars);
if (customVarKeys.length === 0) {
toolToAdd.authConfig = [];
toolToAdd.authenticated = true;
} else {
toolToAdd.authConfig = Object.entries(serverConfig.customUserVars).map(
([key, value]) => ({
authField: key,
label: value.title || key,
description: value.description || '',
}),
);
toolToAdd.authenticated = false;
}
}
}
toolsOutput.push(toolToAdd);
}
const finalTools = filterUniquePlugins(toolsOutput);
await cache.set(CacheKeys.TOOLS, finalTools);
res.status(200).json(finalTools);
const dedupedTools = filterUniquePlugins([...(mcpPlugins ?? []), ...finalTools]);
res.status(200).json(dedupedTools);
} catch (error) {
logger.error('[getAvailableTools]', error);
res.status(500).json({ message: error.message });

View File

@@ -1,3 +1,4 @@
const { Constants } = require('librechat-data-provider');
const { getCachedTools, getAppConfig } = require('~/server/services/Config');
const { getLogStores } = require('~/cache');
@@ -16,10 +17,18 @@ jest.mock('~/server/services/Config', () => ({
includedTools: [],
}),
setCachedTools: jest.fn(),
mergeUserTools: jest.fn(),
}));
// loadAndFormatTools mock removed - no longer used in PluginController
// getMCPManager mock removed - no longer used in PluginController
jest.mock('~/config', () => ({
getMCPManager: jest.fn(() => ({
getAllToolFunctions: jest.fn().mockResolvedValue({}),
getRawConfig: jest.fn().mockReturnValue({}),
})),
getFlowStateManager: jest.fn(),
}));
jest.mock('~/app/clients/tools', () => ({
availableTools: [],
@@ -150,6 +159,52 @@ describe('PluginController', () => {
});
describe('getAvailableTools', () => {
it('should use convertMCPToolsToPlugins for user-specific MCP tools', async () => {
const mockUserTools = {
[`tool1${Constants.mcp_delimiter}server1`]: {
type: 'function',
function: {
name: `tool1${Constants.mcp_delimiter}server1`,
description: 'Tool 1',
parameters: { type: 'object', properties: {} },
},
},
};
mockCache.get.mockResolvedValue(null);
getCachedTools.mockResolvedValueOnce(mockUserTools);
mockReq.config = {
mcpConfig: {
server1: {},
},
paths: { structuredTools: '/mock/path' },
};
// Mock MCP manager to return empty tools initially (since getAllToolFunctions is called)
const mockMCPManager = {
getAllToolFunctions: jest.fn().mockResolvedValue({}),
getRawConfig: jest.fn().mockReturnValue({}),
};
require('~/config').getMCPManager.mockReturnValue(mockMCPManager);
// Mock second call to return tool definitions (includeGlobal: true)
getCachedTools.mockResolvedValueOnce(mockUserTools);
await getAvailableTools(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(200);
const responseData = mockRes.json.mock.calls[0][0];
expect(responseData).toBeDefined();
expect(Array.isArray(responseData)).toBe(true);
expect(responseData.length).toBeGreaterThan(0);
const convertedTool = responseData.find(
(tool) => tool.pluginKey === `tool1${Constants.mcp_delimiter}server1`,
);
expect(convertedTool).toBeDefined();
// The real convertMCPToolsToPlugins extracts the tool name from the delimited plugin key
expect(convertedTool.name).toBe('tool1');
});
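
The assertion above leans on the MCP plugin key convention toolName + mcp_delimiter + serverName; a minimal sketch of the extraction the test depends on (an illustration only, not the real convertMCPToolsToPlugins):

const { Constants } = require('librechat-data-provider');

// Splits a delimited MCP plugin key into its tool and server parts.
// e.g. `tool1${Constants.mcp_delimiter}server1` -> { name: 'tool1', serverName: 'server1' }
function splitMCPPluginKey(pluginKey) {
  const parts = pluginKey.split(Constants.mcp_delimiter);
  return { name: parts[0], serverName: parts[parts.length - 1] };
}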
it('should use filterUniquePlugins to deduplicate combined tools', async () => {
const mockUserTools = {
'user-tool': {
@@ -174,6 +229,9 @@ describe('PluginController', () => {
paths: { structuredTools: '/mock/path' },
};
// Mock second call to return tool definitions
getCachedTools.mockResolvedValueOnce(mockUserTools);
await getAvailableTools(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(200);
@@ -196,7 +254,14 @@ describe('PluginController', () => {
require('~/app/clients/tools').availableTools.push(mockPlugin);
mockCache.get.mockResolvedValue(null);
// getCachedTools returns the tool definitions
// First call returns null for user tools
getCachedTools.mockResolvedValueOnce(null);
mockReq.config = {
mcpConfig: null,
paths: { structuredTools: '/mock/path' },
};
// Second call (with includeGlobal: true) returns the tool definitions
getCachedTools.mockResolvedValueOnce({
tool1: {
type: 'function',
@@ -207,10 +272,6 @@ describe('PluginController', () => {
},
},
});
mockReq.config = {
mcpConfig: null,
paths: { structuredTools: '/mock/path' },
};
await getAvailableTools(mockReq, mockRes);
@@ -241,7 +302,14 @@ describe('PluginController', () => {
});
mockCache.get.mockResolvedValue(null);
// getCachedTools returns the tool definitions
// First call returns null for user tools
getCachedTools.mockResolvedValueOnce(null);
mockReq.config = {
mcpConfig: null,
paths: { structuredTools: '/mock/path' },
};
// Second call (with includeGlobal: true) returns the tool definitions
getCachedTools.mockResolvedValueOnce({
toolkit1_function: {
type: 'function',
@@ -252,10 +320,6 @@ describe('PluginController', () => {
},
},
});
mockReq.config = {
mcpConfig: null,
paths: { structuredTools: '/mock/path' },
};
await getAvailableTools(mockReq, mockRes);
@@ -267,7 +331,126 @@ describe('PluginController', () => {
});
});
describe('plugin.icon behavior', () => {
const callGetAvailableToolsWithMCPServer = async (serverConfig) => {
mockCache.get.mockResolvedValue(null);
const functionTools = {
[`test-tool${Constants.mcp_delimiter}test-server`]: {
type: 'function',
function: {
name: `test-tool${Constants.mcp_delimiter}test-server`,
description: 'A test tool',
parameters: { type: 'object', properties: {} },
},
},
};
// Mock the MCP manager to return tools and server config
const mockMCPManager = {
getAllToolFunctions: jest.fn().mockResolvedValue(functionTools),
getRawConfig: jest.fn().mockReturnValue(serverConfig),
};
require('~/config').getMCPManager.mockReturnValue(mockMCPManager);
// First call returns empty user tools
getCachedTools.mockResolvedValueOnce({});
// Mock getAppConfig to return the mcpConfig
mockReq.config = {
mcpConfig: {
'test-server': serverConfig,
},
};
// Second call (with includeGlobal: true) returns the tool definitions
getCachedTools.mockResolvedValueOnce(functionTools);
await getAvailableTools(mockReq, mockRes);
const responseData = mockRes.json.mock.calls[0][0];
return responseData.find(
(tool) => tool.pluginKey === `test-tool${Constants.mcp_delimiter}test-server`,
);
};
it('should set plugin.icon when iconPath is defined', async () => {
const serverConfig = {
iconPath: '/path/to/icon.png',
};
const testTool = await callGetAvailableToolsWithMCPServer(serverConfig);
expect(testTool.icon).toBe('/path/to/icon.png');
});
it('should set plugin.icon to undefined when iconPath is not defined', async () => {
const serverConfig = {};
const testTool = await callGetAvailableToolsWithMCPServer(serverConfig);
expect(testTool.icon).toBeUndefined();
});
});
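
In other words, these two tests pin down a one-line mapping; a hedged sketch, since the controller line itself is not part of this hunk:

// Assumed icon mapping exercised by the tests above:
// plugin.icon = mcpManager.getRawConfig(serverName)?.iconPath; // undefined when iconPath is absent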
describe('helper function integration', () => {
it('should properly handle MCP tools with custom user variables', async () => {
const appConfig = {
mcpConfig: {
'test-server': {
customUserVars: {
API_KEY: { title: 'API Key', description: 'Your API key' },
},
},
},
};
// Mock MCP tools returned by getAllToolFunctions
const mcpToolFunctions = {
[`tool1${Constants.mcp_delimiter}test-server`]: {
type: 'function',
function: {
name: `tool1${Constants.mcp_delimiter}test-server`,
description: 'Tool 1',
parameters: {},
},
},
};
// Mock the MCP manager to return tools
const mockMCPManager = {
getAllToolFunctions: jest.fn().mockResolvedValue(mcpToolFunctions),
getRawConfig: jest.fn().mockReturnValue({
customUserVars: {
API_KEY: { title: 'API Key', description: 'Your API key' },
},
}),
};
require('~/config').getMCPManager.mockReturnValue(mockMCPManager);
mockCache.get.mockResolvedValue(null);
mockReq.config = appConfig;
// First call returns user tools (empty in this case)
getCachedTools.mockResolvedValueOnce({});
// Second call (with includeGlobal: true) returns tool definitions including our MCP tool
getCachedTools.mockResolvedValueOnce(mcpToolFunctions);
await getAvailableTools(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(200);
const responseData = mockRes.json.mock.calls[0][0];
expect(Array.isArray(responseData)).toBe(true);
// Find the MCP tool in the response
const mcpTool = responseData.find(
(tool) => tool.pluginKey === `tool1${Constants.mcp_delimiter}test-server`,
);
// The actual implementation adds authConfig and sets authenticated to false when customUserVars exist
expect(mcpTool).toBeDefined();
expect(mcpTool.authConfig).toEqual([
{ authField: 'API_KEY', label: 'API Key', description: 'Your API key' },
]);
expect(mcpTool.authenticated).toBe(false);
});
it('should handle error cases gracefully', async () => {
mockCache.get.mockRejectedValue(new Error('Cache error'));
@@ -289,13 +472,23 @@ describe('PluginController', () => {
it('should handle null cachedTools and cachedUserTools', async () => {
mockCache.get.mockResolvedValue(null);
// getCachedTools returns empty object instead of null
getCachedTools.mockResolvedValueOnce({});
// First call returns null for user tools
getCachedTools.mockResolvedValueOnce(null);
mockReq.config = {
mcpConfig: null,
paths: { structuredTools: '/mock/path' },
};
// Mock MCP manager to return no tools
const mockMCPManager = {
getAllToolFunctions: jest.fn().mockResolvedValue({}),
getRawConfig: jest.fn().mockReturnValue({}),
};
require('~/config').getMCPManager.mockReturnValue(mockMCPManager);
// Second call (with includeGlobal: true) returns empty object instead of null
getCachedTools.mockResolvedValueOnce({});
await getAvailableTools(mockReq, mockRes);
// Should handle null values gracefully
@@ -310,9 +503,9 @@ describe('PluginController', () => {
paths: { structuredTools: '/mock/path' },
};
// Mock getCachedTools to return undefined
// Mock getCachedTools to return undefined for both calls
getCachedTools.mockReset();
getCachedTools.mockResolvedValueOnce(undefined);
getCachedTools.mockResolvedValueOnce(undefined).mockResolvedValueOnce(undefined);
await getAvailableTools(mockReq, mockRes);
@@ -321,6 +514,51 @@ describe('PluginController', () => {
expect(mockRes.json).toHaveBeenCalledWith([]);
});
it('should handle `cachedToolsArray` and `mcpPlugins` both being defined', async () => {
const cachedTools = [{ name: 'CachedTool', pluginKey: 'cached-tool', description: 'Cached' }];
// Use MCP delimiter for the user tool so convertMCPToolsToPlugins works
const userTools = {
[`user-tool${Constants.mcp_delimiter}server1`]: {
type: 'function',
function: {
name: `user-tool${Constants.mcp_delimiter}server1`,
description: 'User tool',
parameters: {},
},
},
};
mockCache.get.mockResolvedValue(cachedTools);
getCachedTools.mockResolvedValueOnce(userTools);
mockReq.config = {
mcpConfig: {
server1: {},
},
paths: { structuredTools: '/mock/path' },
};
// Mock MCP manager to return empty tools initially
const mockMCPManager = {
getAllToolFunctions: jest.fn().mockResolvedValue({}),
getRawConfig: jest.fn().mockReturnValue({}),
};
require('~/config').getMCPManager.mockReturnValue(mockMCPManager);
// The controller expects a second call to getCachedTools
getCachedTools.mockResolvedValueOnce({
'cached-tool': { type: 'function', function: { name: 'cached-tool' } },
[`user-tool${Constants.mcp_delimiter}server1`]:
userTools[`user-tool${Constants.mcp_delimiter}server1`],
});
await getAvailableTools(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(200);
const responseData = mockRes.json.mock.calls[0][0];
// Should have both cached and user tools
expect(responseData.length).toBeGreaterThanOrEqual(2);
});
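
The merge asserted here boils down to concatenating the cached MCP plugins with the freshly built list and deduplicating; a minimal sketch, assuming filterUniquePlugins keys uniqueness on pluginKey (the real helper is imported by the controller and not shown in this diff):

// Deduplicate plugins by pluginKey, keeping the first occurrence.
function dedupeByPluginKey(plugins) {
  const seen = new Set();
  return plugins.filter((plugin) => {
    if (seen.has(plugin.pluginKey)) {
      return false;
    }
    seen.add(plugin.pluginKey);
    return true;
  });
}

// dedupeByPluginKey([...mcpPlugins, ...finalTools]) keeps at least one entry for 'cached-tool'
// and one for the delimited user tool, which is what the length assertion above checks.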
it('should handle empty toolDefinitions object', async () => {
mockCache.get.mockResolvedValue(null);
// Reset getCachedTools to ensure clean state
@@ -331,12 +569,76 @@ describe('PluginController', () => {
// Ensure no plugins are available
require('~/app/clients/tools').availableTools.length = 0;
// Reset MCP manager to default state
const mockMCPManager = {
getAllToolFunctions: jest.fn().mockResolvedValue({}),
getRawConfig: jest.fn().mockReturnValue({}),
};
require('~/config').getMCPManager.mockReturnValue(mockMCPManager);
await getAvailableTools(mockReq, mockRes);
// With empty tool definitions, no tools should be in the final output
expect(mockRes.json).toHaveBeenCalledWith([]);
});
it('should handle MCP tools without customUserVars', async () => {
const appConfig = {
mcpConfig: {
'test-server': {
// No customUserVars defined
},
},
};
const mockUserTools = {
[`tool1${Constants.mcp_delimiter}test-server`]: {
type: 'function',
function: {
name: `tool1${Constants.mcp_delimiter}test-server`,
description: 'Tool 1',
parameters: { type: 'object', properties: {} },
},
},
};
// Mock the MCP manager to return the tools
const mockMCPManager = {
getAllToolFunctions: jest.fn().mockResolvedValue(mockUserTools),
getRawConfig: jest.fn().mockReturnValue({
// No customUserVars defined
}),
};
require('~/config').getMCPManager.mockReturnValue(mockMCPManager);
mockCache.get.mockResolvedValue(null);
mockReq.config = appConfig;
// First call returns empty user tools
getCachedTools.mockResolvedValueOnce({});
// Second call (with includeGlobal: true) returns the tool definitions
getCachedTools.mockResolvedValueOnce(mockUserTools);
// Ensure no plugins in availableTools for clean test
require('~/app/clients/tools').availableTools.length = 0;
await getAvailableTools(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(200);
const responseData = mockRes.json.mock.calls[0][0];
expect(Array.isArray(responseData)).toBe(true);
expect(responseData.length).toBeGreaterThan(0);
const mcpTool = responseData.find(
(tool) => tool.pluginKey === `tool1${Constants.mcp_delimiter}test-server`,
);
expect(mcpTool).toBeDefined();
expect(mcpTool.authenticated).toBe(true);
// The actual implementation sets authConfig to an empty array when no customUserVars are defined
expect(mcpTool.authConfig).toEqual([]);
});
it('should handle undefined filteredTools and includedTools', async () => {
mockReq.config = {};
mockCache.get.mockResolvedValue(null);
@@ -365,129 +667,20 @@ describe('PluginController', () => {
require('~/app/clients/tools').availableTools.push(mockToolkit);
mockCache.get.mockResolvedValue(null);
// getCachedTools returns empty object to avoid null reference error
// First call returns empty object
getCachedTools.mockResolvedValueOnce({});
mockReq.config = {
mcpConfig: null,
paths: { structuredTools: '/mock/path' },
};
// Second call (with includeGlobal: true) returns empty object to avoid null reference error
getCachedTools.mockResolvedValueOnce({});
await getAvailableTools(mockReq, mockRes);
// Should handle null toolDefinitions gracefully
expect(mockRes.status).toHaveBeenCalledWith(200);
});
it('should handle undefined toolDefinitions when checking isToolDefined (traversaal_search bug)', async () => {
// This test reproduces the bug where toolDefinitions is undefined
// and accessing toolDefinitions[plugin.pluginKey] causes a TypeError
const mockPlugin = {
name: 'Traversaal Search',
pluginKey: 'traversaal_search',
description: 'Search plugin',
};
// Add the plugin to availableTools
require('~/app/clients/tools').availableTools.push(mockPlugin);
mockCache.get.mockResolvedValue(null);
mockReq.config = {
mcpConfig: null,
paths: { structuredTools: '/mock/path' },
};
// CRITICAL: getCachedTools returns undefined
// This is what causes the bug when trying to access toolDefinitions[plugin.pluginKey]
getCachedTools.mockResolvedValueOnce(undefined);
// This should not throw an error with the optional chaining fix
await getAvailableTools(mockReq, mockRes);
// Should handle undefined toolDefinitions gracefully and return an empty array
expect(mockRes.status).toHaveBeenCalledWith(200);
expect(mockRes.json).toHaveBeenCalledWith([]);
});
it('should re-initialize tools from appConfig when cache returns null', async () => {
// Setup: Initial state with tools in appConfig
const mockAppTools = {
tool1: {
type: 'function',
function: {
name: 'tool1',
description: 'Tool 1',
parameters: {},
},
},
tool2: {
type: 'function',
function: {
name: 'tool2',
description: 'Tool 2',
parameters: {},
},
},
};
// Add matching plugins to availableTools
require('~/app/clients/tools').availableTools.push(
{ name: 'Tool 1', pluginKey: 'tool1', description: 'Tool 1' },
{ name: 'Tool 2', pluginKey: 'tool2', description: 'Tool 2' },
);
// Simulate cache cleared state (returns null)
mockCache.get.mockResolvedValue(null);
getCachedTools.mockResolvedValueOnce(null); // Global tools (cache cleared)
mockReq.config = {
filteredTools: [],
includedTools: [],
availableTools: mockAppTools,
};
// Mock setCachedTools to verify it's called to re-initialize
const { setCachedTools } = require('~/server/services/Config');
await getAvailableTools(mockReq, mockRes);
// Should have re-initialized the cache with tools from appConfig
expect(setCachedTools).toHaveBeenCalledWith(mockAppTools);
// Should still return tools successfully
expect(mockRes.status).toHaveBeenCalledWith(200);
const responseData = mockRes.json.mock.calls[0][0];
expect(responseData).toHaveLength(2);
expect(responseData.find((t) => t.pluginKey === 'tool1')).toBeDefined();
expect(responseData.find((t) => t.pluginKey === 'tool2')).toBeDefined();
});
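
The re-initialization path asserted here can be summarized with a short sketch (names follow the test; the controller code for this branch is not shown in this hunk, so treat this as an assumption):

// Assumed recovery path: when the tools cache comes back null, fall back to
// appConfig.availableTools and repopulate the cache for subsequent requests.
async function ensureToolDefinitions({ cachedTools, appConfig, setCachedTools }) {
  if (cachedTools != null) {
    return cachedTools;
  }
  const fallback = appConfig?.availableTools;
  if (fallback && Object.keys(fallback).length > 0) {
    await setCachedTools(fallback); // matches expect(setCachedTools).toHaveBeenCalledWith(mockAppTools)
    return fallback;
  }
  return {}; // nothing to restore; the endpoint then responds with []
}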
it('should handle cache clear without appConfig.availableTools gracefully', async () => {
// Setup: appConfig without availableTools
getAppConfig.mockResolvedValue({
filteredTools: [],
includedTools: [],
// No availableTools property
});
// Clear availableTools array
require('~/app/clients/tools').availableTools.length = 0;
// Cache returns null (cleared state)
mockCache.get.mockResolvedValue(null);
getCachedTools.mockResolvedValueOnce(null); // Global tools (cache cleared)
mockReq.config = {
filteredTools: [],
includedTools: [],
// No availableTools
};
await getAvailableTools(mockReq, mockRes);
// Should be handled gracefully without crashing
expect(mockRes.status).toHaveBeenCalledWith(200);
expect(mockRes.json).toHaveBeenCalledWith([]);
});
});
});

View File

@@ -1,34 +1,37 @@
const { logger } = require('@librechat/data-schemas');
const { Tools, CacheKeys, Constants, FileSources } = require('librechat-data-provider');
const {
webSearchKeys,
MCPOAuthHandler,
MCPTokenStorage,
normalizeHttpError,
extractWebSearchEnvVars,
normalizeHttpError,
MCPTokenStorage,
} = require('@librechat/api');
const {
getFiles,
findToken,
updateUser,
deleteFiles,
deleteConvos,
deletePresets,
deleteMessages,
deleteUserById,
deleteAllSharedLinks,
deleteAllUserSessions,
} = require('~/models');
const { updateUserPluginAuth, deleteUserPluginAuth } = require('~/server/services/PluginService');
const { updateUserPluginsService, deleteUserKey } = require('~/server/services/UserService');
const { verifyEmail, resendVerificationEmail } = require('~/server/services/AuthService');
const { needsRefresh, getNewS3URL } = require('~/server/services/Files/S3/crud');
const { Tools, Constants, FileSources } = require('librechat-data-provider');
const { processDeleteRequest } = require('~/server/services/Files/process');
const { Transaction, Balance, User, Token } = require('~/db/models');
const { getMCPManager, getFlowStateManager } = require('~/config');
const { getAppConfig } = require('~/server/services/Config');
const { deleteToolCalls } = require('~/models/ToolCall');
const { deleteAllSharedLinks } = require('~/models');
const { getMCPManager } = require('~/config');
const { MCPOAuthHandler } = require('@librechat/api');
const { getFlowStateManager } = require('~/config');
const { CacheKeys } = require('librechat-data-provider');
const { getLogStores } = require('~/cache');
const { clearMCPServerTools } = require('~/server/services/Config/mcpToolsCache');
const { findToken } = require('~/models');
const getUserController = async (req, res) => {
const appConfig = await getAppConfig({ role: req.user?.role });
@@ -372,6 +375,9 @@ const maybeUninstallOAuthMCP = async (userId, pluginKey, appConfig) => {
const flowId = MCPOAuthHandler.generateFlowId(userId, serverName);
await flowManager.deleteFlow(flowId, 'mcp_get_tokens');
await flowManager.deleteFlow(flowId, 'mcp_oauth');
// 6. clear the tools cache for the server
await clearMCPServerTools({ userId, serverName });
};
module.exports = {

View File

@@ -1,342 +0,0 @@
const { Tools } = require('librechat-data-provider');
// Mock all dependencies before requiring the module
jest.mock('nanoid', () => ({
nanoid: jest.fn(() => 'mock-id'),
}));
jest.mock('@librechat/api', () => ({
sendEvent: jest.fn(),
}));
jest.mock('@librechat/data-schemas', () => ({
logger: {
error: jest.fn(),
},
}));
jest.mock('@librechat/agents', () => ({
EnvVar: { CODE_API_KEY: 'CODE_API_KEY' },
Providers: { GOOGLE: 'google' },
GraphEvents: {},
getMessageId: jest.fn(),
ToolEndHandler: jest.fn(),
handleToolCalls: jest.fn(),
ChatModelStreamHandler: jest.fn(),
}));
jest.mock('~/server/services/Files/Citations', () => ({
processFileCitations: jest.fn(),
}));
jest.mock('~/server/services/Files/Code/process', () => ({
processCodeOutput: jest.fn(),
}));
jest.mock('~/server/services/Tools/credentials', () => ({
loadAuthValues: jest.fn(),
}));
jest.mock('~/server/services/Files/process', () => ({
saveBase64Image: jest.fn(),
}));
describe('createToolEndCallback', () => {
let req, res, artifactPromises, createToolEndCallback;
let logger;
beforeEach(() => {
jest.clearAllMocks();
// Get the mocked logger
logger = require('@librechat/data-schemas').logger;
// Now require the module after all mocks are set up
const callbacks = require('../callbacks');
createToolEndCallback = callbacks.createToolEndCallback;
req = {
user: { id: 'user123' },
};
res = {
headersSent: false,
write: jest.fn(),
};
artifactPromises = [];
});
describe('ui_resources artifact handling', () => {
it('should process ui_resources artifact and return attachment when headers not sent', async () => {
const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });
const output = {
tool_call_id: 'tool123',
artifact: {
[Tools.ui_resources]: {
data: {
0: { type: 'button', label: 'Click me' },
1: { type: 'input', placeholder: 'Enter text' },
},
},
},
};
const metadata = {
run_id: 'run456',
thread_id: 'thread789',
};
await toolEndCallback({ output }, metadata);
// Wait for all promises to resolve
const results = await Promise.all(artifactPromises);
// When headers have not been sent, the attachment is returned without writing to the response
expect(res.write).not.toHaveBeenCalled();
const attachment = results[0];
expect(attachment).toEqual({
type: Tools.ui_resources,
messageId: 'run456',
toolCallId: 'tool123',
conversationId: 'thread789',
[Tools.ui_resources]: {
0: { type: 'button', label: 'Click me' },
1: { type: 'input', placeholder: 'Enter text' },
},
});
});
it('should write to response when headers are already sent', async () => {
res.headersSent = true;
const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });
const output = {
tool_call_id: 'tool123',
artifact: {
[Tools.ui_resources]: {
data: {
0: { type: 'carousel', items: [] },
},
},
},
};
const metadata = {
run_id: 'run456',
thread_id: 'thread789',
};
await toolEndCallback({ output }, metadata);
const results = await Promise.all(artifactPromises);
expect(res.write).toHaveBeenCalled();
expect(results[0]).toEqual({
type: Tools.ui_resources,
messageId: 'run456',
toolCallId: 'tool123',
conversationId: 'thread789',
[Tools.ui_resources]: {
0: { type: 'carousel', items: [] },
},
});
});
it('should handle errors when processing ui_resources', async () => {
const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });
// Mock res.write to throw an error
res.headersSent = true;
res.write.mockImplementation(() => {
throw new Error('Write failed');
});
const output = {
tool_call_id: 'tool123',
artifact: {
[Tools.ui_resources]: {
data: {
0: { type: 'test' },
},
},
},
};
const metadata = {
run_id: 'run456',
thread_id: 'thread789',
};
await toolEndCallback({ output }, metadata);
const results = await Promise.all(artifactPromises);
expect(logger.error).toHaveBeenCalledWith(
'Error processing artifact content:',
expect.any(Error),
);
expect(results[0]).toBeNull();
});
it('should handle multiple artifacts including ui_resources', async () => {
const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });
const output = {
tool_call_id: 'tool123',
artifact: {
[Tools.ui_resources]: {
data: {
0: { type: 'chart', data: [] },
},
},
[Tools.web_search]: {
results: ['result1', 'result2'],
},
},
};
const metadata = {
run_id: 'run456',
thread_id: 'thread789',
};
await toolEndCallback({ output }, metadata);
const results = await Promise.all(artifactPromises);
// Both ui_resources and web_search should be processed
expect(artifactPromises).toHaveLength(2);
expect(results).toHaveLength(2);
// Check ui_resources attachment
const uiResourceAttachment = results.find((r) => r?.type === Tools.ui_resources);
expect(uiResourceAttachment).toBeTruthy();
expect(uiResourceAttachment[Tools.ui_resources]).toEqual({
0: { type: 'chart', data: [] },
});
// Check web_search attachment
const webSearchAttachment = results.find((r) => r?.type === Tools.web_search);
expect(webSearchAttachment).toBeTruthy();
expect(webSearchAttachment[Tools.web_search]).toEqual({
results: ['result1', 'result2'],
});
});
it('should not process artifacts when output has no artifacts', async () => {
const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });
const output = {
tool_call_id: 'tool123',
content: 'Some regular content',
// No artifact property
};
const metadata = {
run_id: 'run456',
thread_id: 'thread789',
};
await toolEndCallback({ output }, metadata);
expect(artifactPromises).toHaveLength(0);
expect(res.write).not.toHaveBeenCalled();
});
});
describe('edge cases', () => {
it('should handle empty ui_resources data object', async () => {
const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });
const output = {
tool_call_id: 'tool123',
artifact: {
[Tools.ui_resources]: {
data: {},
},
},
};
const metadata = {
run_id: 'run456',
thread_id: 'thread789',
};
await toolEndCallback({ output }, metadata);
const results = await Promise.all(artifactPromises);
expect(results[0]).toEqual({
type: Tools.ui_resources,
messageId: 'run456',
toolCallId: 'tool123',
conversationId: 'thread789',
[Tools.ui_resources]: {},
});
});
it('should handle ui_resources with complex nested data', async () => {
const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });
const complexData = {
0: {
type: 'form',
fields: [
{ name: 'field1', type: 'text', required: true },
{ name: 'field2', type: 'select', options: ['a', 'b', 'c'] },
],
nested: {
deep: {
value: 123,
array: [1, 2, 3],
},
},
},
};
const output = {
tool_call_id: 'tool123',
artifact: {
[Tools.ui_resources]: {
data: complexData,
},
},
};
const metadata = {
run_id: 'run456',
thread_id: 'thread789',
};
await toolEndCallback({ output }, metadata);
const results = await Promise.all(artifactPromises);
expect(results[0][Tools.ui_resources]).toEqual(complexData);
});
it('should handle when output is undefined', async () => {
const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });
const metadata = {
run_id: 'run456',
thread_id: 'thread789',
};
await toolEndCallback({ output: undefined }, metadata);
expect(artifactPromises).toHaveLength(0);
expect(res.write).not.toHaveBeenCalled();
});
it('should handle when data parameter is undefined', async () => {
const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });
const metadata = {
run_id: 'run456',
thread_id: 'thread789',
};
await toolEndCallback(undefined, metadata);
expect(artifactPromises).toHaveLength(0);
expect(res.write).not.toHaveBeenCalled();
});
});
});

View File

@@ -158,7 +158,7 @@ describe('duplicateAgent', () => {
});
});
it('should convert `tool_resources.ocr` to `tool_resources.context`', async () => {
it('should handle tool_resources.ocr correctly', async () => {
const mockAgent = {
id: 'agent_123',
name: 'Test Agent',
@@ -178,7 +178,7 @@ describe('duplicateAgent', () => {
expect(createAgent).toHaveBeenCalledWith(
expect.objectContaining({
tool_resources: {
context: { enabled: true, config: 'test' },
ocr: { enabled: true, config: 'test' },
},
}),
);

View File

@@ -265,30 +265,6 @@ function createToolEndCallback({ req, res, artifactPromises }) {
);
}
// TODO: a lot of duplicated code in createToolEndCallback
// we should refactor this to use a helper function in a follow-up PR
if (output.artifact[Tools.ui_resources]) {
artifactPromises.push(
(async () => {
const attachment = {
type: Tools.ui_resources,
messageId: metadata.run_id,
toolCallId: output.tool_call_id,
conversationId: metadata.thread_id,
[Tools.ui_resources]: output.artifact[Tools.ui_resources].data,
};
if (!res.headersSent) {
return attachment;
}
res.write(`event: attachment\ndata: ${JSON.stringify(attachment)}\n\n`);
return attachment;
})().catch((error) => {
logger.error('Error processing artifact content:', error);
return null;
}),
);
}
if (output.artifact[Tools.web_search]) {
artifactPromises.push(
(async () => {

View File

@@ -1121,13 +1121,6 @@ class AgentClient extends BaseClient {
);
}
if (endpointConfig?.titleConvo === false) {
logger.debug(
`[api/server/controllers/agents/client.js #titleConvo] Title generation disabled for endpoint "${endpoint}"`,
);
return;
}
if (endpointConfig?.titleEndpoint && endpointConfig.titleEndpoint !== endpoint) {
try {
titleProviderConfig = getProviderConfig({
@@ -1137,7 +1130,7 @@ class AgentClient extends BaseClient {
endpoint = endpointConfig.titleEndpoint;
} catch (error) {
logger.warn(
`[api/server/controllers/agents/client.js #titleConvo] Error getting title endpoint config for "${endpointConfig.titleEndpoint}", falling back to default`,
`[api/server/controllers/agents/client.js #titleConvo] Error getting title endpoint config for ${endpointConfig.titleEndpoint}, falling back to default`,
error,
);
// Fall back to original provider config

View File

@@ -263,125 +263,6 @@ describe('AgentClient - titleConvo', () => {
expect(result).toBeUndefined();
});
it('should skip title generation when titleConvo is set to false', async () => {
// Set titleConvo to false in endpoint config
mockReq.config = {
endpoints: {
[EModelEndpoint.openAI]: {
titleConvo: false,
titleModel: 'gpt-3.5-turbo',
titlePrompt: 'Custom title prompt',
titleMethod: 'structured',
titlePromptTemplate: 'Template: {{content}}',
},
},
};
const text = 'Test conversation text';
const abortController = new AbortController();
const result = await client.titleConvo({ text, abortController });
// Should return undefined without generating a title
expect(result).toBeUndefined();
// generateTitle should NOT have been called
expect(mockRun.generateTitle).not.toHaveBeenCalled();
// recordCollectedUsage should NOT have been called
expect(client.recordCollectedUsage).not.toHaveBeenCalled();
});
it('should skip title generation when titleConvo is false in all config', async () => {
// Set titleConvo to false in "all" config
mockReq.config = {
endpoints: {
all: {
titleConvo: false,
titleModel: 'gpt-4o-mini',
titlePrompt: 'All config title prompt',
titleMethod: 'completion',
titlePromptTemplate: 'All config template',
},
},
};
const text = 'Test conversation text';
const abortController = new AbortController();
const result = await client.titleConvo({ text, abortController });
// Should return undefined without generating a title
expect(result).toBeUndefined();
// generateTitle should NOT have been called
expect(mockRun.generateTitle).not.toHaveBeenCalled();
// recordCollectedUsage should NOT have been called
expect(client.recordCollectedUsage).not.toHaveBeenCalled();
});
it('should skip title generation when titleConvo is false for custom endpoint scenario', async () => {
// This test validates the behavior when customEndpointConfig (retrieved via
// getProviderConfig for custom endpoints) has titleConvo: false.
//
// The code path is:
// 1. endpoints?.all is checked (undefined in this test)
// 2. endpoints?.[endpoint] is checked (our test config)
// 3. Would fall back to titleProviderConfig.customEndpointConfig (for real custom endpoints)
//
// We simulate a custom endpoint scenario using a dynamically named endpoint config
// Create a unique endpoint name that represents a custom endpoint
const customEndpointName = 'customEndpoint';
// Configure the endpoint to have titleConvo: false
// This simulates what would be in customEndpointConfig for a real custom endpoint
mockReq.config = {
endpoints: {
// No 'all' config - so it will check endpoints[endpoint]
// This config represents what customEndpointConfig would contain
[customEndpointName]: {
titleConvo: false,
titleModel: 'custom-model-v1',
titlePrompt: 'Custom endpoint title prompt',
titleMethod: 'completion',
titlePromptTemplate: 'Custom template: {{content}}',
baseURL: 'https://api.custom-llm.com/v1',
apiKey: 'test-custom-key',
// Additional custom endpoint properties
models: {
default: ['custom-model-v1', 'custom-model-v2'],
},
},
},
};
// Set up agent to use our custom endpoint
// Use openAI as base but override with custom endpoint name for this test
mockAgent.endpoint = EModelEndpoint.openAI;
mockAgent.provider = EModelEndpoint.openAI;
// Override the endpoint in the config to point to our custom config
mockReq.config.endpoints[EModelEndpoint.openAI] =
mockReq.config.endpoints[customEndpointName];
delete mockReq.config.endpoints[customEndpointName];
const text = 'Test custom endpoint conversation';
const abortController = new AbortController();
const result = await client.titleConvo({ text, abortController });
// Should return undefined without generating a title because titleConvo is false
expect(result).toBeUndefined();
// generateTitle should NOT have been called
expect(mockRun.generateTitle).not.toHaveBeenCalled();
// recordCollectedUsage should NOT have been called
expect(client.recordCollectedUsage).not.toHaveBeenCalled();
});
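
The code path spelled out in the comment above amounts to a simple precedence chain; a minimal sketch of that resolution (an illustration of the lookup the comment describes, not the client implementation; note the titleConvo: false skip branch itself is removed in the client.js hunk earlier in this diff):

// Assumed precedence for resolving the title endpoint config, per the comment above:
// 1. endpoints.all, 2. endpoints[endpoint], 3. customEndpointConfig from getProviderConfig.
function resolveTitleEndpointConfig({ endpoints = {}, endpoint, customEndpointConfig }) {
  return endpoints.all ?? endpoints[endpoint] ?? customEndpointConfig ?? {};
}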
it('should pass titleEndpoint configuration to generateTitle', async () => {
// Mock the API key just for this test
const originalApiKey = process.env.ANTHROPIC_API_KEY;

View File

@@ -2,12 +2,7 @@ const { z } = require('zod');
const fs = require('fs').promises;
const { nanoid } = require('nanoid');
const { logger } = require('@librechat/data-schemas');
const {
agentCreateSchema,
agentUpdateSchema,
mergeAgentOcrConversion,
convertOcrToContextInPlace,
} = require('@librechat/api');
const { agentCreateSchema, agentUpdateSchema } = require('@librechat/api');
const {
Tools,
Constants,
@@ -71,7 +66,7 @@ const createAgentHandler = async (req, res) => {
agentData.author = userId;
agentData.tools = [];
const availableTools = await getCachedTools();
const availableTools = await getCachedTools({ includeGlobal: true });
for (const tool of tools) {
if (availableTools[tool]) {
agentData.tools.push(tool);
@@ -203,32 +198,19 @@ const getAgentHandler = async (req, res, expandProperties = false) => {
* @param {object} req.params - Request params
* @param {string} req.params.id - Agent identifier.
* @param {AgentUpdateParams} req.body - The Agent update parameters.
* @returns {Promise<Agent>} 200 - success response - application/json
* @returns {Agent} 200 - success response - application/json
*/
const updateAgentHandler = async (req, res) => {
try {
const id = req.params.id;
const validatedData = agentUpdateSchema.parse(req.body);
const { _id, ...updateData } = removeNullishValues(validatedData);
// Convert OCR to context in incoming updateData
convertOcrToContextInPlace(updateData);
const existingAgent = await getAgent({ id });
if (!existingAgent) {
return res.status(404).json({ error: 'Agent not found' });
}
// Convert legacy OCR tool resource to context format in existing agent
const ocrConversion = mergeAgentOcrConversion(existingAgent, updateData);
if (ocrConversion.tool_resources) {
updateData.tool_resources = ocrConversion.tool_resources;
}
if (ocrConversion.tools) {
updateData.tools = ocrConversion.tools;
}
let updatedAgent =
Object.keys(updateData).length > 0
? await updateAgent({ id }, updateData, {
@@ -273,7 +255,7 @@ const updateAgentHandler = async (req, res) => {
* @param {object} req - Express Request
* @param {object} req.params - Request params
* @param {string} req.params.id - Agent identifier.
* @returns {Promise<Agent>} 201 - success response - application/json
* @returns {Agent} 201 - success response - application/json
*/
const duplicateAgentHandler = async (req, res) => {
const { id } = req.params;
@@ -306,19 +288,9 @@ const duplicateAgentHandler = async (req, res) => {
hour12: false,
})})`;
if (_tool_resources?.[EToolResources.context]) {
cloneData.tool_resources = {
[EToolResources.context]: _tool_resources[EToolResources.context],
};
}
if (_tool_resources?.[EToolResources.ocr]) {
cloneData.tool_resources = {
/** Legacy conversion from `ocr` to `context` */
[EToolResources.context]: {
...(_tool_resources[EToolResources.context] ?? {}),
..._tool_resources[EToolResources.ocr],
},
[EToolResources.ocr]: _tool_resources[EToolResources.ocr],
};
}
@@ -410,7 +382,7 @@ const duplicateAgentHandler = async (req, res) => {
* @param {object} req - Express Request
* @param {object} req.params - Request params
* @param {string} req.params.id - Agent identifier.
* @returns {Promise<Agent>} 200 - success response - application/json
* @returns {Agent} 200 - success response - application/json
*/
const deleteAgentHandler = async (req, res) => {
try {
@@ -512,7 +484,7 @@ const getListAgentsHandler = async (req, res) => {
* @param {Express.Multer.File} req.file - The avatar image file.
* @param {object} req.body - Request body
* @param {string} [req.body.avatar] - Optional avatar for the agent's avatar.
* @returns {Promise<void>} 200 - success response - application/json
* @returns {Object} 200 - success response - application/json
*/
const uploadAgentAvatarHandler = async (req, res) => {
try {

View File

@@ -512,7 +512,6 @@ describe('Agent Controllers - Mass Assignment Protection', () => {
mockReq.params.id = existingAgentId;
mockReq.body = {
tool_resources: {
/** Legacy conversion from `ocr` to `context` */
ocr: {
file_ids: ['ocr1', 'ocr2'],
},
@@ -532,8 +531,7 @@ describe('Agent Controllers - Mass Assignment Protection', () => {
const updatedAgent = mockRes.json.mock.calls[0][0];
expect(updatedAgent.tool_resources).toBeDefined();
expect(updatedAgent.tool_resources.ocr).toBeUndefined();
expect(updatedAgent.tool_resources.context).toBeDefined();
expect(updatedAgent.tool_resources.ocr).toBeDefined();
expect(updatedAgent.tool_resources.execute_code).toBeDefined();
expect(updatedAgent.tool_resources.invalid_tool).toBeUndefined();
});

View File

@@ -31,7 +31,7 @@ const createAssistant = async (req, res) => {
delete assistantData.conversation_starters;
delete assistantData.append_current_datetime;
const toolDefinitions = await getCachedTools();
const toolDefinitions = await getCachedTools({ includeGlobal: true });
assistantData.tools = tools
.map((tool) => {
@@ -136,7 +136,7 @@ const patchAssistant = async (req, res) => {
...updateData
} = req.body;
const toolDefinitions = await getCachedTools();
const toolDefinitions = await getCachedTools({ includeGlobal: true });
updateData.tools = (updateData.tools ?? [])
.map((tool) => {

View File

@@ -28,7 +28,7 @@ const createAssistant = async (req, res) => {
delete assistantData.conversation_starters;
delete assistantData.append_current_datetime;
const toolDefinitions = await getCachedTools();
const toolDefinitions = await getCachedTools({ includeGlobal: true });
assistantData.tools = tools
.map((tool) => {
@@ -125,7 +125,7 @@ const updateAssistant = async ({ req, openai, assistant_id, updateData }) => {
let hasFileSearch = false;
for (const tool of updateData.tools ?? []) {
const toolDefinitions = await getCachedTools();
const toolDefinitions = await getCachedTools({ includeGlobal: true });
let actualTool = typeof tool === 'string' ? toolDefinitions[tool] : tool;
if (!actualTool && manifestToolMap[tool] && manifestToolMap[tool].toolkit === true) {

View File

@@ -1,126 +0,0 @@
/**
* MCP Tools Controller
* Handles MCP-specific tool endpoints, decoupled from regular LibreChat tools
*/
const { logger } = require('@librechat/data-schemas');
const { Constants } = require('librechat-data-provider');
const {
cacheMCPServerTools,
getMCPServerTools,
getAppConfig,
} = require('~/server/services/Config');
const { getMCPManager } = require('~/config');
/**
* Get all MCP tools available to the user
*/
const getMCPTools = async (req, res) => {
try {
const userId = req.user?.id;
if (!userId) {
logger.warn('[getMCPTools] User ID not found in request');
return res.status(401).json({ message: 'Unauthorized' });
}
const appConfig = req.config ?? (await getAppConfig({ role: req.user?.role }));
if (!appConfig?.mcpConfig) {
return res.status(200).json({ servers: {} });
}
const mcpManager = getMCPManager();
const configuredServers = Object.keys(appConfig.mcpConfig);
const mcpServers = {};
const cachePromises = configuredServers.map((serverName) =>
getMCPServerTools(serverName).then((tools) => ({ serverName, tools })),
);
const cacheResults = await Promise.all(cachePromises);
const serverToolsMap = new Map();
for (const { serverName, tools } of cacheResults) {
if (tools) {
serverToolsMap.set(serverName, tools);
continue;
}
const serverTools = await mcpManager.getServerToolFunctions(userId, serverName);
if (!serverTools) {
logger.debug(`[getMCPTools] No tools found for server ${serverName}`);
continue;
}
serverToolsMap.set(serverName, serverTools);
if (Object.keys(serverTools).length > 0) {
// Cache asynchronously without blocking
cacheMCPServerTools({ serverName, serverTools }).catch((err) =>
logger.error(`[getMCPTools] Failed to cache tools for ${serverName}:`, err),
);
}
}
// Process each configured server
for (const serverName of configuredServers) {
try {
const serverTools = serverToolsMap.get(serverName);
// Get server config once
const serverConfig = appConfig.mcpConfig[serverName];
const rawServerConfig = mcpManager.getRawConfig(serverName);
// Initialize server object with all server-level data
const server = {
name: serverName,
icon: rawServerConfig?.iconPath || '',
authenticated: true,
authConfig: [],
tools: [],
};
// Set authentication config once for the server
if (serverConfig?.customUserVars) {
const customVarKeys = Object.keys(serverConfig.customUserVars);
if (customVarKeys.length > 0) {
server.authConfig = Object.entries(serverConfig.customUserVars).map(([key, value]) => ({
authField: key,
label: value.title || key,
description: value.description || '',
}));
server.authenticated = false;
}
}
// Process tools efficiently - no need for convertMCPToolToPlugin
if (serverTools) {
for (const [toolKey, toolData] of Object.entries(serverTools)) {
if (!toolData.function || !toolKey.includes(Constants.mcp_delimiter)) {
continue;
}
const toolName = toolKey.split(Constants.mcp_delimiter)[0];
server.tools.push({
name: toolName,
pluginKey: toolKey,
description: toolData.function.description || '',
});
}
}
// Only add server if it has tools or is configured
if (server.tools.length > 0 || serverConfig) {
mcpServers[serverName] = server;
}
} catch (error) {
logger.error(`[getMCPTools] Error loading tools for server ${serverName}:`, error);
}
}
res.status(200).json({ servers: mcpServers });
} catch (error) {
logger.error('[getMCPTools]', error);
res.status(500).json({ message: error.message });
}
};
module.exports = {
getMCPTools,
};
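
For context, the response shape this controller produced, as read directly from the code above:

// {
//   servers: {
//     [serverName]: {
//       name: string,
//       icon: string,            // rawServerConfig.iconPath or ''
//       authenticated: boolean,  // false when customUserVars are required
//       authConfig: [{ authField, label, description }],
//       tools: [{ name, pluginKey, description }],
//     },
//   },
// }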

View File

@@ -12,7 +12,6 @@ const { logger } = require('@librechat/data-schemas');
const mongoSanitize = require('express-mongo-sanitize');
const { isEnabled, ErrorController } = require('@librechat/api');
const { connectDb, indexSync } = require('~/db');
const initializeOAuthReconnectManager = require('./services/initializeOAuthReconnectManager');
const createValidateImageRequest = require('./middleware/validateImageRequest');
const { jwtLogin, ldapLogin, passportLogin } = require('~/strategies');
const { updateInterfacePermissions } = require('~/models/interface');
@@ -155,7 +154,7 @@ const startServer = async () => {
res.send(updatedIndexHtml);
});
app.listen(port, host, async () => {
app.listen(port, host, () => {
if (host === '0.0.0.0') {
logger.info(
`Server listening on all interfaces at port ${port}. Use http://localhost:${port} to access it`,
@@ -164,9 +163,7 @@ const startServer = async () => {
logger.info(`Server listening at http://${host == '0.0.0.0' ? 'localhost' : host}:${port}`);
}
await initializeMCPs();
await initializeOAuthReconnectManager();
await checkMigrations();
initializeMCPs().then(() => checkMigrations());
});
};

View File

@@ -1,7 +1,7 @@
const { logger } = require('@librechat/data-schemas');
const { PermissionBits, hasPermissions, ResourceType } = require('librechat-data-provider');
const { getEffectivePermissions } = require('~/server/services/PermissionService');
const { getAgents } = require('~/models/Agent');
const { getAgent } = require('~/models/Agent');
const { getFiles } = require('~/models/File');
/**
@@ -10,12 +10,11 @@ const { getFiles } = require('~/models/File');
*/
const checkAgentBasedFileAccess = async ({ userId, role, fileId }) => {
try {
/** Agents that have this file in their tool_resources */
const agentsWithFile = await getAgents({
// Find agents that have this file in their tool_resources
const agentsWithFile = await getAgent({
$or: [
{ 'tool_resources.execute_code.file_ids': fileId },
{ 'tool_resources.file_search.file_ids': fileId },
{ 'tool_resources.context.file_ids': fileId },
{ 'tool_resources.execute_code.file_ids': fileId },
{ 'tool_resources.ocr.file_ids': fileId },
],
});
@@ -25,7 +24,7 @@ const checkAgentBasedFileAccess = async ({ userId, role, fileId }) => {
}
// Check if user has access to any of these agents
for (const agent of agentsWithFile) {
for (const agent of Array.isArray(agentsWithFile) ? agentsWithFile : [agentsWithFile]) {
// Check if user is the agent author
if (agent.author && agent.author.toString() === userId) {
logger.debug(`[fileAccess] User is author of agent ${agent.id}`);
@@ -84,6 +83,7 @@ const fileAccess = async (req, res, next) => {
});
}
// Get the file
const [file] = await getFiles({ file_id: fileId });
if (!file) {
return res.status(404).json({
@@ -92,18 +92,20 @@ const fileAccess = async (req, res, next) => {
});
}
// Check if user owns the file
if (file.user && file.user.toString() === userId) {
req.fileAccess = { file };
return next();
}
/** Agent-based access (file inherits agent permissions) */
// Check agent-based access (file inherits agent permissions)
const hasAgentAccess = await checkAgentBasedFileAccess({ userId, role: userRole, fileId });
if (hasAgentAccess) {
req.fileAccess = { file };
return next();
}
// No access
logger.warn(`[fileAccess] User ${userId} denied access to file ${fileId}`);
return res.status(403).json({
error: 'Forbidden',

View File

@@ -1,483 +0,0 @@
const mongoose = require('mongoose');
const { ResourceType, PrincipalType, PrincipalModel } = require('librechat-data-provider');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { fileAccess } = require('./fileAccess');
const { User, Role, AclEntry } = require('~/db/models');
const { createAgent } = require('~/models/Agent');
const { createFile } = require('~/models/File');
describe('fileAccess middleware', () => {
let mongoServer;
let req, res, next;
let testUser, otherUser, thirdUser;
beforeAll(async () => {
mongoServer = await MongoMemoryServer.create();
const mongoUri = mongoServer.getUri();
await mongoose.connect(mongoUri);
});
afterAll(async () => {
await mongoose.disconnect();
await mongoServer.stop();
});
beforeEach(async () => {
await mongoose.connection.dropDatabase();
// Create test role
await Role.create({
name: 'test-role',
permissions: {
AGENTS: {
USE: true,
CREATE: true,
SHARED_GLOBAL: false,
},
},
});
// Create test users
testUser = await User.create({
email: 'test@example.com',
name: 'Test User',
username: 'testuser',
role: 'test-role',
});
otherUser = await User.create({
email: 'other@example.com',
name: 'Other User',
username: 'otheruser',
role: 'test-role',
});
thirdUser = await User.create({
email: 'third@example.com',
name: 'Third User',
username: 'thirduser',
role: 'test-role',
});
// Setup request/response objects
req = {
user: { id: testUser._id.toString(), role: testUser.role },
params: {},
};
res = {
status: jest.fn().mockReturnThis(),
json: jest.fn(),
};
next = jest.fn();
jest.clearAllMocks();
});
describe('basic file access', () => {
test('should allow access when user owns the file', async () => {
// Create a file owned by testUser
await createFile({
user: testUser._id.toString(),
file_id: 'file_owned_by_user',
filepath: '/test/file.txt',
filename: 'file.txt',
type: 'text/plain',
size: 100,
});
req.params.file_id = 'file_owned_by_user';
await fileAccess(req, res, next);
expect(next).toHaveBeenCalled();
expect(req.fileAccess).toBeDefined();
expect(req.fileAccess.file).toBeDefined();
expect(res.status).not.toHaveBeenCalled();
});
test('should deny access when user does not own the file and no agent access', async () => {
// Create a file owned by otherUser
await createFile({
user: otherUser._id.toString(),
file_id: 'file_owned_by_other',
filepath: '/test/file.txt',
filename: 'file.txt',
type: 'text/plain',
size: 100,
});
req.params.file_id = 'file_owned_by_other';
await fileAccess(req, res, next);
expect(next).not.toHaveBeenCalled();
expect(res.status).toHaveBeenCalledWith(403);
expect(res.json).toHaveBeenCalledWith({
error: 'Forbidden',
message: 'Insufficient permissions to access this file',
});
});
test('should return 404 when file does not exist', async () => {
req.params.file_id = 'non_existent_file';
await fileAccess(req, res, next);
expect(next).not.toHaveBeenCalled();
expect(res.status).toHaveBeenCalledWith(404);
expect(res.json).toHaveBeenCalledWith({
error: 'Not Found',
message: 'File not found',
});
});
test('should return 400 when file_id is missing', async () => {
// Don't set file_id in params
await fileAccess(req, res, next);
expect(next).not.toHaveBeenCalled();
expect(res.status).toHaveBeenCalledWith(400);
expect(res.json).toHaveBeenCalledWith({
error: 'Bad Request',
message: 'file_id is required',
});
});
test('should return 401 when user is not authenticated', async () => {
req.user = null;
req.params.file_id = 'some_file';
await fileAccess(req, res, next);
expect(next).not.toHaveBeenCalled();
expect(res.status).toHaveBeenCalledWith(401);
expect(res.json).toHaveBeenCalledWith({
error: 'Unauthorized',
message: 'Authentication required',
});
});
});
describe('agent-based file access', () => {
beforeEach(async () => {
// Create a file owned by otherUser (not testUser)
await createFile({
user: otherUser._id.toString(),
file_id: 'shared_file_via_agent',
filepath: '/test/shared.txt',
filename: 'shared.txt',
type: 'text/plain',
size: 100,
});
});
test('should allow access when user is author of agent with file', async () => {
// Create agent owned by testUser with the file
await createAgent({
id: `agent_${Date.now()}`,
name: 'Test Agent',
provider: 'openai',
model: 'gpt-4',
author: testUser._id,
tool_resources: {
file_search: {
file_ids: ['shared_file_via_agent'],
},
},
});
req.params.file_id = 'shared_file_via_agent';
await fileAccess(req, res, next);
expect(next).toHaveBeenCalled();
expect(req.fileAccess).toBeDefined();
expect(req.fileAccess.file).toBeDefined();
});
test('should allow access when user has VIEW permission on agent with file', async () => {
// Create agent owned by otherUser
const agent = await createAgent({
id: `agent_${Date.now()}`,
name: 'Shared Agent',
provider: 'openai',
model: 'gpt-4',
author: otherUser._id,
tool_resources: {
execute_code: {
file_ids: ['shared_file_via_agent'],
},
},
});
// Grant VIEW permission to testUser
await AclEntry.create({
principalType: PrincipalType.USER,
principalId: testUser._id,
principalModel: PrincipalModel.USER,
resourceType: ResourceType.AGENT,
resourceId: agent._id,
permBits: 1, // VIEW permission
grantedBy: otherUser._id,
});
req.params.file_id = 'shared_file_via_agent';
await fileAccess(req, res, next);
expect(next).toHaveBeenCalled();
expect(req.fileAccess).toBeDefined();
});
test('should check file in ocr tool_resources', async () => {
await createAgent({
id: `agent_ocr_${Date.now()}`,
name: 'OCR Agent',
provider: 'openai',
model: 'gpt-4',
author: testUser._id,
tool_resources: {
ocr: {
file_ids: ['shared_file_via_agent'],
},
},
});
req.params.file_id = 'shared_file_via_agent';
await fileAccess(req, res, next);
expect(next).toHaveBeenCalled();
expect(req.fileAccess).toBeDefined();
});
test('should deny access when user has no permission on agent with file', async () => {
// Create agent owned by otherUser without granting permission to testUser
const agent = await createAgent({
id: `agent_${Date.now()}`,
name: 'Private Agent',
provider: 'openai',
model: 'gpt-4',
author: otherUser._id,
tool_resources: {
file_search: {
file_ids: ['shared_file_via_agent'],
},
},
});
// Create ACL entry for otherUser only (owner)
await AclEntry.create({
principalType: PrincipalType.USER,
principalId: otherUser._id,
principalModel: PrincipalModel.USER,
resourceType: ResourceType.AGENT,
resourceId: agent._id,
permBits: 15, // All permissions
grantedBy: otherUser._id,
});
req.params.file_id = 'shared_file_via_agent';
await fileAccess(req, res, next);
expect(next).not.toHaveBeenCalled();
expect(res.status).toHaveBeenCalledWith(403);
});
});
describe('multiple agents with same file', () => {
/**
* This test suite verifies that when multiple agents have the same file,
* all agents are checked for permissions, not just the first one found.
* This ensures users can access files through any agent they have permission for.
*/
test('should check ALL agents with file, not just first one', async () => {
// Create a file owned by someone else
await createFile({
user: otherUser._id.toString(),
file_id: 'multi_agent_file',
filepath: '/test/multi.txt',
filename: 'multi.txt',
type: 'text/plain',
size: 100,
});
// Create first agent (owned by otherUser, no access for testUser)
const agent1 = await createAgent({
id: 'agent_no_access',
name: 'No Access Agent',
provider: 'openai',
model: 'gpt-4',
author: otherUser._id,
tool_resources: {
file_search: {
file_ids: ['multi_agent_file'],
},
},
});
// Create ACL for agent1 - only otherUser has access
await AclEntry.create({
principalType: PrincipalType.USER,
principalId: otherUser._id,
principalModel: PrincipalModel.USER,
resourceType: ResourceType.AGENT,
resourceId: agent1._id,
permBits: 15,
grantedBy: otherUser._id,
});
// Create second agent (owned by thirdUser, but testUser has VIEW access)
const agent2 = await createAgent({
id: 'agent_with_access',
name: 'Accessible Agent',
provider: 'openai',
model: 'gpt-4',
author: thirdUser._id,
tool_resources: {
file_search: {
file_ids: ['multi_agent_file'],
},
},
});
// Grant testUser VIEW access to agent2
await AclEntry.create({
principalType: PrincipalType.USER,
principalId: testUser._id,
principalModel: PrincipalModel.USER,
resourceType: ResourceType.AGENT,
resourceId: agent2._id,
permBits: 1, // VIEW permission
grantedBy: thirdUser._id,
});
req.params.file_id = 'multi_agent_file';
await fileAccess(req, res, next);
/**
* Should succeed because testUser has access to agent2,
* even though they don't have access to agent1.
* The fix ensures all agents are checked, not just the first one.
*/
expect(next).toHaveBeenCalled();
expect(req.fileAccess).toBeDefined();
expect(res.status).not.toHaveBeenCalled();
});
test('should find file in any agent tool_resources type', async () => {
// Create a file
await createFile({
user: otherUser._id.toString(),
file_id: 'multi_tool_file',
filepath: '/test/tool.txt',
filename: 'tool.txt',
type: 'text/plain',
size: 100,
});
// Agent 1: file in file_search (no access for testUser)
await createAgent({
id: 'agent_file_search',
name: 'File Search Agent',
provider: 'openai',
model: 'gpt-4',
author: otherUser._id,
tool_resources: {
file_search: {
file_ids: ['multi_tool_file'],
},
},
});
// Agent 2: same file in execute_code (testUser has access)
await createAgent({
id: 'agent_execute_code',
name: 'Execute Code Agent',
provider: 'openai',
model: 'gpt-4',
author: thirdUser._id,
tool_resources: {
execute_code: {
file_ids: ['multi_tool_file'],
},
},
});
// Agent 3: same file in ocr (testUser also has access)
await createAgent({
id: 'agent_ocr',
name: 'OCR Agent',
provider: 'openai',
model: 'gpt-4',
author: testUser._id, // testUser owns this one
tool_resources: {
ocr: {
file_ids: ['multi_tool_file'],
},
},
});
req.params.file_id = 'multi_tool_file';
await fileAccess(req, res, next);
/**
* Should succeed because testUser owns agent3,
* even if other agents with the file are found first.
*/
expect(next).toHaveBeenCalled();
expect(req.fileAccess).toBeDefined();
});
});
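
The rule these tests pin down, checking every agent that references the file rather than stopping at the first match, can be sketched as follows; a minimal sketch, where canView stands in for the ACL/permission check used by the middleware (a hypothetical parameter, not a real helper):

// Grant access if the user authored ANY agent that carries the file,
// or has view rights on any of them; otherwise the middleware responds 403.
async function hasAccessThroughAgents({ userId, agents, canView }) {
  for (const agent of agents) {
    if (agent.author && agent.author.toString() === userId) {
      return true; // author of an agent that references the file
    }
    if (await canView(userId, agent)) {
      return true; // e.g. an ACL entry with VIEW (permBits 1), as set up in these tests
    }
  }
  return false;
}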
describe('edge cases', () => {
test('should handle agent with empty tool_resources', async () => {
await createFile({
user: otherUser._id.toString(),
file_id: 'orphan_file',
filepath: '/test/orphan.txt',
filename: 'orphan.txt',
type: 'text/plain',
size: 100,
});
// Create agent with no files in tool_resources
await createAgent({
id: `agent_empty_${Date.now()}`,
name: 'Empty Resources Agent',
provider: 'openai',
model: 'gpt-4',
author: testUser._id,
tool_resources: {},
});
req.params.file_id = 'orphan_file';
await fileAccess(req, res, next);
expect(next).not.toHaveBeenCalled();
expect(res.status).toHaveBeenCalledWith(403);
});
test('should handle agent with null tool_resources', async () => {
await createFile({
user: otherUser._id.toString(),
file_id: 'another_orphan_file',
filepath: '/test/orphan2.txt',
filename: 'orphan2.txt',
type: 'text/plain',
size: 100,
});
// Create agent with null tool_resources
await createAgent({
id: `agent_null_${Date.now()}`,
name: 'Null Resources Agent',
provider: 'openai',
model: 'gpt-4',
author: testUser._id,
tool_resources: null,
});
req.params.file_id = 'another_orphan_file';
await fileAccess(req, res, next);
expect(next).not.toHaveBeenCalled();
expect(res.status).toHaveBeenCalledWith(403);
});
});
});

View File

@@ -1,5 +1,5 @@
const { logger } = require('@librechat/data-schemas');
const { isEmailDomainAllowed } = require('@librechat/api');
const { isEmailDomainAllowed } = require('~/server/services/domains');
const { getAppConfig } = require('~/server/services/Config');
/**

View File

@@ -11,9 +11,6 @@ jest.mock('@librechat/api', () => ({
completeOAuthFlow: jest.fn(),
generateFlowId: jest.fn(),
},
MCPTokenStorage: {
storeTokens: jest.fn(),
},
getUserMCPAuthMap: jest.fn(),
}));
@@ -50,8 +47,8 @@ jest.mock('~/server/services/Config', () => ({
loadCustomConfig: jest.fn(),
}));
jest.mock('~/server/services/Config/mcp', () => ({
updateMCPServerTools: jest.fn(),
jest.mock('~/server/services/Config/mcpToolsCache', () => ({
updateMCPUserTools: jest.fn(),
}));
jest.mock('~/server/services/MCP', () => ({
@@ -237,7 +234,7 @@ describe('MCP Routes', () => {
});
describe('GET /:serverName/oauth/callback', () => {
const { MCPOAuthHandler, MCPTokenStorage } = require('@librechat/api');
const { MCPOAuthHandler } = require('@librechat/api');
const { getLogStores } = require('~/cache');
it('should redirect to error page when OAuth error is received', async () => {
@@ -283,7 +280,6 @@ describe('MCP Routes', () => {
it('should handle OAuth callback successfully', async () => {
const mockFlowManager = {
completeFlow: jest.fn().mockResolvedValue(),
deleteFlow: jest.fn().mockResolvedValue(true),
};
const mockFlowState = {
serverName: 'test-server',
@@ -299,7 +295,6 @@ describe('MCP Routes', () => {
MCPOAuthHandler.getFlowState.mockResolvedValue(mockFlowState);
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
MCPTokenStorage.storeTokens.mockResolvedValue();
getLogStores.mockReturnValue({});
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);
@@ -337,24 +332,11 @@ describe('MCP Routes', () => {
'test-auth-code',
mockFlowManager,
);
expect(MCPTokenStorage.storeTokens).toHaveBeenCalledWith(
expect.objectContaining({
userId: 'test-user-id',
serverName: 'test-server',
tokens: mockTokens,
clientInfo: mockFlowState.clientInfo,
metadata: mockFlowState.metadata,
}),
);
const storeInvocation = MCPTokenStorage.storeTokens.mock.invocationCallOrder[0];
const connectInvocation = mockMcpManager.getUserConnection.mock.invocationCallOrder[0];
expect(storeInvocation).toBeLessThan(connectInvocation);
expect(mockFlowManager.completeFlow).toHaveBeenCalledWith(
'tool-flow-123',
'mcp_oauth',
mockTokens,
);
expect(mockFlowManager.deleteFlow).toHaveBeenCalledWith('test-flow-id', 'mcp_get_tokens');
});
it('should redirect to error page when callback processing fails', async () => {
@@ -372,7 +354,6 @@ describe('MCP Routes', () => {
it('should handle system-level OAuth completion', async () => {
const mockFlowManager = {
completeFlow: jest.fn().mockResolvedValue(),
deleteFlow: jest.fn().mockResolvedValue(true),
};
const mockFlowState = {
serverName: 'test-server',
@@ -388,7 +369,6 @@ describe('MCP Routes', () => {
MCPOAuthHandler.getFlowState.mockResolvedValue(mockFlowState);
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
MCPTokenStorage.storeTokens.mockResolvedValue();
getLogStores.mockReturnValue({});
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);
@@ -399,13 +379,11 @@ describe('MCP Routes', () => {
expect(response.status).toBe(302);
expect(response.headers.location).toBe('/oauth/success?serverName=test-server');
expect(mockFlowManager.deleteFlow).toHaveBeenCalledWith('test-flow-id', 'mcp_get_tokens');
});
it('should handle reconnection failure after OAuth', async () => {
const mockFlowManager = {
completeFlow: jest.fn().mockResolvedValue(),
deleteFlow: jest.fn().mockResolvedValue(true),
};
const mockFlowState = {
serverName: 'test-server',
@@ -421,7 +399,6 @@ describe('MCP Routes', () => {
MCPOAuthHandler.getFlowState.mockResolvedValue(mockFlowState);
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
MCPTokenStorage.storeTokens.mockResolvedValue();
getLogStores.mockReturnValue({});
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);
@@ -441,46 +418,6 @@ describe('MCP Routes', () => {
expect(response.status).toBe(302);
expect(response.headers.location).toBe('/oauth/success?serverName=test-server');
expect(MCPTokenStorage.storeTokens).toHaveBeenCalled();
expect(mockFlowManager.deleteFlow).toHaveBeenCalledWith('test-flow-id', 'mcp_get_tokens');
});
it('should redirect to error page if token storage fails', async () => {
const mockFlowManager = {
completeFlow: jest.fn().mockResolvedValue(),
deleteFlow: jest.fn().mockResolvedValue(true),
};
const mockFlowState = {
serverName: 'test-server',
userId: 'test-user-id',
metadata: { toolFlowId: 'tool-flow-123' },
clientInfo: {},
codeVerifier: 'test-verifier',
};
const mockTokens = {
access_token: 'test-access-token',
refresh_token: 'test-refresh-token',
};
MCPOAuthHandler.getFlowState.mockResolvedValue(mockFlowState);
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
MCPTokenStorage.storeTokens.mockRejectedValue(new Error('store failed'));
getLogStores.mockReturnValue({});
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);
const mockMcpManager = {
getUserConnection: jest.fn(),
};
require('~/config').getMCPManager.mockReturnValue(mockMcpManager);
const response = await request(app).get('/api/mcp/test-server/oauth/callback').query({
code: 'test-auth-code',
state: 'test-flow-id',
});
expect(response.status).toBe(302);
expect(response.headers.location).toBe('/oauth/error?error=callback_failed');
expect(mockMcpManager.getUserConnection).not.toHaveBeenCalled();
});
});
@@ -841,10 +778,10 @@ describe('MCP Routes', () => {
require('~/cache').getLogStores.mockReturnValue({});
const { getCachedTools, setCachedTools } = require('~/server/services/Config');
const { updateMCPServerTools } = require('~/server/services/Config/mcp');
const { updateMCPUserTools } = require('~/server/services/Config/mcpToolsCache');
getCachedTools.mockResolvedValue({});
setCachedTools.mockResolvedValue();
updateMCPServerTools.mockResolvedValue();
updateMCPUserTools.mockResolvedValue();
require('~/server/services/Tools/mcp').reinitMCPServer.mockResolvedValue({
success: true,
@@ -899,10 +836,10 @@ describe('MCP Routes', () => {
]);
const { getCachedTools, setCachedTools } = require('~/server/services/Config');
const { updateMCPServerTools } = require('~/server/services/Config/mcp');
const { updateMCPUserTools } = require('~/server/services/Config/mcpToolsCache');
getCachedTools.mockResolvedValue({});
setCachedTools.mockResolvedValue();
updateMCPServerTools.mockResolvedValue();
updateMCPUserTools.mockResolvedValue();
require('~/server/services/Tools/mcp').reinitMCPServer.mockResolvedValue({
success: true,
@@ -1206,11 +1143,7 @@ describe('MCP Routes', () => {
describe('GET /:serverName/oauth/callback - Edge Cases', () => {
it('should handle OAuth callback without toolFlowId (falsy toolFlowId)', async () => {
const { MCPOAuthHandler, MCPTokenStorage } = require('@librechat/api');
const mockTokens = {
access_token: 'edge-access-token',
refresh_token: 'edge-refresh-token',
};
const { MCPOAuthHandler } = require('@librechat/api');
MCPOAuthHandler.getFlowState = jest.fn().mockResolvedValue({
id: 'test-flow-id',
userId: 'test-user-id',
@@ -1222,8 +1155,6 @@ describe('MCP Routes', () => {
clientInfo: {},
codeVerifier: 'test-verifier',
});
MCPOAuthHandler.completeOAuthFlow = jest.fn().mockResolvedValue(mockTokens);
MCPTokenStorage.storeTokens.mockResolvedValue();
const mockFlowManager = {
completeFlow: jest.fn(),
@@ -1248,11 +1179,6 @@ describe('MCP Routes', () => {
it('should handle null cached tools in OAuth callback (triggers || {} fallback)', async () => {
const { getCachedTools } = require('~/server/services/Config');
getCachedTools.mockResolvedValue(null);
const { MCPOAuthHandler, MCPTokenStorage } = require('@librechat/api');
const mockTokens = {
access_token: 'edge-access-token',
refresh_token: 'edge-refresh-token',
};
const mockFlowManager = {
getFlowState: jest.fn().mockResolvedValue({
@@ -1265,15 +1191,6 @@ describe('MCP Routes', () => {
completeFlow: jest.fn(),
};
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);
MCPOAuthHandler.getFlowState.mockResolvedValue({
serverName: 'test-server',
userId: 'test-user-id',
metadata: { serverUrl: 'https://example.com', oauth: {} },
clientInfo: {},
codeVerifier: 'test-verifier',
});
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
MCPTokenStorage.storeTokens.mockResolvedValue();
const mockMcpManager = {
getUserConnection: jest.fn().mockResolvedValue({

View File

@@ -1,19 +1,20 @@
const express = require('express');
const { nanoid } = require('nanoid');
const { logger } = require('@librechat/data-schemas');
const { generateCheckAccess, isActionDomainAllowed } = require('@librechat/api');
const { generateCheckAccess } = require('@librechat/api');
const {
Permissions,
ResourceType,
PermissionBits,
PermissionTypes,
actionDelimiter,
PermissionBits,
removeNullishValues,
} = require('librechat-data-provider');
const { encryptMetadata, domainParser } = require('~/server/services/ActionService');
const { findAccessibleResources } = require('~/server/services/PermissionService');
const { getAgent, updateAgent, getListAgentsByAccess } = require('~/models/Agent');
const { updateAction, getActions, deleteAction } = require('~/models/Action');
const { isActionDomainAllowed } = require('~/server/services/domains');
const { canAccessAgentResource } = require('~/server/middleware');
const { getRoleByName } = require('~/models/Role');

View File

@@ -1,12 +1,12 @@
const express = require('express');
const { nanoid } = require('nanoid');
const { logger } = require('@librechat/data-schemas');
const { isActionDomainAllowed } = require('@librechat/api');
const { actionDelimiter, EModelEndpoint, removeNullishValues } = require('librechat-data-provider');
const { encryptMetadata, domainParser } = require('~/server/services/ActionService');
const { getOpenAIClient } = require('~/server/controllers/assistants/helpers');
const { updateAction, getActions, deleteAction } = require('~/models/Action');
const { updateAssistantDoc, getAssistant } = require('~/models/Assistant');
const { isActionDomainAllowed } = require('~/server/services/domains');
const router = express.Router();

View File

@@ -1,33 +1,19 @@
const { Router } = require('express');
const { logger } = require('@librechat/data-schemas');
const { CacheKeys, Constants } = require('librechat-data-provider');
const {
createSafeUser,
MCPOAuthHandler,
MCPTokenStorage,
getUserMCPAuthMap,
} = require('@librechat/api');
const { getMCPManager, getFlowStateManager, getOAuthReconnectionManager } = require('~/config');
const { MCPOAuthHandler, getUserMCPAuthMap } = require('@librechat/api');
const { getMCPSetupData, getServerConnectionStatus } = require('~/server/services/MCP');
const { findToken, updateToken, createToken, deleteTokens } = require('~/models');
const { updateMCPUserTools } = require('~/server/services/Config/mcpToolsCache');
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const { updateMCPServerTools } = require('~/server/services/Config/mcp');
const { CacheKeys, Constants } = require('librechat-data-provider');
const { getMCPManager, getFlowStateManager } = require('~/config');
const { reinitMCPServer } = require('~/server/services/Tools/mcp');
const { getMCPTools } = require('~/server/controllers/mcp');
const { requireJwtAuth } = require('~/server/middleware');
const { findPluginAuthsByKeys } = require('~/models');
const { getLogStores } = require('~/cache');
const router = Router();
/**
* Get all MCP tools available to the user
* Returns only MCP tools, completely decoupled from regular LibreChat tools
*/
router.get('/tools', requireJwtAuth, async (req, res) => {
return getMCPTools(req, res);
});
/**
* Initiate OAuth flow
* This endpoint is called when the user clicks the auth link in the UI
@@ -135,41 +121,6 @@ router.get('/:serverName/oauth/callback', async (req, res) => {
const tokens = await MCPOAuthHandler.completeOAuthFlow(flowId, code, flowManager);
logger.info('[MCP OAuth] OAuth flow completed, tokens received in callback route');
/** Persist tokens immediately so reconnection uses fresh credentials */
if (flowState?.userId && tokens) {
try {
await MCPTokenStorage.storeTokens({
userId: flowState.userId,
serverName,
tokens,
createToken,
updateToken,
findToken,
clientInfo: flowState.clientInfo,
metadata: flowState.metadata,
});
logger.debug('[MCP OAuth] Stored OAuth tokens prior to reconnection', {
serverName,
userId: flowState.userId,
});
} catch (error) {
logger.error('[MCP OAuth] Failed to store OAuth tokens after callback', error);
throw error;
}
/**
* Clear any cached `mcp_get_tokens` flow result so subsequent lookups
* re-fetch the freshly stored credentials instead of returning stale nulls.
*/
if (typeof flowManager?.deleteFlow === 'function') {
try {
await flowManager.deleteFlow(flowId, 'mcp_get_tokens');
} catch (error) {
logger.warn('[MCP OAuth] Failed to clear cached token flow state', error);
}
}
}
try {
const mcpManager = getMCPManager(flowState.userId);
logger.debug(`[MCP OAuth] Attempting to reconnect ${serverName} with new OAuth tokens`);
@@ -193,12 +144,9 @@ router.get('/:serverName/oauth/callback', async (req, res) => {
`[MCP OAuth] Successfully reconnected ${serverName} for user ${flowState.userId}`,
);
// clear any reconnection attempts
const oauthReconnectionManager = getOAuthReconnectionManager();
oauthReconnectionManager.clearReconnection(flowState.userId, serverName);
const tools = await userConnection.fetchTools();
await updateMCPServerTools({
await updateMCPUserTools({
userId: flowState.userId,
serverName,
tools,
});
@@ -340,9 +288,9 @@ router.post('/oauth/cancel/:serverName', requireJwtAuth, async (req, res) => {
router.post('/:serverName/reinitialize', requireJwtAuth, async (req, res) => {
try {
const { serverName } = req.params;
const user = createSafeUser(req.user);
const user = req.user;
if (!user.id) {
if (!user?.id) {
return res.status(401).json({ error: 'User not authenticated' });
}
@@ -372,7 +320,7 @@ router.post('/:serverName/reinitialize', requireJwtAuth, async (req, res) => {
}
const result = await reinitMCPServer({
user,
req,
serverName,
userMCPAuthMap,
});
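For orientation, the routes touched in this file are exercised by authenticated clients roughly as sketched below; the /api/mcp mount point matches the test requests above, while the bearer-token handling shown here is an assumption rather than part of the diff.

// Hedged client-side sketch; assumes a JWT that satisfies requireJwtAuth.
async function listMcpTools(jwt) {
  const response = await fetch('/api/mcp/tools', {
    headers: { Authorization: `Bearer ${jwt}` },
  });
  return response.json();
}

async function reinitializeServer(serverName, jwt) {
  const response = await fetch(`/api/mcp/${encodeURIComponent(serverName)}/reinitialize`, {
    method: 'POST',
    headers: { Authorization: `Bearer ${jwt}` },
  });
  if (!response.ok) {
    throw new Error(`Reinitialize failed with status ${response.status}`);
  }
  return response.json();
}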

View File

@@ -3,8 +3,8 @@ const { logger } = require('@librechat/data-schemas');
const { ContentTypes } = require('librechat-data-provider');
const {
saveConvo,
getMessage,
saveMessage,
getMessage,
getMessages,
updateMessage,
deleteMessages,
@@ -58,51 +58,34 @@ router.get('/', async (req, res) => {
const nextCursor = messages.length > pageSize ? messages.pop()[sortField] : null;
response = { messages, nextCursor };
} else if (search) {
const searchResults = await Message.meiliSearch(search, { filter: `user = "${user}"` }, true);
const searchResults = await Message.meiliSearch(search, undefined, true);
const messages = searchResults.hits || [];
const result = await getConvosQueried(req.user.id, messages, cursor);
const messageIds = [];
const cleanedMessages = [];
const activeMessages = [];
for (let i = 0; i < messages.length; i++) {
let message = messages[i];
if (message.conversationId.includes('--')) {
message.conversationId = cleanUpPrimaryKeyValue(message.conversationId);
}
if (result.convoMap[message.conversationId]) {
messageIds.push(message.messageId);
cleanedMessages.push(message);
const convo = result.convoMap[message.conversationId];
const dbMessage = await getMessage({ user, messageId: message.messageId });
activeMessages.push({
...message,
title: convo.title,
conversationId: message.conversationId,
model: convo.model,
isCreatedByUser: dbMessage?.isCreatedByUser,
endpoint: dbMessage?.endpoint,
iconURL: dbMessage?.iconURL,
});
}
}
const dbMessages = await getMessages({
user,
messageId: { $in: messageIds },
});
const dbMessageMap = {};
for (const dbMessage of dbMessages) {
dbMessageMap[dbMessage.messageId] = dbMessage;
}
const activeMessages = [];
for (const message of cleanedMessages) {
const convo = result.convoMap[message.conversationId];
const dbMessage = dbMessageMap[message.messageId];
activeMessages.push({
...message,
title: convo.title,
conversationId: message.conversationId,
model: convo.model,
isCreatedByUser: dbMessage?.isCreatedByUser,
endpoint: dbMessage?.endpoint,
iconURL: dbMessage?.iconURL,
});
}
response = { messages: activeMessages, nextCursor: null };
} else {
response = { messages: [], nextCursor: null };

View File

@@ -1,12 +1,16 @@
const { FileSources, EModelEndpoint, getConfigDefaults } = require('librechat-data-provider');
const {
isEnabled,
loadOCRConfig,
loadMemoryConfig,
agentsConfigSetup,
loadWebSearchConfig,
loadDefaultInterface,
} = require('@librechat/api');
const {
FileSources,
loadOCRConfig,
EModelEndpoint,
getConfigDefaults,
} = require('librechat-data-provider');
const {
checkWebSearchConfig,
checkVariables,

View File

@@ -142,6 +142,7 @@ describe('AppService', () => {
turnstileConfig: mockedTurnstileConfig,
modelSpecs: undefined,
paths: expect.anything(),
ocr: expect.anything(),
imageOutputType: expect.any(String),
fileConfig: undefined,
secureImageLinks: undefined,

View File

@@ -2,7 +2,7 @@ const bcrypt = require('bcryptjs');
const jwt = require('jsonwebtoken');
const { webcrypto } = require('node:crypto');
const { logger } = require('@librechat/data-schemas');
const { isEnabled, checkEmailConfig, isEmailDomainAllowed } = require('@librechat/api');
const { isEnabled, checkEmailConfig } = require('@librechat/api');
const { ErrorTypes, SystemRoles, errorsToString } = require('librechat-data-provider');
const {
findUser,
@@ -20,6 +20,7 @@ const {
deleteUserById,
generateRefreshToken,
} = require('~/models');
const { isEmailDomainAllowed } = require('~/server/services/domains');
const { registerSchema } = require('~/strategies/validators');
const { getAppConfig } = require('~/server/services/Config');
const { sendEmail } = require('~/server/utils');
@@ -129,7 +130,7 @@ const verifyEmail = async (req) => {
return { message: 'Email already verified', status: 'success' };
}
let emailVerificationData = await findToken({ email: decodedEmail }, { sort: { createdAt: -1 } });
let emailVerificationData = await findToken({ email: decodedEmail });
if (!emailVerificationData) {
logger.warn(`[verifyEmail] [No email verification data found] [Email: ${decodedEmail}]`);
@@ -319,12 +320,9 @@ const requestPasswordReset = async (req) => {
* @returns
*/
const resetPassword = async (userId, token, password) => {
let passwordResetToken = await findToken(
{
userId,
},
{ sort: { createdAt: -1 } },
);
let passwordResetToken = await findToken({
userId,
});
if (!passwordResetToken) {
return new Error('Invalid or expired password reset token');
@@ -359,18 +357,23 @@ const resetPassword = async (userId, token, password) => {
/**
* Set Auth Tokens
*
* @param {String | ObjectId} userId
* @param {ServerResponse} res
* @param {ISession | null} [session=null]
* @param {Object} res
* @param {String} sessionId
* @returns
*/
const setAuthTokens = async (userId, res, _session = null) => {
const setAuthTokens = async (userId, res, sessionId = null) => {
try {
let session = _session;
const user = await getUserById(userId);
const token = await generateToken(user);
let session;
let refreshToken;
let refreshTokenExpires;
if (session && session._id && session.expiration != null) {
if (sessionId) {
session = await findSession({ sessionId: sessionId }, { lean: false });
refreshTokenExpires = session.expiration.getTime();
refreshToken = await generateRefreshToken(session);
} else {
@@ -380,9 +383,6 @@ const setAuthTokens = async (userId, res, _session = null) => {
refreshTokenExpires = session.expiration.getTime();
}
const user = await getUserById(userId);
const token = await generateToken(user);
res.cookie('refreshToken', refreshToken, {
expires: new Date(refreshTokenExpires),
httpOnly: true,

View File

@@ -36,7 +36,7 @@ async function getAppConfig(options = {}) {
}
if (baseConfig.availableTools) {
await setCachedTools(baseConfig.availableTools);
await setCachedTools(baseConfig.availableTools, { isGlobal: true });
}
await cache.set(BASE_CONFIG_KEY, baseConfig);

View File

@@ -3,32 +3,89 @@ const getLogStores = require('~/cache/getLogStores');
/**
* Cache key generators for different tool access patterns
* These will support future permission-based caching
*/
const ToolCacheKeys = {
/** Global tools available to all users */
GLOBAL: 'tools:global',
/** MCP tools cached by server name */
MCP_SERVER: (serverName) => `tools:mcp:${serverName}`,
/** Tools available to a specific user */
USER: (userId) => `tools:user:${userId}`,
/** Tools available to a specific role */
ROLE: (roleId) => `tools:role:${roleId}`,
/** Tools available to a specific group */
GROUP: (groupId) => `tools:group:${groupId}`,
/** Combined effective tools for a user (computed from all sources) */
EFFECTIVE: (userId) => `tools:effective:${userId}`,
};
/**
* Retrieves available tools from cache
* @function getCachedTools
* @param {Object} options - Options for retrieving tools
* @param {string} [options.serverName] - MCP server name to get cached tools for
* @param {string} [options.userId] - User ID for user-specific tools
* @param {string[]} [options.roleIds] - Role IDs for role-based tools
* @param {string[]} [options.groupIds] - Group IDs for group-based tools
* @param {boolean} [options.includeGlobal=true] - Whether to include global tools
* @returns {Promise<LCAvailableTools|null>} The available tools object or null if not cached
*/
async function getCachedTools(options = {}) {
const cache = getLogStores(CacheKeys.CONFIG_STORE);
const { serverName } = options;
const { userId, roleIds = [], groupIds = [], includeGlobal = true } = options;
// Return MCP server-specific tools if requested
if (serverName) {
return await cache.get(ToolCacheKeys.MCP_SERVER(serverName));
// For now, return global tools (current behavior)
// This will be expanded to merge tools from different sources
if (!userId && includeGlobal) {
return await cache.get(ToolCacheKeys.GLOBAL);
}
// Default to global tools
return await cache.get(ToolCacheKeys.GLOBAL);
// Future implementation will merge tools from multiple sources
// based on user permissions, roles, and groups
if (userId) {
/** @type {LCAvailableTools | null} Check if we have pre-computed effective tools for this user */
const effectiveTools = await cache.get(ToolCacheKeys.EFFECTIVE(userId));
if (effectiveTools) {
return effectiveTools;
}
/** @type {LCAvailableTools | null} Otherwise, compute from individual sources */
const toolSources = [];
if (includeGlobal) {
const globalTools = await cache.get(ToolCacheKeys.GLOBAL);
if (globalTools) {
toolSources.push(globalTools);
}
}
// User-specific tools
const userTools = await cache.get(ToolCacheKeys.USER(userId));
if (userTools) {
toolSources.push(userTools);
}
// Role-based tools
for (const roleId of roleIds) {
const roleTools = await cache.get(ToolCacheKeys.ROLE(roleId));
if (roleTools) {
toolSources.push(roleTools);
}
}
// Group-based tools
for (const groupId of groupIds) {
const groupTools = await cache.get(ToolCacheKeys.GROUP(groupId));
if (groupTools) {
toolSources.push(groupTools);
}
}
// Merge all tool sources (for now, simple merge - future will handle conflicts)
if (toolSources.length > 0) {
return mergeToolSources(toolSources);
}
}
return null;
}
/**
@@ -36,34 +93,49 @@ async function getCachedTools(options = {}) {
* @function setCachedTools
* @param {Object} tools - The tools object to cache
* @param {Object} options - Options for caching tools
* @param {string} [options.serverName] - MCP server name for server-specific tools
* @param {string} [options.userId] - User ID for user-specific tools
* @param {string} [options.roleId] - Role ID for role-based tools
* @param {string} [options.groupId] - Group ID for group-based tools
* @param {boolean} [options.isGlobal=false] - Whether these are global tools
* @param {number} [options.ttl] - Time to live in milliseconds
* @returns {Promise<boolean>} Whether the operation was successful
*/
async function setCachedTools(tools, options = {}) {
const cache = getLogStores(CacheKeys.CONFIG_STORE);
const { serverName, ttl } = options;
const { userId, roleId, groupId, isGlobal = false, ttl } = options;
// Cache by MCP server if specified
if (serverName) {
return await cache.set(ToolCacheKeys.MCP_SERVER(serverName), tools, ttl);
let cacheKey;
if (isGlobal || (!userId && !roleId && !groupId)) {
cacheKey = ToolCacheKeys.GLOBAL;
} else if (userId) {
cacheKey = ToolCacheKeys.USER(userId);
} else if (roleId) {
cacheKey = ToolCacheKeys.ROLE(roleId);
} else if (groupId) {
cacheKey = ToolCacheKeys.GROUP(groupId);
}
// Default to global cache
return await cache.set(ToolCacheKeys.GLOBAL, tools, ttl);
if (!cacheKey) {
throw new Error('Invalid cache key options provided');
}
return await cache.set(cacheKey, tools, ttl);
}
/**
* Invalidates cached tools
* @function invalidateCachedTools
* @param {Object} options - Options for invalidating tools
* @param {string} [options.serverName] - MCP server name to invalidate
* @param {string} [options.userId] - User ID to invalidate
* @param {string} [options.roleId] - Role ID to invalidate
* @param {string} [options.groupId] - Group ID to invalidate
* @param {boolean} [options.invalidateGlobal=false] - Whether to invalidate global tools
* @param {boolean} [options.invalidateEffective=true] - Whether to invalidate effective tools
* @returns {Promise<void>}
*/
async function invalidateCachedTools(options = {}) {
const cache = getLogStores(CacheKeys.CONFIG_STORE);
const { serverName, invalidateGlobal = false } = options;
const { userId, roleId, groupId, invalidateGlobal = false, invalidateEffective = true } = options;
const keysToDelete = [];
@@ -71,34 +143,116 @@ async function invalidateCachedTools(options = {}) {
keysToDelete.push(ToolCacheKeys.GLOBAL);
}
if (serverName) {
keysToDelete.push(ToolCacheKeys.MCP_SERVER(serverName));
if (userId) {
keysToDelete.push(ToolCacheKeys.USER(userId));
if (invalidateEffective) {
keysToDelete.push(ToolCacheKeys.EFFECTIVE(userId));
}
}
if (roleId) {
keysToDelete.push(ToolCacheKeys.ROLE(roleId));
// TODO: In future, invalidate all users with this role
}
if (groupId) {
keysToDelete.push(ToolCacheKeys.GROUP(groupId));
// TODO: In future, invalidate all users in this group
}
await Promise.all(keysToDelete.map((key) => cache.delete(key)));
}
/**
* Gets MCP tools for a specific server from cache or merges with global tools
* @function getMCPServerTools
* @param {string} serverName - The MCP server name
* @returns {Promise<LCAvailableTools|null>} The available tools for the server
* Computes and caches effective tools for a user
* @function computeEffectiveTools
* @param {string} userId - The user ID
* @param {Object} context - Context containing user's roles and groups
* @param {string[]} [context.roleIds=[]] - User's role IDs
* @param {string[]} [context.groupIds=[]] - User's group IDs
* @param {number} [ttl] - Time to live for the computed result
* @returns {Promise<Object>} The computed effective tools
*/
async function getMCPServerTools(serverName) {
const cache = getLogStores(CacheKeys.CONFIG_STORE);
const serverTools = await cache.get(ToolCacheKeys.MCP_SERVER(serverName));
async function computeEffectiveTools(userId, context = {}, ttl) {
const { roleIds = [], groupIds = [] } = context;
if (serverTools) {
return serverTools;
// Get all tool sources
const tools = await getCachedTools({
userId,
roleIds,
groupIds,
includeGlobal: true,
});
if (tools) {
// Cache the computed result
const cache = getLogStores(CacheKeys.CONFIG_STORE);
await cache.set(ToolCacheKeys.EFFECTIVE(userId), tools, ttl);
}
return null;
return tools;
}
/**
* Merges multiple tool sources into a single tools object
* @function mergeToolSources
* @param {Object[]} sources - Array of tool objects to merge
* @returns {Object} Merged tools object
*/
function mergeToolSources(sources) {
// For now, simple merge that combines all tools
// Future implementation will handle:
// - Permission precedence (deny > allow)
// - Tool property conflicts
// - Metadata merging
const merged = {};
for (const source of sources) {
if (!source || typeof source !== 'object') {
continue;
}
for (const [toolId, toolConfig] of Object.entries(source)) {
// Simple last-write-wins for now
// Future: merge based on permission levels
merged[toolId] = toolConfig;
}
}
return merged;
}
/**
* Middleware-friendly function to get tools for a request
* @function getToolsForRequest
* @param {Object} req - Express request object
* @returns {Promise<Object|null>} Available tools for the request
*/
async function getToolsForRequest(req) {
const userId = req.user?.id;
// For now, return global tools if no user
if (!userId) {
return getCachedTools({ includeGlobal: true });
}
// Future: Extract roles and groups from req.user
const roleIds = req.user?.roles || [];
const groupIds = req.user?.groups || [];
return getCachedTools({
userId,
roleIds,
groupIds,
includeGlobal: true,
});
}
module.exports = {
ToolCacheKeys,
getCachedTools,
setCachedTools,
getMCPServerTools,
getToolsForRequest,
invalidateCachedTools,
computeEffectiveTools,
};
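Taken together, the reworked cache keys support per-scope writes and merged reads. A short usage sketch, assuming the module resolves at ~/server/services/Config/getCachedTools (the path is inferred from the diff, not stated in it):

// Hedged usage sketch of the API exported above.
const {
  getCachedTools,
  setCachedTools,
  invalidateCachedTools,
} = require('~/server/services/Config/getCachedTools');

async function cacheUserTools(userId, userTools) {
  // Writes tools:user:<userId>; global tools would be written with { isGlobal: true }.
  await setCachedTools(userTools, { userId });
}

async function readEffectiveTools(userId) {
  // Merges the global entry with the user-scoped entry (role/group IDs could be passed as well).
  return getCachedTools({ userId, includeGlobal: true });
}

async function onPermissionsChanged(userId) {
  // Drops tools:user:<userId> and tools:effective:<userId> so the next read recomputes.
  await invalidateCachedTools({ userId });
}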

View File

@@ -1,7 +1,7 @@
const appConfig = require('./app');
const mcpToolsCache = require('./mcp');
const { config } = require('./EndpointService');
const getCachedTools = require('./getCachedTools');
const mcpToolsCache = require('./mcpToolsCache');
const loadCustomConfig = require('./loadCustomConfig');
const loadConfigModels = require('./loadConfigModels');
const loadDefaultModels = require('./loadDefaultModels');

View File

@@ -1,91 +0,0 @@
const { logger } = require('@librechat/data-schemas');
const { CacheKeys, Constants } = require('librechat-data-provider');
const { getCachedTools, setCachedTools } = require('./getCachedTools');
const { getLogStores } = require('~/cache');
/**
* Updates MCP tools in the cache for a specific server
* @param {Object} params - Parameters for updating MCP tools
* @param {string} params.serverName - MCP server name
* @param {Array} params.tools - Array of tool objects from MCP server
* @returns {Promise<LCAvailableTools>}
*/
async function updateMCPServerTools({ serverName, tools }) {
try {
const serverTools = {};
const mcpDelimiter = Constants.mcp_delimiter;
for (const tool of tools) {
const name = `${tool.name}${mcpDelimiter}${serverName}`;
serverTools[name] = {
type: 'function',
['function']: {
name,
description: tool.description,
parameters: tool.inputSchema,
},
};
}
await setCachedTools(serverTools, { serverName });
const cache = getLogStores(CacheKeys.CONFIG_STORE);
await cache.delete(CacheKeys.TOOLS);
logger.debug(`[MCP Cache] Updated ${tools.length} tools for server ${serverName}`);
return serverTools;
} catch (error) {
logger.error(`[MCP Cache] Failed to update tools for ${serverName}:`, error);
throw error;
}
}
/**
* Merges app-level tools with global tools
* @param {import('@librechat/api').LCAvailableTools} appTools
* @returns {Promise<void>}
*/
async function mergeAppTools(appTools) {
try {
const count = Object.keys(appTools).length;
if (!count) {
return;
}
const cachedTools = await getCachedTools();
const mergedTools = { ...cachedTools, ...appTools };
await setCachedTools(mergedTools);
const cache = getLogStores(CacheKeys.CONFIG_STORE);
await cache.delete(CacheKeys.TOOLS);
logger.debug(`Merged ${count} app-level tools`);
} catch (error) {
logger.error('Failed to merge app-level tools:', error);
throw error;
}
}
/**
* Caches MCP server tools (no longer merges with global)
* @param {object} params
* @param {string} params.serverName
* @param {import('@librechat/api').LCAvailableTools} params.serverTools
* @returns {Promise<void>}
*/
async function cacheMCPServerTools({ serverName, serverTools }) {
try {
const count = Object.keys(serverTools).length;
if (!count) {
return;
}
// Only cache server-specific tools, no merging with global
await setCachedTools(serverTools, { serverName });
logger.debug(`Cached ${count} MCP server tools for ${serverName}`);
} catch (error) {
logger.error(`Failed to cache MCP server tools for ${serverName}:`, error);
throw error;
}
}
module.exports = {
mergeAppTools,
cacheMCPServerTools,
updateMCPServerTools,
};

View File

@@ -0,0 +1,143 @@
const { logger } = require('@librechat/data-schemas');
const { CacheKeys, Constants } = require('librechat-data-provider');
const { getCachedTools, setCachedTools } = require('./getCachedTools');
const { getLogStores } = require('~/cache');
/**
* Updates MCP tools in the cache for a specific server and user
* @param {Object} params - Parameters for updating MCP tools
* @param {string} params.userId - User ID
* @param {string} params.serverName - MCP server name
* @param {Array} params.tools - Array of tool objects from MCP server
* @returns {Promise<LCAvailableTools>}
*/
async function updateMCPUserTools({ userId, serverName, tools }) {
try {
const userTools = await getCachedTools({ userId });
const mcpDelimiter = Constants.mcp_delimiter;
for (const key of Object.keys(userTools)) {
if (key.endsWith(`${mcpDelimiter}${serverName}`)) {
delete userTools[key];
}
}
for (const tool of tools) {
const name = `${tool.name}${Constants.mcp_delimiter}${serverName}`;
userTools[name] = {
type: 'function',
['function']: {
name,
description: tool.description,
parameters: tool.inputSchema,
},
};
}
await setCachedTools(userTools, { userId });
const cache = getLogStores(CacheKeys.CONFIG_STORE);
await cache.delete(CacheKeys.TOOLS);
logger.debug(`[MCP Cache] Updated ${tools.length} tools for ${serverName} user ${userId}`);
return userTools;
} catch (error) {
logger.error(`[MCP Cache] Failed to update tools for ${serverName}:`, error);
throw error;
}
}
/**
* Merges app-level tools with global tools
* @param {import('@librechat/api').LCAvailableTools} appTools
* @returns {Promise<void>}
*/
async function mergeAppTools(appTools) {
try {
const count = Object.keys(appTools).length;
if (!count) {
return;
}
const cachedTools = await getCachedTools({ includeGlobal: true });
const mergedTools = { ...cachedTools, ...appTools };
await setCachedTools(mergedTools, { isGlobal: true });
const cache = getLogStores(CacheKeys.CONFIG_STORE);
await cache.delete(CacheKeys.TOOLS);
logger.debug(`Merged ${count} app-level tools`);
} catch (error) {
logger.error('Failed to merge app-level tools:', error);
throw error;
}
}
/**
* Merges user-level tools with global tools
* @param {object} params
* @param {string} params.userId
* @param {Record<string, FunctionTool>} params.cachedUserTools
* @param {import('@librechat/api').LCAvailableTools} params.userTools
* @returns {Promise<void>}
*/
async function mergeUserTools({ userId, cachedUserTools, userTools }) {
try {
if (!userId) {
return;
}
const count = Object.keys(userTools).length;
if (!count) {
return;
}
const cachedTools = cachedUserTools ?? (await getCachedTools({ userId }));
const mergedTools = { ...cachedTools, ...userTools };
await setCachedTools(mergedTools, { userId });
const cache = getLogStores(CacheKeys.CONFIG_STORE);
await cache.delete(CacheKeys.TOOLS);
logger.debug(`Merged ${count} user-level tools`);
} catch (error) {
logger.error('Failed to merge user-level tools:', error);
throw error;
}
}
/**
* Clears all MCP tools for a specific server
* @param {Object} params - Parameters for clearing MCP tools
* @param {string} [params.userId] - User ID (if clearing user-specific tools)
* @param {string} params.serverName - MCP server name
* @returns {Promise<void>}
*/
async function clearMCPServerTools({ userId, serverName }) {
try {
const tools = await getCachedTools({ userId, includeGlobal: !userId });
// Remove all tools for this server
const mcpDelimiter = Constants.mcp_delimiter;
let removedCount = 0;
for (const key of Object.keys(tools)) {
if (key.endsWith(`${mcpDelimiter}${serverName}`)) {
delete tools[key];
removedCount++;
}
}
if (removedCount > 0) {
await setCachedTools(tools, userId ? { userId } : { isGlobal: true });
const cache = getLogStores(CacheKeys.CONFIG_STORE);
await cache.delete(CacheKeys.TOOLS);
logger.debug(
`[MCP Cache] Removed ${removedCount} tools for ${serverName}${userId ? ` user ${userId}` : ' (global)'}`,
);
}
} catch (error) {
logger.error(`[MCP Cache] Failed to clear tools for ${serverName}:`, error);
throw error;
}
}
module.exports = {
mergeAppTools,
mergeUserTools,
updateMCPUserTools,
clearMCPServerTools,
};
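The new module is consumed from the OAuth callback and reinitialize flows shown earlier; below is a hedged sketch of wiring it into a connect/disconnect cycle. onServerConnected and onServerRemoved are illustrative names, and fetchTools mirrors the userConnection.fetchTools call in the callback route.

// Hedged wiring sketch; only the imported functions come from the module above.
const {
  updateMCPUserTools,
  clearMCPServerTools,
} = require('~/server/services/Config/mcpToolsCache');

async function onServerConnected({ userId, serverName, connection }) {
  // Replace any stale entries for this server, then cache the fresh tool set for the user.
  const tools = await connection.fetchTools();
  await updateMCPUserTools({ userId, serverName, tools });
}

async function onServerRemoved({ userId, serverName }) {
  // Strip every cached tool keyed to this server for the user.
  await clearMCPServerTools({ userId, serverName });
}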

View File

@@ -54,11 +54,6 @@ const addTitle = async (req, { text, response, client }) => {
clearTimeout(timeoutId);
}
if (!title) {
logger.debug(`[${key}] No title generated`);
return;
}
await titleCache.set(key, title, 120000);
await saveConvo(
req,

View File

@@ -552,7 +552,7 @@ const processAgentFileUpload = async ({ req, res, metadata }) => {
throw new Error('File search is not enabled for Agents');
}
// Note: File search processing continues to dual storage logic below
} else if (tool_resource === EToolResources.context) {
} else if (tool_resource === EToolResources.ocr) {
const { file_id, temp_file_id = null } = metadata;
/**
@@ -594,9 +594,10 @@ const processAgentFileUpload = async ({ req, res, metadata }) => {
const fileConfig = mergeFileConfig(appConfig.fileConfig);
const shouldUseOCR =
appConfig?.ocr != null &&
fileConfig.checkType(file.mimetype, fileConfig.ocr?.supportedMimeTypes || []);
const shouldUseOCR = fileConfig.checkType(
file.mimetype,
fileConfig.ocr?.supportedMimeTypes || [],
);
if (shouldUseOCR && !(await checkCapability(req, AgentCapabilities.ocr))) {
throw new Error('OCR capability is not enabled for Agents');
@@ -625,7 +626,7 @@ const processAgentFileUpload = async ({ req, res, metadata }) => {
);
if (!shouldUseText) {
throw new Error(`File type ${file.mimetype} is not supported for text parsing.`);
throw new Error(`File type ${file.mimetype} is not supported for OCR or text parsing`);
}
const { text, bytes } = await parseText({ req, file, file_id });
@@ -701,6 +702,8 @@ const processAgentFileUpload = async ({ req, res, metadata }) => {
returnFile: true,
});
filepath = result.filepath;
width = result.width;
height = result.height;
}
const fileInfo = removeNullishValues({

View File

@@ -0,0 +1,520 @@
const { EToolResources, FileSources, FileContext } = require('librechat-data-provider');
jest.mock('~/server/services/Files/strategies', () => {
const mockHandleFileUpload = jest.fn();
const mockHandleImageUpload = jest.fn();
return {
getStrategyFunctions: jest.fn((source) => ({
handleFileUpload: mockHandleFileUpload.mockImplementation(({ file, file_id }) =>
Promise.resolve({
filepath: `/uploads/${source}/${file_id}`,
bytes: file?.size || 20,
}),
),
handleImageUpload: mockHandleImageUpload.mockImplementation(({ file, file_id }) =>
Promise.resolve({
filepath: `/uploads/${source}/images/${file_id}`,
bytes: file.size,
width: 800,
height: 600,
}),
),
})),
};
});
jest.mock('~/models/File', () => {
const mockCreateFile = jest.fn();
return {
createFile: mockCreateFile.mockImplementation((fileInfo) =>
Promise.resolve({ _id: 'test-file-id', ...fileInfo }),
),
updateFileUsage: jest.fn().mockResolvedValue(),
};
});
jest.mock('~/models/Agent', () => ({
addAgentResourceFile: jest.fn().mockResolvedValue(),
}));
jest.mock('~/server/services/Config/getEndpointsConfig', () => ({
checkCapability: jest.fn().mockResolvedValue(true),
}));
jest.mock('~/server/utils/getFileStrategy', () => ({
getFileStrategy: jest.fn(() => {
return 'local';
}),
}));
jest.mock('~/server/services/Files/VectorDB/crud', () => ({
uploadVectors: jest.fn(({ file_id }) =>
Promise.resolve({
success: true,
vectorIds: [`vector-${file_id}-1`, `vector-${file_id}-2`],
}),
),
}));
jest.mock('~/server/controllers/assistants/helpers', () => ({
getOpenAIClient: jest.fn(),
}));
jest.mock('~/server/services/Tools/credentials', () => ({
loadAuthValues: jest.fn(),
}));
jest.mock('fs', () => ({
...jest.requireActual('fs'),
createReadStream: jest.fn(() => 'mock-stream'),
}));
jest.mock('~/server/utils/queue', () => ({
LB_QueueAsyncCall: jest.fn((fn, args, callback) => {
if (callback) {
callback(null, { success: true });
}
return Promise.resolve({ success: true });
}),
}));
jest.mock('~/server/services/Config/app', () => ({
getAppConfig: jest.fn().mockResolvedValue({
fileStrategy: 'local',
fileStrategies: {
agents: 'local',
},
imageOutputType: 'jpeg',
}),
}));
jest.mock('~/server/services/Files/images', () => ({
processImageFile: jest.fn().mockResolvedValue({
filepath: '/test/image/path',
width: 800,
height: 600,
}),
handleImageUpload: jest.fn().mockResolvedValue({
filepath: '/test/image/uploaded/path',
bytes: 1024,
width: 800,
height: 600,
}),
}));
describe('File Processing - processAgentFileUpload', () => {
let processAgentFileUpload;
let mockHandleFileUpload;
let mockHandleImageUpload;
let mockCreateFile;
let mockAddAgentResourceFile;
let mockUploadVectors;
let mockCheckCapability;
let mockGetFileStrategy;
beforeAll(() => {
const processModule = require('./process');
processAgentFileUpload = processModule.processAgentFileUpload;
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const mockStrategies = getStrategyFunctions();
mockHandleFileUpload = mockStrategies.handleFileUpload;
mockHandleImageUpload = mockStrategies.handleImageUpload;
mockCreateFile = require('~/models/File').createFile;
mockAddAgentResourceFile = require('~/models/Agent').addAgentResourceFile;
mockUploadVectors = require('~/server/services/Files/VectorDB/crud').uploadVectors;
mockCheckCapability = require('~/server/services/Config/getEndpointsConfig').checkCapability;
mockGetFileStrategy = require('~/server/utils/getFileStrategy').getFileStrategy;
});
beforeEach(() => {
jest.clearAllMocks();
});
describe('processAgentFileUpload', () => {
it('should process image file upload for agent with proper file handling', async () => {
const mockReq = {
user: { id: 'test-user-id' },
file: {
buffer: Buffer.from('test image data'),
mimetype: 'image/jpeg',
size: 1024,
originalname: 'test-image.jpg',
},
body: {
file_id: 'test-file-id',
},
config: {
fileStrategy: 'local',
fileStrategies: {
agents: 'local',
},
imageOutputType: 'jpeg',
},
};
const mockRes = {
status: jest.fn().mockReturnThis(),
json: jest.fn(),
};
const metadata = {
agent_id: 'test-agent-id',
tool_resource: EToolResources.image_edit,
file_id: 'test-file-id',
};
await processAgentFileUpload({ req: mockReq, res: mockRes, metadata });
expect(mockGetFileStrategy).toHaveBeenCalledWith(mockReq.config, { isImage: true });
expect(mockHandleImageUpload).toHaveBeenCalledWith(
expect.objectContaining({
req: mockReq,
file: mockReq.file,
file_id: expect.any(String),
}),
);
expect(mockCreateFile).toHaveBeenCalledWith(
expect.objectContaining({
user: 'test-user-id',
file_id: 'test-file-id',
bytes: 1024,
filename: 'test-image.jpg',
context: FileContext.agents,
type: 'image/jpeg',
source: FileSources.local,
width: 800,
height: 600,
}),
true,
);
expect(mockAddAgentResourceFile).toHaveBeenCalledWith(
expect.objectContaining({
agent_id: 'test-agent-id',
file_id: 'test-file-id',
tool_resource: EToolResources.image_edit,
req: mockReq,
}),
);
expect(mockRes.status).toHaveBeenCalledWith(200);
expect(mockRes.json).toHaveBeenCalledWith(
expect.objectContaining({
message: 'Agent file uploaded and processed successfully',
}),
);
});
it('should process file_search tool resource with dual storage (file + vector)', async () => {
const mockReq = {
user: { id: 'test-user-id' },
file: {
buffer: Buffer.from('test file data'),
mimetype: 'application/pdf',
size: 2048,
originalname: 'test-document.pdf',
},
body: {
file_id: 'test-file-id',
},
config: {
fileStrategy: 'local',
fileStrategies: {
agents: 'local',
},
imageOutputType: 'jpeg',
},
};
const mockRes = {
status: jest.fn().mockReturnThis(),
json: jest.fn(),
};
const metadata = {
agent_id: 'test-agent-id',
tool_resource: EToolResources.file_search,
file_id: 'test-file-id',
};
await processAgentFileUpload({ req: mockReq, res: mockRes, metadata });
expect(mockGetFileStrategy).toHaveBeenCalledWith(mockReq.config, { isImage: false });
expect(mockHandleFileUpload).toHaveBeenCalledWith({
req: mockReq,
file: mockReq.file,
file_id: 'test-file-id',
basePath: 'uploads',
entity_id: 'test-agent-id',
});
expect(mockUploadVectors).toHaveBeenCalledWith({
req: mockReq,
file: mockReq.file,
file_id: 'test-file-id',
entity_id: 'test-agent-id',
});
expect(mockCreateFile).toHaveBeenCalledWith(
expect.objectContaining({
user: 'test-user-id',
file_id: 'test-file-id',
filename: 'test-document.pdf',
context: FileContext.agents,
type: 'application/pdf',
source: FileSources.local,
bytes: 2048,
filepath: '/uploads/local/test-file-id',
metadata: {},
}),
true,
);
expect(mockAddAgentResourceFile).toHaveBeenCalledWith(
expect.objectContaining({
agent_id: 'test-agent-id',
file_id: 'test-file-id',
tool_resource: EToolResources.file_search,
req: mockReq,
}),
);
});
it('should handle missing tool_resource parameter', async () => {
const mockReq = {
user: { id: 'test-user-id' },
file: {
buffer: Buffer.from('test file data'),
mimetype: 'application/pdf',
size: 2048,
originalname: 'test-document.pdf',
},
body: {
file_id: 'test-file-id',
},
config: {
fileStrategy: 'local',
fileStrategies: {
agents: 'local',
},
imageOutputType: 'jpeg',
},
};
const mockRes = {
status: jest.fn().mockReturnThis(),
json: jest.fn(),
};
const metadata = {
agent_id: 'test-agent-id',
file_id: 'test-file-id',
};
await expect(
processAgentFileUpload({ req: mockReq, res: mockRes, metadata }),
).rejects.toThrow('No tool resource provided for agent file upload');
});
it('should handle missing agent_id parameter', async () => {
const mockReq = {
user: { id: 'test-user-id' },
file: {
buffer: Buffer.from('test file data'),
mimetype: 'application/pdf',
size: 2048,
originalname: 'test-document.pdf',
},
body: {
file_id: 'test-file-id',
},
config: {
fileStrategy: 'local',
fileStrategies: {
agents: 'local',
},
imageOutputType: 'jpeg',
},
};
const mockRes = {
status: jest.fn().mockReturnThis(),
json: jest.fn(),
};
const metadata = {
tool_resource: EToolResources.file_search,
file_id: 'test-file-id',
};
await expect(
processAgentFileUpload({ req: mockReq, res: mockRes, metadata }),
).rejects.toThrow('No agent ID provided for agent file upload');
});
it('should handle image uploads for non-image tool resources', async () => {
const mockReq = {
user: { id: 'test-user-id' },
file: {
buffer: Buffer.from('test image data'),
mimetype: 'image/jpeg',
size: 1024,
originalname: 'test-image.jpg',
},
body: {
file_id: 'test-file-id',
},
config: {
fileStrategy: 'local',
fileStrategies: {
agents: 'local',
},
imageOutputType: 'jpeg',
},
};
const mockRes = {
status: jest.fn().mockReturnThis(),
json: jest.fn(),
};
const metadata = {
agent_id: 'test-agent-id',
tool_resource: EToolResources.file_search,
file_id: 'test-file-id',
};
await expect(
processAgentFileUpload({ req: mockReq, res: mockRes, metadata }),
).rejects.toThrow('Image uploads are not supported for file search tool resources');
});
it('should check execute_code capability and load auth values when processing code files', async () => {
const mockReq = {
user: { id: 'test-user-id' },
file: {
buffer: Buffer.from('print("hello world")'),
mimetype: 'text/x-python',
size: 20,
originalname: 'test.py',
path: '/tmp/test-file.py',
},
body: {
file_id: 'test-file-id',
},
config: {
fileStrategy: 'local',
fileStrategies: {
agents: 'local',
},
imageOutputType: 'jpeg',
},
};
const mockRes = {
status: jest.fn().mockReturnThis(),
json: jest.fn(),
};
const metadata = {
agent_id: 'test-agent-id',
tool_resource: EToolResources.execute_code,
file_id: 'test-file-id',
};
const mockLoadAuthValues = require('~/server/services/Tools/credentials').loadAuthValues;
mockLoadAuthValues.mockResolvedValue({ CODE_API_KEY: 'test-key' });
await processAgentFileUpload({ req: mockReq, res: mockRes, metadata });
expect(mockCheckCapability).toHaveBeenCalledWith(mockReq, 'execute_code');
expect(mockLoadAuthValues).toHaveBeenCalledWith({
userId: 'test-user-id',
authFields: ['LIBRECHAT_CODE_API_KEY'],
});
expect(mockHandleFileUpload).toHaveBeenNthCalledWith(
1,
expect.objectContaining({
req: mockReq,
stream: 'mock-stream',
filename: 'test.py',
entity_id: 'test-agent-id',
apiKey: undefined,
}),
);
expect(mockHandleFileUpload).toHaveBeenNthCalledWith(
2,
expect.objectContaining({
req: mockReq,
file: mockReq.file,
file_id: 'test-file-id',
basePath: 'uploads',
entity_id: 'test-agent-id',
}),
);
expect(mockAddAgentResourceFile).toHaveBeenCalledWith(
expect.objectContaining({
agent_id: 'test-agent-id',
file_id: 'test-file-id',
tool_resource: EToolResources.execute_code,
req: mockReq,
}),
);
});
it('should throw error when example capability (execute_code) is not enabled', async () => {
const mockReq = {
user: { id: 'test-user-id' },
file: {
buffer: Buffer.from('print("hello world")'),
mimetype: 'text/x-python',
size: 20,
originalname: 'test.py',
},
body: {
file_id: 'test-file-id',
},
config: {
fileStrategy: 'local',
fileStrategies: {
agents: 'local',
},
imageOutputType: 'jpeg',
},
};
const mockRes = {
status: jest.fn().mockReturnThis(),
json: jest.fn(),
};
const metadata = {
agent_id: 'test-agent-id',
tool_resource: EToolResources.execute_code,
file_id: 'test-file-id',
};
mockCheckCapability.mockResolvedValueOnce(false);
await expect(
processAgentFileUpload({ req: mockReq, res: mockRes, metadata }),
).rejects.toThrow('Code execution is not enabled for Agents');
expect(mockCheckCapability).toHaveBeenCalledWith(mockReq, 'execute_code');
expect(mockHandleFileUpload).not.toHaveBeenCalled();
expect(mockCreateFile).not.toHaveBeenCalled();
expect(mockAddAgentResourceFile).not.toHaveBeenCalled();
});
});
});

View File

@@ -20,10 +20,10 @@ const {
ContentTypes,
isAssistantsEndpoint,
} = require('librechat-data-provider');
const { getMCPManager, getFlowStateManager, getOAuthReconnectionManager } = require('~/config');
const { findToken, createToken, updateToken } = require('~/models');
const { getMCPManager, getFlowStateManager } = require('~/config');
const { getCachedTools, getAppConfig } = require('./Config');
const { reinitMCPServer } = require('./Tools/mcp');
const { getAppConfig } = require('./Config');
const { getLogStores } = require('~/cache');
/**
@@ -152,8 +152,8 @@ function createOAuthCallback({ runStepEmitter, runStepDeltaEmitter }) {
/**
* @param {Object} params
* @param {ServerRequest} params.req - The Express request object, containing user/request info.
* @param {ServerResponse} params.res - The Express response object for sending events.
* @param {IUser} params.user - The user from the request object.
* @param {string} params.serverName
* @param {AbortSignal} params.signal
* @param {string} params.model
@@ -161,9 +161,9 @@ function createOAuthCallback({ runStepEmitter, runStepDeltaEmitter }) {
* @param {Record<string, Record<string, string>>} [params.userMCPAuthMap]
* @returns { Promise<Array<typeof tool | { _call: (toolInput: Object | string) => unknown}>> } An object with `_call` method to execute the tool input.
*/
async function reconnectServer({ res, user, index, signal, serverName, userMCPAuthMap }) {
async function reconnectServer({ req, res, index, signal, serverName, userMCPAuthMap }) {
const runId = Constants.USE_PRELIM_RESPONSE_MESSAGE_ID;
const flowId = `${user.id}:${serverName}:${Date.now()}`;
const flowId = `${req.user?.id}:${serverName}:${Date.now()}`;
const flowManager = getFlowStateManager(getLogStores(CacheKeys.FLOWS));
const stepId = 'step_oauth_login_' + serverName;
const toolCall = {
@@ -192,7 +192,7 @@ async function reconnectServer({ res, user, index, signal, serverName, userMCPAu
flowManager,
});
return await reinitMCPServer({
user,
req,
signal,
serverName,
oauthStart,
@@ -211,8 +211,8 @@ async function reconnectServer({ res, user, index, signal, serverName, userMCPAu
* i.e. `availableTools`, and will reinitialize the MCP server to ensure all tools are generated.
*
* @param {Object} params
* @param {ServerRequest} params.req - The Express request object, containing user/request info.
* @param {ServerResponse} params.res - The Express response object for sending events.
* @param {IUser} params.user - The user from the request object.
* @param {string} params.serverName
* @param {string} params.model
* @param {Providers | EModelEndpoint} params.provider - The provider for the tool.
@@ -221,8 +221,8 @@ async function reconnectServer({ res, user, index, signal, serverName, userMCPAu
* @param {Record<string, Record<string, string>>} [params.userMCPAuthMap]
* @returns { Promise<Array<typeof tool | { _call: (toolInput: Object | string) => unknown}>> } An object with `_call` method to execute the tool input.
*/
async function createMCPTools({ res, user, index, signal, serverName, provider, userMCPAuthMap }) {
const result = await reconnectServer({ res, user, index, signal, serverName, userMCPAuthMap });
async function createMCPTools({ req, res, index, signal, serverName, provider, userMCPAuthMap }) {
const result = await reconnectServer({ req, res, index, signal, serverName, userMCPAuthMap });
if (!result || !result.tools) {
logger.warn(`[MCP][${serverName}] Failed to reinitialize MCP server.`);
return;
@@ -231,8 +231,8 @@ async function createMCPTools({ res, user, index, signal, serverName, provider,
const serverTools = [];
for (const tool of result.tools) {
const toolInstance = await createMCPTool({
req,
res,
user,
provider,
userMCPAuthMap,
availableTools: result.availableTools,
@@ -249,8 +249,8 @@ async function createMCPTools({ res, user, index, signal, serverName, provider,
/**
* Creates a single tool from the specified MCP Server via `toolKey`.
* @param {Object} params
* @param {ServerRequest} params.req - The Express request object, containing user/request info.
* @param {ServerResponse} params.res - The Express response object for sending events.
* @param {IUser} params.user - The user from the request object.
* @param {string} params.toolKey - The toolKey for the tool.
* @param {string} params.model - The model for the tool.
* @param {number} [params.index]
@@ -261,31 +261,26 @@ async function createMCPTools({ res, user, index, signal, serverName, provider,
* @returns { Promise<typeof tool | { _call: (toolInput: Object | string) => unknown}> } An object with `_call` method to execute the tool input.
*/
async function createMCPTool({
req,
res,
user,
index,
signal,
toolKey,
provider,
userMCPAuthMap,
availableTools,
availableTools: tools,
}) {
const [toolName, serverName] = toolKey.split(Constants.mcp_delimiter);
const availableTools =
tools ?? (await getCachedTools({ userId: req.user?.id, includeGlobal: true }));
/** @type {LCTool | undefined} */
let toolDefinition = availableTools?.[toolKey]?.function;
if (!toolDefinition) {
logger.warn(
`[MCP][${serverName}][${toolName}] Requested tool not found in available tools, re-initializing MCP server.`,
);
const result = await reconnectServer({
res,
user,
index,
signal,
serverName,
userMCPAuthMap,
});
const result = await reconnectServer({ req, res, index, signal, serverName, userMCPAuthMap });
toolDefinition = result?.availableTools?.[toolKey]?.function;
}
@@ -442,10 +437,10 @@ async function getMCPSetupData(userId) {
}
const mcpManager = getMCPManager(userId);
/** @type {Map<string, import('@librechat/api').MCPConnection>} */
/** @type {ReturnType<MCPManager['getAllConnections']>} */
let appConnections = new Map();
try {
appConnections = (await mcpManager.appConnections?.getAll()) || new Map();
appConnections = (await mcpManager.getAllConnections()) || new Map();
} catch (error) {
logger.error(`[MCP][User: ${userId}] Error getting app connections:`, error);
}
@@ -543,20 +538,13 @@ async function getServerConnectionStatus(
const baseConnectionState = getConnectionState();
let finalConnectionState = baseConnectionState;
// connection state overrides specific to OAuth servers
if (baseConnectionState === 'disconnected' && oauthServers.has(serverName)) {
// check if server is actively being reconnected
const oauthReconnectionManager = getOAuthReconnectionManager();
if (oauthReconnectionManager.isReconnecting(userId, serverName)) {
finalConnectionState = 'connecting';
} else {
const { hasActiveFlow, hasFailedFlow } = await checkOAuthFlowStatus(userId, serverName);
const { hasActiveFlow, hasFailedFlow } = await checkOAuthFlowStatus(userId, serverName);
if (hasFailedFlow) {
finalConnectionState = 'error';
} else if (hasActiveFlow) {
finalConnectionState = 'connecting';
}
if (hasFailedFlow) {
finalConnectionState = 'error';
} else if (hasActiveFlow) {
finalConnectionState = 'connecting';
}
}

View File

@@ -1,45 +1,13 @@
const { logger } = require('@librechat/data-schemas');
const { MCPOAuthHandler } = require('@librechat/api');
const { CacheKeys } = require('librechat-data-provider');
const {
createMCPTool,
createMCPTools,
getMCPSetupData,
checkOAuthFlowStatus,
getServerConnectionStatus,
} = require('./MCP');
const { getMCPSetupData, checkOAuthFlowStatus, getServerConnectionStatus } = require('./MCP');
// Mock all dependencies
jest.mock('@librechat/data-schemas', () => ({
logger: {
debug: jest.fn(),
error: jest.fn(),
info: jest.fn(),
warn: jest.fn(),
},
}));
jest.mock('@langchain/core/tools', () => ({
tool: jest.fn((fn, config) => {
const toolInstance = { _call: fn, ...config };
return toolInstance;
}),
}));
jest.mock('@librechat/agents', () => ({
Providers: {
VERTEXAI: 'vertexai',
GOOGLE: 'google',
},
StepTypes: {
TOOL_CALLS: 'tool_calls',
},
GraphEvents: {
ON_RUN_STEP_DELTA: 'on_run_step_delta',
ON_RUN_STEP: 'on_run_step',
},
Constants: {
CONTENT_AND_ARTIFACT: 'content_and_artifact',
},
}));
@@ -47,27 +15,12 @@ jest.mock('@librechat/api', () => ({
MCPOAuthHandler: {
generateFlowId: jest.fn(),
},
sendEvent: jest.fn(),
normalizeServerName: jest.fn((name) => name),
convertWithResolvedRefs: jest.fn((params) => params),
}));
jest.mock('librechat-data-provider', () => ({
CacheKeys: {
FLOWS: 'flows',
},
Constants: {
USE_PRELIM_RESPONSE_MESSAGE_ID: 'prelim_response_id',
mcp_delimiter: '::',
mcp_prefix: 'mcp_',
},
ContentTypes: {
TEXT: 'text',
},
isAssistantsEndpoint: jest.fn(() => false),
Time: {
TWO_MINUTES: 120000,
},
}));
jest.mock('./Config', () => ({
@@ -78,7 +31,6 @@ jest.mock('./Config', () => ({
jest.mock('~/config', () => ({
getMCPManager: jest.fn(),
getFlowStateManager: jest.fn(),
getOAuthReconnectionManager: jest.fn(),
}));
jest.mock('~/cache', () => ({
@@ -91,23 +43,19 @@ jest.mock('~/models', () => ({
updateToken: jest.fn(),
}));
jest.mock('./Tools/mcp', () => ({
reinitMCPServer: jest.fn(),
}));
describe('tests for the new helper functions used by the MCP connection status endpoints', () => {
let mockLoadCustomConfig;
let mockGetMCPManager;
let mockGetFlowStateManager;
let mockGetLogStores;
let mockGetOAuthReconnectionManager;
beforeEach(() => {
jest.clearAllMocks();
mockLoadCustomConfig = require('./Config').loadCustomConfig;
mockGetMCPManager = require('~/config').getMCPManager;
mockGetFlowStateManager = require('~/config').getFlowStateManager;
mockGetLogStores = require('~/cache').getLogStores;
mockGetOAuthReconnectionManager = require('~/config').getOAuthReconnectionManager;
});
describe('getMCPSetupData', () => {
@@ -123,7 +71,7 @@ describe('tests for the new helper functions used by the MCP connection status e
beforeEach(() => {
mockGetAppConfig = require('./Config').getAppConfig;
mockGetMCPManager.mockReturnValue({
appConnections: { getAll: jest.fn(() => new Map()) },
getAllConnections: jest.fn(() => new Map()),
getUserConnections: jest.fn(() => new Map()),
getOAuthServers: jest.fn(() => new Set()),
});
@@ -137,7 +85,7 @@ describe('tests for the new helper functions used by the MCP connection status e
const mockOAuthServers = new Set(['server2']);
const mockMCPManager = {
appConnections: { getAll: jest.fn(() => mockAppConnections) },
getAllConnections: jest.fn(() => mockAppConnections),
getUserConnections: jest.fn(() => mockUserConnections),
getOAuthServers: jest.fn(() => mockOAuthServers),
};
@@ -147,7 +95,7 @@ describe('tests for the new helper functions used by the MCP connection status e
expect(mockGetAppConfig).toHaveBeenCalled();
expect(mockGetMCPManager).toHaveBeenCalledWith(mockUserId);
expect(mockMCPManager.appConnections.getAll).toHaveBeenCalled();
expect(mockMCPManager.getAllConnections).toHaveBeenCalled();
expect(mockMCPManager.getUserConnections).toHaveBeenCalledWith(mockUserId);
expect(mockMCPManager.getOAuthServers).toHaveBeenCalled();
@@ -168,7 +116,7 @@ describe('tests for the new helper functions used by the MCP connection status e
mockGetAppConfig.mockResolvedValue({ mcpConfig: mockConfig.mcpServers });
const mockMCPManager = {
appConnections: { getAll: jest.fn(() => null) },
getAllConnections: jest.fn(() => null),
getUserConnections: jest.fn(() => null),
getOAuthServers: jest.fn(() => null),
};
@@ -406,12 +354,6 @@ describe('tests for the new helper functions used by the MCP connection status e
const userConnections = new Map();
const oauthServers = new Set([mockServerName]);
// Mock OAuthReconnectionManager
const mockOAuthReconnectionManager = {
isReconnecting: jest.fn(() => false),
};
mockGetOAuthReconnectionManager.mockReturnValue(mockOAuthReconnectionManager);
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
@@ -428,12 +370,6 @@ describe('tests for the new helper functions used by the MCP connection status e
const userConnections = new Map();
const oauthServers = new Set([mockServerName]);
// Mock OAuthReconnectionManager
const mockOAuthReconnectionManager = {
isReconnecting: jest.fn(() => false),
};
mockGetOAuthReconnectionManager.mockReturnValue(mockOAuthReconnectionManager);
// Mock flow state to return failed flow
const mockFlowManager = {
getFlowState: jest.fn(() => ({
@@ -465,12 +401,6 @@ describe('tests for the new helper functions used by the MCP connection status e
const userConnections = new Map();
const oauthServers = new Set([mockServerName]);
// Mock OAuthReconnectionManager
const mockOAuthReconnectionManager = {
isReconnecting: jest.fn(() => false),
};
mockGetOAuthReconnectionManager.mockReturnValue(mockOAuthReconnectionManager);
// Mock flow state to return active flow
const mockFlowManager = {
getFlowState: jest.fn(() => ({
@@ -502,12 +432,6 @@ describe('tests for the new helper functions used by the MCP connection status e
const userConnections = new Map();
const oauthServers = new Set([mockServerName]);
// Mock OAuthReconnectionManager
const mockOAuthReconnectionManager = {
isReconnecting: jest.fn(() => false),
};
mockGetOAuthReconnectionManager.mockReturnValue(mockOAuthReconnectionManager);
// Mock flow state to return no flow
const mockFlowManager = {
getFlowState: jest.fn(() => null),
@@ -530,35 +454,6 @@ describe('tests for the new helper functions used by the MCP connection status e
});
});
it('should return connecting state when OAuth server is reconnecting', async () => {
const appConnections = new Map();
const userConnections = new Map();
const oauthServers = new Set([mockServerName]);
// Mock OAuthReconnectionManager to return true for isReconnecting
const mockOAuthReconnectionManager = {
isReconnecting: jest.fn(() => true),
};
mockGetOAuthReconnectionManager.mockReturnValue(mockOAuthReconnectionManager);
const result = await getServerConnectionStatus(
mockUserId,
mockServerName,
appConnections,
userConnections,
oauthServers,
);
expect(result).toEqual({
requiresOAuth: true,
connectionState: 'connecting',
});
expect(mockOAuthReconnectionManager.isReconnecting).toHaveBeenCalledWith(
mockUserId,
mockServerName,
);
});
it('should not check OAuth flow status when server is connected', async () => {
const mockFlowManager = {
getFlowState: jest.fn(),
@@ -616,275 +511,3 @@ describe('tests for the new helper functions used by the MCP connection status e
});
});
});
describe('User parameter passing tests', () => {
let mockReinitMCPServer;
let mockGetFlowStateManager;
let mockGetLogStores;
beforeEach(() => {
jest.clearAllMocks();
mockReinitMCPServer = require('./Tools/mcp').reinitMCPServer;
mockGetFlowStateManager = require('~/config').getFlowStateManager;
mockGetLogStores = require('~/cache').getLogStores;
// Setup default mocks
mockGetLogStores.mockReturnValue({});
mockGetFlowStateManager.mockReturnValue({
createFlowWithHandler: jest.fn(),
failFlow: jest.fn(),
});
});
describe('createMCPTools', () => {
it('should pass user parameter to reinitMCPServer when calling reconnectServer internally', async () => {
const mockUser = { id: 'test-user-123', name: 'Test User' };
const mockRes = { write: jest.fn(), flush: jest.fn() };
const mockSignal = new AbortController().signal;
mockReinitMCPServer.mockResolvedValue({
tools: [{ name: 'test-tool' }],
availableTools: {
'test-tool::test-server': {
function: {
description: 'Test tool',
parameters: { type: 'object', properties: {} },
},
},
},
});
await createMCPTools({
res: mockRes,
user: mockUser,
serverName: 'test-server',
provider: 'openai',
signal: mockSignal,
userMCPAuthMap: {},
});
// Verify reinitMCPServer was called with the user
expect(mockReinitMCPServer).toHaveBeenCalledWith(
expect.objectContaining({
user: mockUser,
serverName: 'test-server',
}),
);
expect(mockReinitMCPServer.mock.calls[0][0].user).toBe(mockUser);
});
it('should throw error if user is not provided', async () => {
const mockRes = { write: jest.fn(), flush: jest.fn() };
mockReinitMCPServer.mockResolvedValue({
tools: [],
availableTools: {},
});
// Call without user should throw error
await expect(
createMCPTools({
res: mockRes,
user: undefined,
serverName: 'test-server',
provider: 'openai',
userMCPAuthMap: {},
}),
).rejects.toThrow("Cannot read properties of undefined (reading 'id')");
// Verify reinitMCPServer was not called due to early error
expect(mockReinitMCPServer).not.toHaveBeenCalled();
});
});
describe('createMCPTool', () => {
it('should pass user parameter to reinitMCPServer when tool not in cache', async () => {
const mockUser = { id: 'test-user-456', email: 'test@example.com' };
const mockRes = { write: jest.fn(), flush: jest.fn() };
const mockSignal = new AbortController().signal;
mockReinitMCPServer.mockResolvedValue({
availableTools: {
'test-tool::test-server': {
function: {
description: 'Test tool',
parameters: { type: 'object', properties: {} },
},
},
},
});
// Call without availableTools to trigger reinit
await createMCPTool({
res: mockRes,
user: mockUser,
toolKey: 'test-tool::test-server',
provider: 'openai',
signal: mockSignal,
userMCPAuthMap: {},
availableTools: undefined, // Force reinit
});
// Verify reinitMCPServer was called with the user
expect(mockReinitMCPServer).toHaveBeenCalledWith(
expect.objectContaining({
user: mockUser,
serverName: 'test-server',
}),
);
expect(mockReinitMCPServer.mock.calls[0][0].user).toBe(mockUser);
});
it('should not call reinitMCPServer when tool is in cache', async () => {
const mockUser = { id: 'test-user-789' };
const mockRes = { write: jest.fn(), flush: jest.fn() };
const availableTools = {
'test-tool::test-server': {
function: {
description: 'Cached tool',
parameters: { type: 'object', properties: {} },
},
},
};
await createMCPTool({
res: mockRes,
user: mockUser,
toolKey: 'test-tool::test-server',
provider: 'openai',
userMCPAuthMap: {},
availableTools: availableTools,
});
// Verify reinitMCPServer was NOT called since tool was in cache
expect(mockReinitMCPServer).not.toHaveBeenCalled();
});
});
describe('reinitMCPServer (via reconnectServer)', () => {
it('should always receive user parameter when called from createMCPTools', async () => {
const mockUser = { id: 'user-001', role: 'admin' };
const mockRes = { write: jest.fn(), flush: jest.fn() };
// Track all calls to reinitMCPServer
const reinitCalls = [];
mockReinitMCPServer.mockImplementation((params) => {
reinitCalls.push(params);
return Promise.resolve({
tools: [{ name: 'tool1' }, { name: 'tool2' }],
availableTools: {
'tool1::server1': { function: { description: 'Tool 1', parameters: {} } },
'tool2::server1': { function: { description: 'Tool 2', parameters: {} } },
},
});
});
await createMCPTools({
res: mockRes,
user: mockUser,
serverName: 'server1',
provider: 'anthropic',
userMCPAuthMap: {},
});
// Verify all calls to reinitMCPServer had the user
expect(reinitCalls.length).toBeGreaterThan(0);
reinitCalls.forEach((call) => {
expect(call.user).toBe(mockUser);
expect(call.user.id).toBe('user-001');
});
});
it('should always receive user parameter when called from createMCPTool', async () => {
const mockUser = { id: 'user-002', permissions: ['read', 'write'] };
const mockRes = { write: jest.fn(), flush: jest.fn() };
// Track all calls to reinitMCPServer
const reinitCalls = [];
mockReinitMCPServer.mockImplementation((params) => {
reinitCalls.push(params);
return Promise.resolve({
availableTools: {
'my-tool::my-server': {
function: { description: 'My Tool', parameters: {} },
},
},
});
});
await createMCPTool({
res: mockRes,
user: mockUser,
toolKey: 'my-tool::my-server',
provider: 'google',
userMCPAuthMap: {},
availableTools: undefined, // Force reinit
});
// Verify the call to reinitMCPServer had the user
expect(reinitCalls.length).toBe(1);
expect(reinitCalls[0].user).toBe(mockUser);
expect(reinitCalls[0].user.id).toBe('user-002');
});
});
describe('User parameter integrity', () => {
it('should preserve user object properties through the call chain', async () => {
const complexUser = {
id: 'complex-user',
name: 'John Doe',
email: 'john@example.com',
metadata: { subscription: 'premium', settings: { theme: 'dark' } },
};
const mockRes = { write: jest.fn(), flush: jest.fn() };
let capturedUser = null;
mockReinitMCPServer.mockImplementation((params) => {
capturedUser = params.user;
return Promise.resolve({
tools: [{ name: 'test' }],
availableTools: {
'test::server': { function: { description: 'Test', parameters: {} } },
},
});
});
await createMCPTools({
res: mockRes,
user: complexUser,
serverName: 'server',
provider: 'openai',
userMCPAuthMap: {},
});
// Verify the complete user object was passed
expect(capturedUser).toEqual(complexUser);
expect(capturedUser.id).toBe('complex-user');
expect(capturedUser.metadata.subscription).toBe('premium');
expect(capturedUser.metadata.settings.theme).toBe('dark');
});
it('should throw error when user is null', async () => {
const mockRes = { write: jest.fn(), flush: jest.fn() };
mockReinitMCPServer.mockResolvedValue({
tools: [],
availableTools: {},
});
await expect(
createMCPTools({
res: mockRes,
user: null,
serverName: 'test-server',
provider: 'openai',
userMCPAuthMap: {},
}),
).rejects.toThrow("Cannot read properties of null (reading 'id')");
// Verify reinitMCPServer was not called due to early error
expect(mockReinitMCPServer).not.toHaveBeenCalled();
});
});
});
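As a hypothetical sketch only, a test against the getAllConnections-based manager could be wired like the beforeEach mocks above; the assertions stick to the call expectations already visible in this spec, since the exact return shape of getMCPSetupData is not shown here:

it('wires manager methods into the setup data', async () => {
  // mockGetAppConfig / mockGetMCPManager are the spec-level mocks set up in beforeEach.
  mockGetAppConfig.mockResolvedValue({ mcpConfig: { server1: {} } });
  const manager = {
    getAllConnections: jest.fn(() => new Map()),
    getUserConnections: jest.fn(() => new Map()),
    getOAuthServers: jest.fn(() => new Set(['server1'])),
  };
  mockGetMCPManager.mockReturnValue(manager);
  const result = await getMCPSetupData('user-1');
  expect(mockGetMCPManager).toHaveBeenCalledWith('user-1');
  expect(manager.getAllConnections).toHaveBeenCalled();
  expect(manager.getUserConnections).toHaveBeenCalledWith('user-1');
  expect(result).toBeDefined();
});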

View File

@@ -313,7 +313,7 @@ const ensurePrincipalExists = async function (principal) {
idOnTheSource: principal.idOnTheSource,
};
const userId = await createUser(userData, true, true);
const userId = await createUser(userData, true, false);
return userId.toString();
}

View File

@@ -1,12 +1,7 @@
const { sleep } = require('@librechat/agents');
const { logger } = require('@librechat/data-schemas');
const { tool: toolFn, DynamicStructuredTool } = require('@langchain/core/tools');
const {
getToolkitKey,
hasCustomUserVars,
getUserMCPAuthMap,
isActionDomainAllowed,
} = require('@librechat/api');
const { getToolkitKey, hasCustomUserVars, getUserMCPAuthMap } = require('@librechat/api');
const {
Tools,
Constants,
@@ -31,6 +26,7 @@ const { processFileURL, uploadImageBuffer } = require('~/server/services/Files/p
const { getEndpointsConfig, getCachedTools } = require('~/server/services/Config');
const { manifestToolMap, toolkits } = require('~/app/clients/tools/manifest');
const { createOnSearchResults } = require('~/server/services/Tools/search');
const { isActionDomainAllowed } = require('~/server/services/domains');
const { recordUsage } = require('~/server/services/Threads');
const { loadTools } = require('~/app/clients/tools/util');
const { redactMessage } = require('~/config/parsers');
@@ -78,7 +74,7 @@ async function processRequiredActions(client, requiredActions) {
requiredActions,
);
const appConfig = client.req.config;
const toolDefinitions = await getCachedTools();
const toolDefinitions = await getCachedTools({ userId: client.req.user.id, includeGlobal: true });
const seenToolkits = new Set();
const tools = requiredActions
.map((action) => {
@@ -357,12 +353,7 @@ async function processRequiredActions(client, requiredActions) {
async function loadAgentTools({ req, res, agent, signal, tool_resources, openAIApiKey }) {
if (!agent.tools || agent.tools.length === 0) {
return {};
} else if (
agent.tools &&
agent.tools.length === 1 &&
/** Legacy handling for `ocr` as may still exist in existing Agents */
(agent.tools[0] === AgentCapabilities.context || agent.tools[0] === AgentCapabilities.ocr)
) {
} else if (agent.tools && agent.tools.length === 1 && agent.tools[0] === AgentCapabilities.ocr) {
return {};
}
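Condensing the early-return guard in loadAgentTools from the two sides of this hunk, a sketch of the check (the import path for AgentCapabilities is assumed from the surrounding codebase; one side treats a lone `context` capability as a no-op for legacy agents, the other checks `ocr` alone):

const { AgentCapabilities } = require('librechat-data-provider'); // assumed import path

// Sketch only: decide whether tool loading can be skipped for an agent.
function shouldSkipToolLoading(agent) {
  if (!agent.tools || agent.tools.length === 0) {
    return true;
  }
  const noOpCapabilities = new Set([AgentCapabilities.context, AgentCapabilities.ocr]);
  return agent.tools.length === 1 && noOpCapabilities.has(agent.tools[0]);
}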

View File

@@ -2,12 +2,12 @@ const { logger } = require('@librechat/data-schemas');
const { CacheKeys, Constants } = require('librechat-data-provider');
const { findToken, createToken, updateToken, deleteTokens } = require('~/models');
const { getMCPManager, getFlowStateManager } = require('~/config');
const { updateMCPServerTools } = require('~/server/services/Config');
const { updateMCPUserTools } = require('~/server/services/Config');
const { getLogStores } = require('~/cache');
/**
* @param {Object} params
* @param {IUser} params.user - The user from the request object.
* @param {ServerRequest} params.req
* @param {string} params.serverName - The name of the MCP server
* @param {boolean} params.returnOnOAuth - Whether to initiate OAuth and return, or wait for OAuth flow to finish
* @param {AbortSignal} [params.signal] - The abort signal to handle cancellation.
@@ -18,7 +18,7 @@ const { getLogStores } = require('~/cache');
* @param {Record<string, Record<string, string>>} [params.userMCPAuthMap]
*/
async function reinitMCPServer({
user,
req,
signal,
forceNew,
serverName,
@@ -29,7 +29,7 @@ async function reinitMCPServer({
flowManager: _flowManager,
}) {
/** @type {MCPConnection | null} */
let connection = null;
let userConnection = null;
/** @type {LCAvailableTools | null} */
let availableTools = null;
/** @type {ReturnType<MCPConnection['fetchTools']> | null} */
@@ -44,14 +44,14 @@ async function reinitMCPServer({
const oauthStart =
_oauthStart ??
(async (authURL) => {
logger.info(`[MCP Reinitialize] OAuth URL received for ${serverName}`);
logger.info(`[MCP Reinitialize] OAuth URL received: ${authURL}`);
oauthUrl = authURL;
oauthRequired = true;
});
try {
connection = await mcpManager.getConnection({
user,
userConnection = await mcpManager.getUserConnection({
user: req.user,
signal,
forceNew,
oauthStart,
@@ -70,7 +70,7 @@ async function reinitMCPServer({
logger.info(`[MCP Reinitialize] Successfully established connection for ${serverName}`);
} catch (err) {
logger.info(`[MCP Reinitialize] getConnection threw error: ${err.message}`);
logger.info(`[MCP Reinitialize] getUserConnection threw error: ${err.message}`);
logger.info(
`[MCP Reinitialize] OAuth state - oauthRequired: ${oauthRequired}, oauthUrl: ${oauthUrl ? 'present' : 'null'}`,
);
@@ -95,9 +95,10 @@ async function reinitMCPServer({
}
}
if (connection && !oauthRequired) {
tools = await connection.fetchTools();
availableTools = await updateMCPServerTools({
if (userConnection && !oauthRequired) {
tools = await userConnection.fetchTools();
availableTools = await updateMCPUserTools({
userId: req.user.id,
serverName,
tools,
});
@@ -111,7 +112,7 @@ async function reinitMCPServer({
if (oauthRequired) {
return `MCP server '${serverName}' ready for OAuth authentication`;
}
if (connection) {
if (userConnection) {
return `MCP server '${serverName}' reinitialized successfully`;
}
return `Failed to reinitialize MCP server '${serverName}'`;
@@ -119,7 +120,7 @@ async function reinitMCPServer({
const result = {
availableTools,
success: Boolean((connection && !oauthRequired) || (oauthRequired && oauthUrl)),
success: Boolean((userConnection && !oauthRequired) || (oauthRequired && oauthUrl)),
message: getResponseMessage(),
oauthRequired,
serverName,
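As a usage sketch of the req-based signature from one side of this diff (route wiring is hypothetical, and an `oauthUrl` field on the result is an assumption — only availableTools, success, message, oauthRequired and serverName are visible above):

// Hypothetical caller; optional parameters of reinitMCPServer are left at their defaults.
async function handleReinit(req, res, serverName) {
  const result = await reinitMCPServer({ req, serverName, returnOnOAuth: true });
  if (result.oauthRequired) {
    // Surface the OAuth URL (when present) so the user can complete the flow.
    return res.status(200).json({ message: result.message, oauthUrl: result.oauthUrl });
  }
  if (!result.success) {
    return res.status(500).json({ message: result.message });
  }
  return res.status(200).json({
    serverName: result.serverName,
    tools: Object.keys(result.availableTools ?? {}),
  });
}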

View File

@@ -1,13 +1,14 @@
/**
* @param email
* @param allowedDomains
* @param {string} email
* @param {string[]} [allowedDomains]
* @returns {boolean}
*/
export function isEmailDomainAllowed(email: string, allowedDomains?: string[] | null): boolean {
function isEmailDomainAllowed(email, allowedDomains) {
if (!email) {
return false;
}
const domain = email.split('@')[1]?.toLowerCase();
const domain = email.split('@')[1];
if (!domain) {
return false;
@@ -19,15 +20,21 @@ export function isEmailDomainAllowed(email: string, allowedDomains?: string[] |
return true;
}
return allowedDomains.some((allowedDomain) => allowedDomain?.toLowerCase() === domain);
return allowedDomains.includes(domain);
}
/**
* Normalizes a domain string
* @param {string} domain
* @returns {string|null}
*/
/**
* Normalizes a domain string. If the domain is invalid, returns null.
* Normalized === lowercase, trimmed, and protocol added if missing.
* @param domain
* @param {string} domain
* @returns {string|null}
*/
function normalizeDomain(domain: string): string | null {
function normalizeDomain(domain) {
try {
let normalizedDomain = domain.toLowerCase().trim();
@@ -55,13 +62,11 @@ function normalizeDomain(domain: string): string | null {
/**
* Checks if the given domain is allowed. If no restrictions are set, allows all domains.
* @param domain
* @param allowedDomains
* @param {string} [domain]
* @param {string[]} [allowedDomains]
* @returns {Promise<boolean>}
*/
export async function isActionDomainAllowed(
domain?: string | null,
allowedDomains?: string[] | null,
): Promise<boolean> {
async function isActionDomainAllowed(domain, allowedDomains) {
if (!domain || typeof domain !== 'string') {
return false;
}
@@ -96,3 +101,5 @@ export async function isActionDomainAllowed(
return false;
}
module.exports = { isEmailDomainAllowed, isActionDomainAllowed };
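A short usage sketch of the two exports (the require path is the one used elsewhere in this diff; the wildcard result is assumed from the `*.test.com`-style entries exercised in the spec below rather than from code shown here):

const { isEmailDomainAllowed, isActionDomainAllowed } = require('~/server/services/domains');

async function demo() {
  // Email checks are synchronous and compare the part after '@' against the
  // allowed list; an empty or missing list allows everything.
  isEmailDomainAllowed('user@domain1.com', ['domain1.com', 'domain2.com']); // true
  isEmailDomainAllowed('user@domain3.com', ['domain1.com', 'domain2.com']); // false

  // Action domains are normalized first and may be listed with wildcards.
  await isActionDomainAllowed('example.com', ['example.com', '*.test.com']); // true
  await isActionDomainAllowed('api.test.com', ['example.com', '*.test.com']); // assumed true per the wildcard entries in the spec
}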

View File

@@ -1,5 +1,9 @@
/* eslint-disable @typescript-eslint/ban-ts-comment */
import { isEmailDomainAllowed, isActionDomainAllowed } from './domain';
const { isEmailDomainAllowed, isActionDomainAllowed } = require('~/server/services/domains');
const { getAppConfig } = require('~/server/services/Config');
jest.mock('~/server/services/Config', () => ({
getAppConfig: jest.fn(),
}));
describe('isEmailDomainAllowed', () => {
afterEach(() => {
@@ -20,72 +24,39 @@ describe('isEmailDomainAllowed', () => {
it('should return true if customConfig is not available', async () => {
const email = 'test@domain1.com';
getAppConfig.mockResolvedValue(null);
const result = isEmailDomainAllowed(email, null);
expect(result).toBe(true);
});
it('should return true if allowedDomains is not defined in customConfig', async () => {
const email = 'test@domain1.com';
getAppConfig.mockResolvedValue({});
const result = isEmailDomainAllowed(email, undefined);
expect(result).toBe(true);
});
it('should return true if domain is included in the allowedDomains', async () => {
const email = 'user@domain1.com';
getAppConfig.mockResolvedValue({
registration: {
allowedDomains: ['domain1.com', 'domain2.com'],
},
});
const result = isEmailDomainAllowed(email, ['domain1.com', 'domain2.com']);
expect(result).toBe(true);
});
it('should return false if domain is not included in the allowedDomains', async () => {
const email = 'user@domain3.com';
getAppConfig.mockResolvedValue({
registration: {
allowedDomains: ['domain1.com', 'domain2.com'],
},
});
const result = isEmailDomainAllowed(email, ['domain1.com', 'domain2.com']);
expect(result).toBe(false);
});
describe('case-insensitive domain matching', () => {
it('should match domains case-insensitively when email has uppercase domain', () => {
const email = 'user@DOMAIN1.COM';
const result = isEmailDomainAllowed(email, ['domain1.com', 'domain2.com']);
expect(result).toBe(true);
});
it('should match domains case-insensitively when allowedDomains has uppercase', () => {
const email = 'user@domain1.com';
const result = isEmailDomainAllowed(email, ['DOMAIN1.COM', 'DOMAIN2.COM']);
expect(result).toBe(true);
});
it('should match domains with mixed case in email', () => {
const email = 'user@Example.Com';
const result = isEmailDomainAllowed(email, ['example.com', 'domain2.com']);
expect(result).toBe(true);
});
it('should match domains with mixed case in allowedDomains', () => {
const email = 'user@example.com';
const result = isEmailDomainAllowed(email, ['Example.Com', 'Domain2.Com']);
expect(result).toBe(true);
});
it('should match when both email and allowedDomains have different mixed cases', () => {
const email = 'user@ExAmPlE.cOm';
const result = isEmailDomainAllowed(email, ['eXaMpLe.CoM', 'domain2.com']);
expect(result).toBe(true);
});
it('should still return false for non-matching domains regardless of case', () => {
const email = 'user@DOMAIN3.COM';
const result = isEmailDomainAllowed(email, ['domain1.com', 'DOMAIN2.COM']);
expect(result).toBe(false);
});
it('should handle null/undefined entries in allowedDomains gracefully', () => {
const email = 'user@domain1.com';
// @ts-expect-error Testing invalid input
const result = isEmailDomainAllowed(email, [null, 'DOMAIN1.COM', undefined]);
expect(result).toBe(true);
});
});
});
describe('isActionDomainAllowed', () => {
@@ -103,15 +74,15 @@ describe('isActionDomainAllowed', () => {
});
it('should return false for non-string inputs', async () => {
/** @ts-expect-error */
expect(await isActionDomainAllowed(123)).toBe(false);
/** @ts-expect-error */
expect(await isActionDomainAllowed({})).toBe(false);
/** @ts-expect-error */
expect(await isActionDomainAllowed([])).toBe(false);
});
it('should return false for invalid domain formats', async () => {
getAppConfig.mockResolvedValue({
actions: { allowedDomains: ['http://', 'https://'] },
});
expect(await isActionDomainAllowed('http://', ['http://', 'https://'])).toBe(false);
expect(await isActionDomainAllowed('https://', ['http://', 'https://'])).toBe(false);
});
@@ -120,14 +91,19 @@ describe('isActionDomainAllowed', () => {
// Configuration Tests
describe('configuration handling', () => {
it('should return true if customConfig is null', async () => {
getAppConfig.mockResolvedValue(null);
expect(await isActionDomainAllowed('example.com', null)).toBe(true);
});
it('should return true if actions.allowedDomains is not defined', async () => {
getAppConfig.mockResolvedValue({});
expect(await isActionDomainAllowed('example.com', undefined)).toBe(true);
});
it('should return true if allowedDomains is empty array', async () => {
getAppConfig.mockResolvedValue({
actions: { allowedDomains: [] },
});
expect(await isActionDomainAllowed('example.com', [])).toBe(true);
});
});
@@ -142,6 +118,14 @@ describe('isActionDomainAllowed', () => {
'swapi.dev',
];
beforeEach(() => {
getAppConfig.mockResolvedValue({
actions: {
allowedDomains,
},
});
});
it('should match exact domains', async () => {
expect(await isActionDomainAllowed('example.com', allowedDomains)).toBe(true);
expect(await isActionDomainAllowed('other.com', allowedDomains)).toBe(false);
@@ -175,6 +159,14 @@ describe('isActionDomainAllowed', () => {
describe('edge cases', () => {
const edgeAllowedDomains = ['example.com', '*.test.com'];
beforeEach(() => {
getAppConfig.mockResolvedValue({
actions: {
allowedDomains: edgeAllowedDomains,
},
});
});
it('should handle domains with query parameters', async () => {
expect(await isActionDomainAllowed('example.com?param=value', edgeAllowedDomains)).toBe(true);
});
@@ -194,9 +186,12 @@ describe('isActionDomainAllowed', () => {
it('should handle invalid entries in allowedDomains', async () => {
const invalidAllowedDomains = ['example.com', null, undefined, '', 'test.com'];
/** @ts-expect-error */
getAppConfig.mockResolvedValue({
actions: {
allowedDomains: invalidAllowedDomains,
},
});
expect(await isActionDomainAllowed('example.com', invalidAllowedDomains)).toBe(true);
/** @ts-expect-error */
expect(await isActionDomainAllowed('test.com', invalidAllowedDomains)).toBe(true);
});
});

View File

@@ -1,26 +0,0 @@
const { logger } = require('@librechat/data-schemas');
const { CacheKeys } = require('librechat-data-provider');
const { createOAuthReconnectionManager, getFlowStateManager } = require('~/config');
const { findToken, updateToken, createToken, deleteTokens } = require('~/models');
const { getLogStores } = require('~/cache');
/**
* Initialize OAuth reconnect manager
*/
async function initializeOAuthReconnectManager() {
try {
const flowManager = getFlowStateManager(getLogStores(CacheKeys.FLOWS));
const tokenMethods = {
findToken,
updateToken,
createToken,
deleteTokens,
};
await createOAuthReconnectionManager(flowManager, tokenMethods);
logger.info(`OAuth reconnect manager initialized successfully.`);
} catch (error) {
logger.error('Failed to initialize OAuth reconnect manager:', error);
}
}
module.exports = initializeOAuthReconnectManager;

View File

@@ -10,10 +10,6 @@ jest.mock('~/models/Message', () => ({
bulkSaveMessages: jest.fn(),
}));
jest.mock('~/models/ConversationTag', () => ({
bulkIncrementTagCounts: jest.fn(),
}));
let mockIdCounter = 0;
jest.mock('uuid', () => {
return {
@@ -26,13 +22,11 @@ jest.mock('uuid', () => {
const {
forkConversation,
duplicateConversation,
splitAtTargetLevel,
getAllMessagesUpToParent,
getMessagesUpToTargetLevel,
cloneMessagesWithTimestamps,
} = require('./fork');
const { bulkIncrementTagCounts } = require('~/models/ConversationTag');
const { getConvo, bulkSaveConvos } = require('~/models/Conversation');
const { getMessages, bulkSaveMessages } = require('~/models/Message');
const { createImportBatchBuilder } = require('./importBatchBuilder');
@@ -187,120 +181,6 @@ describe('forkConversation', () => {
}),
).rejects.toThrow('Failed to fetch messages');
});
test('should increment tag counts when forking conversation with tags', async () => {
const mockConvoWithTags = {
...mockConversation,
tags: ['bookmark1', 'bookmark2'],
};
getConvo.mockResolvedValue(mockConvoWithTags);
await forkConversation({
originalConvoId: 'abc123',
targetMessageId: '3',
requestUserId: 'user1',
option: ForkOptions.DIRECT_PATH,
});
// Verify that bulkIncrementTagCounts was called with correct tags
expect(bulkIncrementTagCounts).toHaveBeenCalledWith('user1', ['bookmark1', 'bookmark2']);
});
test('should handle conversation without tags when forking', async () => {
const mockConvoWithoutTags = {
...mockConversation,
// No tags field
};
getConvo.mockResolvedValue(mockConvoWithoutTags);
await forkConversation({
originalConvoId: 'abc123',
targetMessageId: '3',
requestUserId: 'user1',
option: ForkOptions.DIRECT_PATH,
});
// bulkIncrementTagCounts will be called with array containing undefined
expect(bulkIncrementTagCounts).toHaveBeenCalled();
});
test('should handle empty tags array when forking', async () => {
const mockConvoWithEmptyTags = {
...mockConversation,
tags: [],
};
getConvo.mockResolvedValue(mockConvoWithEmptyTags);
await forkConversation({
originalConvoId: 'abc123',
targetMessageId: '3',
requestUserId: 'user1',
option: ForkOptions.DIRECT_PATH,
});
// bulkIncrementTagCounts will be called with empty array
expect(bulkIncrementTagCounts).toHaveBeenCalledWith('user1', []);
});
});
describe('duplicateConversation', () => {
beforeEach(() => {
jest.clearAllMocks();
mockIdCounter = 0;
getConvo.mockResolvedValue(mockConversation);
getMessages.mockResolvedValue(mockMessages);
bulkSaveConvos.mockResolvedValue(null);
bulkSaveMessages.mockResolvedValue(null);
bulkIncrementTagCounts.mockResolvedValue(null);
});
test('should duplicate conversation and increment tag counts', async () => {
const mockConvoWithTags = {
...mockConversation,
tags: ['important', 'work', 'project'],
};
getConvo.mockResolvedValue(mockConvoWithTags);
await duplicateConversation({
userId: 'user1',
conversationId: 'abc123',
});
// Verify that bulkIncrementTagCounts was called with correct tags
expect(bulkIncrementTagCounts).toHaveBeenCalledWith('user1', ['important', 'work', 'project']);
});
test('should duplicate conversation without tags', async () => {
const mockConvoWithoutTags = {
...mockConversation,
// No tags field
};
getConvo.mockResolvedValue(mockConvoWithoutTags);
await duplicateConversation({
userId: 'user1',
conversationId: 'abc123',
});
// bulkIncrementTagCounts will be called with array containing undefined
expect(bulkIncrementTagCounts).toHaveBeenCalled();
});
test('should handle empty tags array when duplicating', async () => {
const mockConvoWithEmptyTags = {
...mockConversation,
tags: [],
};
getConvo.mockResolvedValue(mockConvoWithEmptyTags);
await duplicateConversation({
userId: 'user1',
conversationId: 'abc123',
});
// bulkIncrementTagCounts will be called with empty array
expect(bulkIncrementTagCounts).toHaveBeenCalledWith('user1', []);
});
});
const mockMessagesComplex = [

View File

@@ -1,6 +1,5 @@
const { v4: uuidv4 } = require('uuid');
const { EModelEndpoint, Constants, openAISettings } = require('librechat-data-provider');
const { bulkIncrementTagCounts } = require('~/models/ConversationTag');
const { bulkSaveConvos } = require('~/models/Conversation');
const { bulkSaveMessages } = require('~/models/Message');
const { logger } = require('~/config');
@@ -94,22 +93,13 @@ class ImportBatchBuilder {
/**
* Saves the batch of conversations and messages to the DB.
* Also increments tag counts for any existing tags.
* @returns {Promise<void>} A promise that resolves when the batch is saved.
* @throws {Error} If there is an error saving the batch.
*/
async saveBatch() {
try {
const promises = [];
promises.push(bulkSaveConvos(this.conversations));
promises.push(bulkSaveMessages(this.messages, true));
promises.push(
bulkIncrementTagCounts(
this.requestUserId,
this.conversations.flatMap((convo) => convo.tags),
),
);
await Promise.all(promises);
await bulkSaveConvos(this.conversations);
await bulkSaveMessages(this.messages, true);
logger.debug(
`user: ${this.requestUserId} | Added ${this.conversations.length} conversations and ${this.messages.length} messages to the DB.`,
);
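The two versions of saveBatch above differ mainly in whether the writes run concurrently; a small illustration of that trade-off, with placeholder functions standing in for the bulkSaveConvos/bulkSaveMessages calls:

// Illustration only; saveConvos/saveMessages are placeholders.
async function saveSequentially(saveConvos, saveMessages) {
  await saveConvos(); // the second write never starts if the first throws
  await saveMessages();
}

async function saveConcurrently(saveConvos, saveMessages) {
  // Both writes start immediately; Promise.all rejects on the first failure,
  // but the other write keeps running in the background.
  await Promise.all([saveConvos(), saveMessages()]);
}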

View File

@@ -1,10 +1,11 @@
const fs = require('fs');
const path = require('path');
const axios = require('axios');
const FormData = require('form-data');
const nodemailer = require('nodemailer');
const handlebars = require('handlebars');
const { logger } = require('@librechat/data-schemas');
const { logAxiosError, isEnabled, readFileAsString } = require('@librechat/api');
const { logAxiosError, isEnabled } = require('@librechat/api');
/**
* Sends an email using Mailgun API.
@@ -92,7 +93,8 @@ const sendEmailViaSMTP = async ({ transporterOptions, mailOptions }) => {
*/
const sendEmail = async ({ email, subject, payload, template, throwError = true }) => {
try {
const { content: source } = await readFileAsString(path.join(__dirname, 'emails', template));
// Read and compile the email template
const source = fs.readFileSync(path.join(__dirname, 'emails', template), 'utf8');
const compiledTemplate = handlebars.compile(source);
const html = compiledTemplate(payload);
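Either way, the template ends up compiled by handlebars; a tiny self-contained sketch of that step (the template filename and payload fields are placeholders):

const fs = require('fs');
const path = require('path');
const handlebars = require('handlebars');

// Placeholder template/payload; the real call reads from the `emails` directory.
const templateSource = fs.readFileSync(
  path.join(__dirname, 'emails', 'passwordReset.handlebars'),
  'utf8',
);
const renderedHtml = handlebars.compile(templateSource)({
  name: 'Ada',
  link: 'https://example.com/reset',
});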

View File

@@ -1,9 +1,10 @@
const fs = require('fs');
const LdapStrategy = require('passport-ldapauth');
const { logger } = require('@librechat/data-schemas');
const { isEnabled, getBalanceConfig } = require('@librechat/api');
const { SystemRoles, ErrorTypes } = require('librechat-data-provider');
const { isEnabled, getBalanceConfig, isEmailDomainAllowed } = require('@librechat/api');
const { createUser, findUser, updateUser, countUsers } = require('~/models');
const { isEmailDomainAllowed } = require('~/server/services/domains');
const { getAppConfig } = require('~/server/services/Config');
const {
@@ -108,8 +109,7 @@ const ldapLogin = new LdapStrategy(ldapOptions, async (userinfo, done) => {
const username =
(LDAP_USERNAME && userinfo[LDAP_USERNAME]) || userinfo.givenName || userinfo.mail;
let mail = (LDAP_EMAIL && userinfo[LDAP_EMAIL]) || userinfo.mail || username + '@ldap.local';
mail = Array.isArray(mail) ? mail[0] : mail;
const mail = (LDAP_EMAIL && userinfo[LDAP_EMAIL]) || userinfo.mail || username + '@ldap.local';
if (!userinfo.mail && !(LDAP_EMAIL && userinfo[LDAP_EMAIL])) {
logger.warn(

View File

@@ -1,183 +0,0 @@
// --- Mocks ---
jest.mock('@librechat/data-schemas', () => ({
logger: {
info: jest.fn(),
warn: jest.fn(),
debug: jest.fn(),
error: jest.fn(),
},
}));
jest.mock('@librechat/api', () => ({
// isEnabled used for TLS flags
isEnabled: jest.fn(() => false),
isEmailDomainAllowed: jest.fn(() => true),
getBalanceConfig: jest.fn(() => ({ enabled: false })),
}));
jest.mock('~/models', () => ({
findUser: jest.fn(),
createUser: jest.fn(),
updateUser: jest.fn(),
countUsers: jest.fn(),
}));
jest.mock('~/server/services/Config', () => ({
getAppConfig: jest.fn().mockResolvedValue({}),
}));
// Mock passport-ldapauth to capture verify callback
let verifyCallback;
jest.mock('passport-ldapauth', () => {
return jest.fn().mockImplementation((options, verify) => {
verifyCallback = verify; // capture the strategy verify function
return { name: 'ldap', options, verify };
});
});
const { ErrorTypes } = require('librechat-data-provider');
const { isEmailDomainAllowed } = require('@librechat/api');
const { findUser, createUser, updateUser, countUsers } = require('~/models');
// Helper to call the verify callback and wrap in a Promise for convenience
const callVerify = (userinfo) =>
new Promise((resolve, reject) => {
verifyCallback(userinfo, (err, user, info) => {
if (err) return reject(err);
resolve({ user, info });
});
});
describe('ldapStrategy', () => {
beforeEach(() => {
jest.clearAllMocks();
// minimal required env for ldapStrategy module to export
process.env.LDAP_URL = 'ldap://example.com';
process.env.LDAP_USER_SEARCH_BASE = 'ou=users,dc=example,dc=com';
// Unset optional envs to exercise defaults
delete process.env.LDAP_CA_CERT_PATH;
delete process.env.LDAP_FULL_NAME;
delete process.env.LDAP_ID;
delete process.env.LDAP_USERNAME;
delete process.env.LDAP_EMAIL;
delete process.env.LDAP_TLS_REJECT_UNAUTHORIZED;
delete process.env.LDAP_STARTTLS;
// Default model/domain mocks
findUser.mockReset().mockResolvedValue(null);
createUser.mockReset().mockResolvedValue('newUserId');
updateUser.mockReset().mockImplementation(async (id, user) => ({ _id: id, ...user }));
countUsers.mockReset().mockResolvedValue(0);
isEmailDomainAllowed.mockReset().mockReturnValue(true);
// Ensure requiring the strategy sets up the verify callback
jest.isolateModules(() => {
require('./ldapStrategy');
});
});
it('uses the first email when LDAP returns multiple emails (array)', async () => {
const userinfo = {
uid: 'uid123',
givenName: 'Alice',
cn: 'Alice Doe',
mail: ['first@example.com', 'second@example.com'],
};
const { user } = await callVerify(userinfo);
expect(user.email).toBe('first@example.com');
expect(createUser).toHaveBeenCalledWith(
expect.objectContaining({
provider: 'ldap',
ldapId: 'uid123',
username: 'Alice',
email: 'first@example.com',
emailVerified: true,
name: 'Alice Doe',
}),
expect.any(Object),
);
});
it('blocks login if an existing user has a different provider', async () => {
findUser.mockResolvedValue({ _id: 'u1', email: 'first@example.com', provider: 'google' });
const userinfo = {
uid: 'uid123',
mail: 'first@example.com',
givenName: 'Alice',
cn: 'Alice Doe',
};
const { user, info } = await callVerify(userinfo);
expect(user).toBe(false);
expect(info).toEqual({ message: ErrorTypes.AUTH_FAILED });
expect(createUser).not.toHaveBeenCalled();
});
it('updates an existing ldap user with current LDAP info', async () => {
const existing = {
_id: 'u2',
provider: 'ldap',
email: 'old@example.com',
ldapId: 'uid123',
username: 'olduser',
name: 'Old Name',
};
findUser.mockResolvedValue(existing);
const userinfo = {
uid: 'uid123',
mail: 'new@example.com',
givenName: 'NewFirst',
cn: 'NewFirst NewLast',
};
const { user } = await callVerify(userinfo);
expect(createUser).not.toHaveBeenCalled();
expect(updateUser).toHaveBeenCalledWith(
'u2',
expect.objectContaining({
provider: 'ldap',
ldapId: 'uid123',
email: 'new@example.com',
username: 'NewFirst',
name: 'NewFirst NewLast',
}),
);
expect(user.email).toBe('new@example.com');
});
it('falls back to username@ldap.local when no email attributes are present', async () => {
const userinfo = {
uid: 'uid999',
givenName: 'John',
cn: 'John Doe',
// no mail and no custom LDAP_EMAIL
};
const { user } = await callVerify(userinfo);
expect(user.email).toBe('John@ldap.local');
});
it('denies login if email domain is not allowed', async () => {
isEmailDomainAllowed.mockReturnValue(false);
const userinfo = {
uid: 'uid123',
mail: 'notallowed@blocked.com',
givenName: 'Alice',
cn: 'Alice Doe',
};
const { user, info } = await callVerify(userinfo);
expect(user).toBe(false);
expect(info).toEqual({ message: 'Email domain not allowed' });
});
});

View File

@@ -41,18 +41,13 @@ const openIdJwtLogin = (openIdConfig) => {
jwtFromRequest: ExtractJwt.fromAuthHeaderAsBearerToken(),
secretOrKeyProvider: jwksRsa.passportJwtSecret(jwksRsaOptions),
},
/**
* @param {import('openid-client').IDToken} payload
* @param {import('passport-jwt').VerifyCallback} done
*/
async (payload, done) => {
try {
const { user, error, migration } = await findOpenIDUser({
findUser,
email: payload?.email,
openidId: payload?.sub,
idOnTheSource: payload?.oid,
email: payload?.email,
strategyName: 'openIdJwtLogin',
findUser,
});
if (error) {

View File

@@ -13,9 +13,9 @@ const {
safeStringify,
findOpenIDUser,
getBalanceConfig,
isEmailDomainAllowed,
} = require('@librechat/api');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { isEmailDomainAllowed } = require('~/server/services/domains');
const { findUser, createUser, updateUser } = require('~/models');
const { getAppConfig } = require('~/server/services/Config');
const getLogStores = require('~/cache/getLogStores');
@@ -337,10 +337,6 @@ async function setupOpenId() {
clockTolerance: process.env.OPENID_CLOCK_TOLERANCE || 300,
usePKCE,
},
/**
* @param {import('openid-client').TokenEndpointResponseHelpers} tokenset
* @param {import('passport-jwt').VerifyCallback} done
*/
async (tokenset, done) => {
try {
const claims = tokenset.claims();
@@ -358,11 +354,10 @@ async function setupOpenId() {
}
const result = await findOpenIDUser({
findUser,
email: claims.email,
openidId: claims.sub,
idOnTheSource: claims.oid,
email: claims.email,
strategyName: 'openidStrategy',
findUser,
});
let user = result.user;
const error = result.error;
@@ -376,10 +371,6 @@ async function setupOpenId() {
const fullName = getFullName(userinfo);
if (requiredRole) {
const requiredRoles = requiredRole
.split(',')
.map((role) => role.trim())
.filter(Boolean);
let decodedToken = '';
if (requiredRoleTokenKind === 'access') {
decodedToken = jwtDecode(tokenset.access_token);
@@ -402,13 +393,9 @@ async function setupOpenId() {
);
}
if (!requiredRoles.some((role) => roles.includes(role))) {
const rolesList =
requiredRoles.length === 1
? `"${requiredRoles[0]}"`
: `one of: ${requiredRoles.map((r) => `"${r}"`).join(', ')}`;
if (!roles.includes(requiredRole)) {
return done(null, false, {
message: `You must have ${rolesList} role to log in.`,
message: `You must have the "${requiredRole}" role to log in.`,
});
}
}
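Condensed from the hunk above, the comma-separated role check boils down to the following sketch (only the parsing and membership test; the token decode is elided, and `rolesFromToken` stands in for the roles extracted from the decoded access/id token):

// Sketch of the multi-role check shown above.
function checkRequiredRoles(requiredRole, rolesFromToken) {
  const requiredRoles = requiredRole
    .split(',')
    .map((role) => role.trim())
    .filter(Boolean);
  if (requiredRoles.some((role) => rolesFromToken.includes(role))) {
    return { ok: true };
  }
  const rolesList =
    requiredRoles.length === 1
      ? `"${requiredRoles[0]}"`
      : `one of: ${requiredRoles.map((r) => `"${r}"`).join(', ')}`;
  return { ok: false, message: `You must have ${rolesList} role to log in.` };
}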
@@ -441,10 +428,6 @@ async function setupOpenId() {
user.username = username;
user.name = fullName;
user.idOnTheSource = userinfo.oid;
if (userinfo.email && userinfo.email !== user.email) {
user.email = userinfo.email;
user.emailVerified = userinfo.email_verified || false;
}
}
if (!!userinfo && userinfo.picture && !user.avatar?.includes('manual=true')) {

View File

@@ -274,7 +274,10 @@ describe('setupOpenId', () => {
name: '',
};
findUser.mockImplementation(async (query) => {
if (query.openidId === tokenset.claims().sub || query.email === tokenset.claims().email) {
if (
query.openidId === tokenset.claims().sub ||
(query.email === tokenset.claims().email && query.provider === 'openid')
) {
return existingUser;
}
return null;
@@ -335,25 +338,7 @@ describe('setupOpenId', () => {
// Assert verify that the strategy rejects login
expect(user).toBe(false);
expect(details.message).toBe('You must have "requiredRole" role to log in.');
});
it('should allow login when single required role is present (backward compatibility)', async () => {
// Arrange ensure single role configuration (as set in beforeEach)
// OPENID_REQUIRED_ROLE = 'requiredRole'
// Default jwtDecode mock in beforeEach already returns this role
jwtDecode.mockReturnValue({
roles: ['requiredRole', 'anotherRole'],
});
// Act
const { user } = await validate(tokenset);
// Assert: verify that login succeeds with single role configuration
expect(user).toBeTruthy();
expect(user.email).toBe(tokenset.claims().email);
expect(user.username).toBe(tokenset.claims().preferred_username);
expect(createUser).toHaveBeenCalled();
expect(details.message).toBe('You must have the "requiredRole" role to log in.');
});
it('should attempt to download and save the avatar if picture is provided', async () => {
@@ -379,58 +364,6 @@ describe('setupOpenId', () => {
// Depending on your implementation, user.avatar may be undefined or an empty string.
});
it('should support comma-separated multiple roles', async () => {
// Arrange
process.env.OPENID_REQUIRED_ROLE = 'someRole,anotherRole,admin';
await setupOpenId(); // Re-initialize the strategy
verifyCallback = require('openid-client/passport').__getVerifyCallback();
jwtDecode.mockReturnValue({
roles: ['anotherRole', 'aThirdRole'],
});
// Act
const { user } = await validate(tokenset);
// Assert
expect(user).toBeTruthy();
expect(user.email).toBe(tokenset.claims().email);
});
it('should reject login when user has none of the required multiple roles', async () => {
// Arrange
process.env.OPENID_REQUIRED_ROLE = 'someRole,anotherRole,admin';
await setupOpenId(); // Re-initialize the strategy
verifyCallback = require('openid-client/passport').__getVerifyCallback();
jwtDecode.mockReturnValue({
roles: ['aThirdRole', 'aFourthRole'],
});
// Act
const { user, details } = await validate(tokenset);
// Assert
expect(user).toBe(false);
expect(details.message).toBe(
'You must have one of: "someRole", "anotherRole", "admin" role to log in.',
);
});
it('should handle spaces in comma-separated roles', async () => {
// Arrange
process.env.OPENID_REQUIRED_ROLE = ' someRole , anotherRole , admin ';
await setupOpenId(); // Re-initialize the strategy
verifyCallback = require('openid-client/passport').__getVerifyCallback();
jwtDecode.mockReturnValue({
roles: ['someRole'],
});
// Act
const { user } = await validate(tokenset);
// Assert
expect(user).toBeTruthy();
});
it('should default to usePKCE false when OPENID_USE_PKCE is not defined', async () => {
const OpenIDStrategy = require('openid-client/passport').Strategy;

View File

@@ -2,11 +2,12 @@ const fs = require('fs');
const path = require('path');
const fetch = require('node-fetch');
const passport = require('passport');
const { getBalanceConfig } = require('@librechat/api');
const { ErrorTypes } = require('librechat-data-provider');
const { hashToken, logger } = require('@librechat/data-schemas');
const { Strategy: SamlStrategy } = require('@node-saml/passport-saml');
const { getBalanceConfig, isEmailDomainAllowed } = require('@librechat/api');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { isEmailDomainAllowed } = require('~/server/services/domains');
const { findUser, createUser, updateUser } = require('~/models');
const { getAppConfig } = require('~/server/services/Config');
const paths = require('~/config/paths');

View File

@@ -26,7 +26,6 @@ jest.mock('~/server/services/Config', () => ({
getAppConfig: jest.fn().mockResolvedValue({}),
}));
jest.mock('@librechat/api', () => ({
isEmailDomainAllowed: jest.fn(() => true),
getBalanceConfig: jest.fn(() => ({
tokenCredits: 1000,
startBalance: 1000,

View File

@@ -1,7 +1,8 @@
const { isEnabled } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { ErrorTypes } = require('librechat-data-provider');
const { isEnabled, isEmailDomainAllowed } = require('@librechat/api');
const { createSocialUser, handleExistingUser } = require('./process');
const { isEmailDomainAllowed } = require('~/server/services/domains');
const { getAppConfig } = require('~/server/services/Config');
const { findUser } = require('~/models');

View File

@@ -46,7 +46,7 @@ describe('fileSearch.js - test only new file_id and page additions', () => {
queryVectors.mockResolvedValue(mockResults);
const fileSearchTool = await createFileSearchTool({
userId: 'user1',
req: { user: { id: 'user1' } },
files: mockFiles,
entity_id: 'agent-123',
});

View File

@@ -873,13 +873,6 @@
* @typedef {import('@librechat/data-schemas').IMongoFile} MongoFile
* @memberof typedefs
*/
/**
* @exports ISession
* @typedef {import('@librechat/data-schemas').ISession} ISession
* @memberof typedefs
*/
/**
* @exports IBalance
* @typedef {import('@librechat/data-schemas').IBalance} IBalance

View File

@@ -262,15 +262,6 @@ describe('getModelMaxTokens', () => {
expect(getModelMaxTokens('gemini-1.5-pro-preview-0409', EModelEndpoint.google)).toBe(
maxTokensMap[EModelEndpoint.google]['gemini-1.5'],
);
expect(getModelMaxTokens('gemini-2.5-pro', EModelEndpoint.google)).toBe(
maxTokensMap[EModelEndpoint.google]['gemini-2.5-pro'],
);
expect(getModelMaxTokens('gemini-2.5-flash', EModelEndpoint.google)).toBe(
maxTokensMap[EModelEndpoint.google]['gemini-2.5-flash'],
);
expect(getModelMaxTokens('gemini-2.5-flash-lite', EModelEndpoint.google)).toBe(
maxTokensMap[EModelEndpoint.google]['gemini-2.5-flash-lite'],
);
expect(getModelMaxTokens('gemini-pro-vision', EModelEndpoint.google)).toBe(
maxTokensMap[EModelEndpoint.google]['gemini-pro-vision'],
);

View File

@@ -1,4 +1,3 @@
/** v0.8.0 */
module.exports = {
roots: ['<rootDir>/src'],
testEnvironment: 'jsdom',
@@ -29,8 +28,7 @@ module.exports = {
'jest-file-loader',
'^test/(.*)$': '<rootDir>/test/$1',
'^~/(.*)$': '<rootDir>/src/$1',
'^librechat-data-provider/react-query$':
'<rootDir>/../node_modules/librechat-data-provider/src/react-query',
'^librechat-data-provider/react-query$': '<rootDir>/../node_modules/librechat-data-provider/src/react-query',
},
restoreMocks: true,
testResultsProcessor: 'jest-junit',

View File

@@ -1,6 +1,6 @@
{
"name": "@librechat/frontend",
"version": "v0.8.0",
"version": "v0.8.0-rc3",
"description": "",
"type": "module",
"scripts": {

View File

@@ -1,15 +1,13 @@
import React, { createContext, useContext, useState, useMemo } from 'react';
import { EModelEndpoint } from 'librechat-data-provider';
import type { MCP, Action, TPlugin } from 'librechat-data-provider';
import { Constants, EModelEndpoint } from 'librechat-data-provider';
import type { MCP, Action, TPlugin, AgentToolType } from 'librechat-data-provider';
import type { AgentPanelContextType, MCPServerInfo } from '~/common';
import {
useAvailableToolsQuery,
useGetActionsQuery,
useGetStartupConfig,
useMCPToolsQuery,
} from '~/data-provider';
import { useAvailableToolsQuery, useGetActionsQuery, useGetStartupConfig } from '~/data-provider';
import { useLocalize, useGetAgentsConfig, useMCPConnectionStatus } from '~/hooks';
import { Panel, isEphemeralAgent } from '~/common';
import { Panel } from '~/common';
type GroupedToolType = AgentToolType & { tools?: AgentToolType[] };
type GroupedToolsRecord = Record<string, GroupedToolType>;
const AgentPanelContext = createContext<AgentPanelContextType | undefined>(undefined);
@@ -30,67 +28,79 @@ export function AgentPanelProvider({ children }: { children: React.ReactNode })
const [activePanel, setActivePanel] = useState<Panel>(Panel.builder);
const [agent_id, setCurrentAgentId] = useState<string | undefined>(undefined);
const { data: startupConfig } = useGetStartupConfig();
const { data: actions } = useGetActionsQuery(EModelEndpoint.agents, {
enabled: !isEphemeralAgent(agent_id),
enabled: !!agent_id,
});
const { data: regularTools } = useAvailableToolsQuery(EModelEndpoint.agents, {
enabled: !isEphemeralAgent(agent_id),
const { data: pluginTools } = useAvailableToolsQuery(EModelEndpoint.agents, {
enabled: !!agent_id,
});
const { data: mcpData } = useMCPToolsQuery({
enabled: !isEphemeralAgent(agent_id) && startupConfig?.mcpServers != null,
});
const { agentsConfig, endpointsConfig } = useGetAgentsConfig();
const { data: startupConfig } = useGetStartupConfig();
const mcpServerNames = useMemo(
() => Object.keys(startupConfig?.mcpServers ?? {}),
[startupConfig],
);
const { connectionStatus } = useMCPConnectionStatus({
enabled: !isEphemeralAgent(agent_id) && mcpServerNames.length > 0,
enabled: !!agent_id && mcpServerNames.length > 0,
});
const mcpServersMap = useMemo(() => {
const processedData = useMemo(() => {
if (!pluginTools) {
return {
tools: [],
groupedTools: {},
mcpServersMap: new Map<string, MCPServerInfo>(),
};
}
const tools: AgentToolType[] = [];
const groupedTools: GroupedToolsRecord = {};
const configuredServers = new Set(mcpServerNames);
const serversMap = new Map<string, MCPServerInfo>();
const mcpServersMap = new Map<string, MCPServerInfo>();
if (mcpData?.servers) {
for (const [serverName, serverData] of Object.entries(mcpData.servers)) {
const metadata = {
name: serverName,
pluginKey: serverName,
description: `${localize('com_ui_tool_collection_prefix')} ${serverName}`,
icon: serverData.icon || '',
authConfig: serverData.authConfig,
authenticated: serverData.authenticated,
} as TPlugin;
for (const pluginTool of pluginTools) {
const tool: AgentToolType = {
tool_id: pluginTool.pluginKey,
metadata: pluginTool as TPlugin,
};
const tools = serverData.tools.map((tool) => ({
tool_id: tool.pluginKey,
metadata: {
...tool,
icon: serverData.icon,
authConfig: serverData.authConfig,
authenticated: serverData.authenticated,
} as TPlugin,
}));
tools.push(tool);
serversMap.set(serverName, {
serverName,
tools,
isConfigured: configuredServers.has(serverName),
isConnected: connectionStatus?.[serverName]?.connectionState === 'connected',
metadata,
});
if (tool.tool_id.includes(Constants.mcp_delimiter)) {
const [_toolName, serverName] = tool.tool_id.split(Constants.mcp_delimiter);
if (!mcpServersMap.has(serverName)) {
const metadata = {
name: serverName,
pluginKey: serverName,
description: `${localize('com_ui_tool_collection_prefix')} ${serverName}`,
icon: pluginTool.icon || '',
} as TPlugin;
mcpServersMap.set(serverName, {
serverName,
tools: [],
isConfigured: configuredServers.has(serverName),
isConnected: connectionStatus?.[serverName]?.connectionState === 'connected',
metadata,
});
}
mcpServersMap.get(serverName)!.tools.push(tool);
} else {
// Non-MCP tool
groupedTools[tool.tool_id] = {
tool_id: tool.tool_id,
metadata: tool.metadata,
};
}
}
// Add configured servers that don't have tools yet
for (const mcpServerName of mcpServerNames) {
if (serversMap.has(mcpServerName)) {
if (mcpServersMap.has(mcpServerName)) {
continue;
}
const metadata = {
@@ -100,7 +110,7 @@ export function AgentPanelProvider({ children }: { children: React.ReactNode })
description: `${localize('com_ui_tool_collection_prefix')} ${mcpServerName}`,
} as TPlugin;
serversMap.set(mcpServerName, {
mcpServersMap.set(mcpServerName, {
tools: [],
metadata,
isConfigured: true,
@@ -109,8 +119,14 @@ export function AgentPanelProvider({ children }: { children: React.ReactNode })
});
}
return serversMap;
}, [mcpData, localize, mcpServerNames, connectionStatus]);
return {
tools,
groupedTools,
mcpServersMap,
};
}, [pluginTools, localize, mcpServerNames, connectionStatus]);
const { agentsConfig, endpointsConfig } = useGetAgentsConfig();
const value: AgentPanelContextType = {
mcp,
@@ -121,14 +137,16 @@ export function AgentPanelProvider({ children }: { children: React.ReactNode })
setMcps,
agent_id,
setAction,
pluginTools,
activePanel,
regularTools,
agentsConfig,
startupConfig,
mcpServersMap,
setActivePanel,
endpointsConfig,
setCurrentAgentId,
tools: processedData.tools,
groupedTools: processedData.groupedTools,
mcpServersMap: processedData.mcpServersMap,
};
return <AgentPanelContext.Provider value={value}>{children}</AgentPanelContext.Provider>;
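The grouping above keys MCP tools off the delimiter in their ids; a stripped-down sketch of just that step, with metadata and React state omitted (the '::' value mirrors Constants.mcp_delimiter from librechat-data-provider, and tool ids follow the `toolName::serverName` shape seen in the diff):

// Sketch only: group tool ids by MCP server, leaving non-MCP tools standalone.
const MCP_DELIMITER = '::'; // Constants.mcp_delimiter

function groupByMcpServer(toolIds) {
  const byServer = new Map();
  const standalone = [];
  for (const id of toolIds) {
    if (!id.includes(MCP_DELIMITER)) {
      standalone.push(id);
      continue;
    }
    const [, serverName] = id.split(MCP_DELIMITER);
    if (!byServer.has(serverName)) {
      byServer.set(serverName, []);
    }
    byServer.get(serverName).push(id);
  }
  return { byServer, standalone };
}

// e.g. groupByMcpServer(['search::github', 'issues::github', 'calculator'])
//   -> byServer: Map { 'github' => ['search::github', 'issues::github'] }, standalone: ['calculator']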

View File

@@ -1,32 +0,0 @@
import React, { createContext, useContext, useMemo } from 'react';
import { useChatContext } from './ChatContext';
interface DragDropContextValue {
conversationId: string | null | undefined;
agentId: string | null | undefined;
}
const DragDropContext = createContext<DragDropContextValue | undefined>(undefined);
export function DragDropProvider({ children }: { children: React.ReactNode }) {
const { conversation } = useChatContext();
/** Context value only created when conversation fields change */
const contextValue = useMemo<DragDropContextValue>(
() => ({
conversationId: conversation?.conversationId,
agentId: conversation?.agent_id,
}),
[conversation?.conversationId, conversation?.agent_id],
);
return <DragDropContext.Provider value={contextValue}>{children}</DragDropContext.Provider>;
}
export function useDragDropContext() {
const context = useContext(DragDropContext);
if (!context) {
throw new Error('useDragDropContext must be used within DragDropProvider');
}
return context;
}

View File

@@ -1,15 +1,10 @@
import { createContext, useContext } from 'react';
type MessageContext = {
messageId: string;
nextType?: string;
partIndex?: number;
isExpanded: boolean;
conversationId?: string | null;
/** Submission state for cursor display - only true for latest message when submitting */
isSubmitting?: boolean;
/** Whether this is the latest message in the conversation */
isLatestMessage?: boolean;
};
export const MessageContext = createContext<MessageContext>({} as MessageContext);

View File

@@ -1,150 +0,0 @@
import React, { createContext, useContext, useMemo } from 'react';
import { useAddedChatContext } from './AddedChatContext';
import { useChatContext } from './ChatContext';
interface MessagesViewContextValue {
/** Core conversation data */
conversation: ReturnType<typeof useChatContext>['conversation'];
conversationId: string | null | undefined;
/** Submission and control states */
isSubmitting: ReturnType<typeof useChatContext>['isSubmitting'];
isSubmittingFamily: boolean;
abortScroll: ReturnType<typeof useChatContext>['abortScroll'];
setAbortScroll: ReturnType<typeof useChatContext>['setAbortScroll'];
/** Message operations */
ask: ReturnType<typeof useChatContext>['ask'];
regenerate: ReturnType<typeof useChatContext>['regenerate'];
handleContinue: ReturnType<typeof useChatContext>['handleContinue'];
/** Message state management */
index: ReturnType<typeof useChatContext>['index'];
latestMessage: ReturnType<typeof useChatContext>['latestMessage'];
setLatestMessage: ReturnType<typeof useChatContext>['setLatestMessage'];
getMessages: ReturnType<typeof useChatContext>['getMessages'];
setMessages: ReturnType<typeof useChatContext>['setMessages'];
}
const MessagesViewContext = createContext<MessagesViewContextValue | undefined>(undefined);
export function MessagesViewProvider({ children }: { children: React.ReactNode }) {
const chatContext = useChatContext();
const addedChatContext = useAddedChatContext();
const {
ask,
index,
regenerate,
isSubmitting: isSubmittingRoot,
conversation,
latestMessage,
setAbortScroll,
handleContinue,
setLatestMessage,
abortScroll,
getMessages,
setMessages,
} = chatContext;
const { isSubmitting: isSubmittingAdditional } = addedChatContext;
/** Memoize conversation-related values */
const conversationValues = useMemo(
() => ({
conversation,
conversationId: conversation?.conversationId,
}),
[conversation],
);
/** Memoize submission states */
const submissionStates = useMemo(
() => ({
isSubmitting: isSubmittingRoot,
isSubmittingFamily: isSubmittingRoot || isSubmittingAdditional,
abortScroll,
setAbortScroll,
}),
[isSubmittingRoot, isSubmittingAdditional, abortScroll, setAbortScroll],
);
/** Memoize message operations (these are typically stable references) */
const messageOperations = useMemo(
() => ({
ask,
regenerate,
getMessages,
setMessages,
handleContinue,
}),
[ask, regenerate, handleContinue, getMessages, setMessages],
);
/** Memoize message state values */
const messageState = useMemo(
() => ({
index,
latestMessage,
setLatestMessage,
}),
[index, latestMessage, setLatestMessage],
);
/** Combine all values into final context value */
const contextValue = useMemo<MessagesViewContextValue>(
() => ({
...conversationValues,
...submissionStates,
...messageOperations,
...messageState,
}),
[conversationValues, submissionStates, messageOperations, messageState],
);
return (
<MessagesViewContext.Provider value={contextValue}>{children}</MessagesViewContext.Provider>
);
}
export function useMessagesViewContext() {
const context = useContext(MessagesViewContext);
if (!context) {
throw new Error('useMessagesViewContext must be used within MessagesViewProvider');
}
return context;
}
/** Hook for components that only need conversation data */
export function useMessagesConversation() {
const { conversation, conversationId } = useMessagesViewContext();
return useMemo(() => ({ conversation, conversationId }), [conversation, conversationId]);
}
/** Hook for components that only need submission states */
export function useMessagesSubmission() {
const { isSubmitting, isSubmittingFamily, abortScroll, setAbortScroll } =
useMessagesViewContext();
return useMemo(
() => ({ isSubmitting, isSubmittingFamily, abortScroll, setAbortScroll }),
[isSubmitting, isSubmittingFamily, abortScroll, setAbortScroll],
);
}
/** Hook for components that only need message operations */
export function useMessagesOperations() {
const { ask, regenerate, handleContinue, getMessages, setMessages } = useMessagesViewContext();
return useMemo(
() => ({ ask, regenerate, handleContinue, getMessages, setMessages }),
[ask, regenerate, handleContinue, getMessages, setMessages],
);
}
/** Hook for components that only need message state */
export function useMessagesState() {
const { index, latestMessage, setLatestMessage } = useMessagesViewContext();
return useMemo(
() => ({ index, latestMessage, setLatestMessage }),
[index, latestMessage, setLatestMessage],
);
}

View File

@@ -1,10 +1,9 @@
import React, { createContext, useContext, ReactNode, useMemo } from 'react';
import { PermissionTypes, Permissions } from 'librechat-data-provider';
import type { TPromptGroup } from 'librechat-data-provider';
import type { PromptOption } from '~/common';
import CategoryIcon from '~/components/Prompts/Groups/CategoryIcon';
import { usePromptGroupsNav, useHasAccess } from '~/hooks';
import { useGetAllPromptGroups } from '~/data-provider';
import { usePromptGroupsNav } from '~/hooks';
import { mapPromptGroups } from '~/utils';
type AllPromptGroupsData =
@@ -20,21 +19,14 @@ type PromptGroupsContextType =
data: AllPromptGroupsData;
isLoading: boolean;
};
hasAccess: boolean;
})
| null;
const PromptGroupsContext = createContext<PromptGroupsContextType>(null);
export const PromptGroupsProvider = ({ children }: { children: ReactNode }) => {
const hasAccess = useHasAccess({
permissionType: PermissionTypes.PROMPTS,
permission: Permissions.USE,
});
const promptGroupsNav = usePromptGroupsNav(hasAccess);
const promptGroupsNav = usePromptGroupsNav();
const { data: allGroupsData, isLoading: isLoadingAll } = useGetAllPromptGroups(undefined, {
enabled: hasAccess,
select: (data) => {
const mappedArray: PromptOption[] = data.map((group) => ({
id: group._id ?? '',
@@ -63,12 +55,11 @@ export const PromptGroupsProvider = ({ children }: { children: ReactNode }) => {
() => ({
...promptGroupsNav,
allPromptGroups: {
data: hasAccess ? allGroupsData : undefined,
isLoading: hasAccess ? isLoadingAll : false,
data: allGroupsData,
isLoading: isLoadingAll,
},
hasAccess,
}),
[promptGroupsNav, allGroupsData, isLoadingAll, hasAccess],
[promptGroupsNav, allGroupsData, isLoadingAll],
);
return (

View File

@@ -23,9 +23,7 @@ export * from './SetConvoContext';
export * from './SearchContext';
export * from './BadgeRowContext';
export * from './SidePanelContext';
export * from './DragDropContext';
export * from './MCPPanelContext';
export * from './ArtifactsContext';
export * from './PromptGroupsContext';
export * from './MessagesViewContext';
export { default as BadgeRowProvider } from './BadgeRowContext';

View File

@@ -1,5 +1,5 @@
import React from 'react';
import { TStartupConfig } from 'librechat-data-provider';
import { TModelSpec, TStartupConfig } from 'librechat-data-provider';
export interface Endpoint {
value: string;

View File

@@ -1,5 +1,5 @@
import { RefObject } from 'react';
import { Constants, FileSources, EModelEndpoint } from 'librechat-data-provider';
import { FileSources, EModelEndpoint } from 'librechat-data-provider';
import type { UseMutationResult } from '@tanstack/react-query';
import type * as InputNumberPrimitive from 'rc-input-number';
import type { SetterOrUpdater, RecoilState } from 'recoil';
@@ -8,10 +8,6 @@ import type * as t from 'librechat-data-provider';
import type { LucideIcon } from 'lucide-react';
import type { TranslationKeys } from '~/hooks';
export function isEphemeralAgent(agentId: string | null | undefined): boolean {
return agentId == null || agentId === '' || agentId === Constants.EPHEMERAL_AGENT_ID;
}
export interface ConfigFieldDetail {
title: string;
description: string;
@@ -236,8 +232,10 @@ export type AgentPanelContextType = {
mcps?: t.MCP[];
setMcp: React.Dispatch<React.SetStateAction<t.MCP | undefined>>;
setMcps: React.Dispatch<React.SetStateAction<t.MCP[] | undefined>>;
groupedTools: Record<string, t.AgentToolType & { tools?: t.AgentToolType[] }>;
activePanel?: string;
regularTools?: t.TPlugin[];
tools: t.AgentToolType[];
pluginTools?: t.TPlugin[];
setActivePanel: React.Dispatch<React.SetStateAction<Panel>>;
setCurrentAgentId: React.Dispatch<React.SetStateAction<string | undefined>>;
agent_id?: string;
@@ -352,6 +350,7 @@ export type TAskProps = {
conversationId?: string | null;
messageId?: string | null;
clientTimestamp?: string;
toolResources?: t.AgentToolResources;
};
export type TOptions = {
@@ -644,3 +643,10 @@ declare global {
google_tag_manager?: unknown;
}
}
export type UIResource = {
uri: string;
mimeType: string;
text: string;
[key: string]: unknown;
};
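For reference, a value satisfying the UIResource type added above; the field values are invented for the example, and the extra key is permitted by the index signature.

import type { UIResource } from '~/common'; // assuming the type is re-exported from ~/common like its neighbours

const exampleResource: UIResource = {
  uri: 'ui://example/widget', // example URI, not a real resource
  mimeType: 'text/html',
  text: '<div>Hello</div>',
  title: 'Example widget', // allowed by the [key: string]: unknown index signature
};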

View File

@@ -11,9 +11,9 @@ import {
AgentListResponse,
} from 'librechat-data-provider';
import type t from 'librechat-data-provider';
import { useLocalize, useDefaultConvo } from '~/hooks';
import { useChatContext } from '~/Providers';
import { renderAgentAvatar } from '~/utils';
import { useLocalize } from '~/hooks';
interface SupportContact {
name?: string;
@@ -34,11 +34,11 @@ interface AgentDetailProps {
*/
const AgentDetail: React.FC<AgentDetailProps> = ({ agent, isOpen, onClose }) => {
const localize = useLocalize();
const queryClient = useQueryClient();
// const navigate = useNavigate();
const { conversation, newConversation } = useChatContext();
const { showToast } = useToastContext();
const dialogRef = useRef<HTMLDivElement>(null);
const getDefaultConversation = useDefaultConvo();
const { conversation, newConversation } = useChatContext();
const queryClient = useQueryClient();
/**
* Navigate to chat with the selected agent
@@ -62,22 +62,13 @@ const AgentDetail: React.FC<AgentDetailProps> = ({ agent, isOpen, onClose }) =>
);
queryClient.invalidateQueries([QueryKeys.messages]);
/** Template with agent configuration */
const template = {
conversationId: Constants.NEW_CONVO as string,
endpoint: EModelEndpoint.agents,
agent_id: agent.id,
title: localize('com_agents_chat_with', { name: agent.name || localize('com_ui_agent') }),
};
const currentConvo = getDefaultConversation({
conversation: { ...(conversation ?? {}), ...template },
preset: template,
});
newConversation({
template: currentConvo,
preset: template,
template: {
conversationId: Constants.NEW_CONVO as string,
endpoint: EModelEndpoint.agents,
agent_id: agent.id,
title: `Chat with ${agent.name || 'Agent'}`,
},
});
}
};

View File

@@ -13,7 +13,7 @@ interface AgentGridProps {
category: string; // Currently selected category
searchQuery: string; // Current search query
onSelectAgent: (agent: t.Agent) => void; // Callback when agent is selected
scrollElementRef?: React.RefObject<HTMLElement>; // Parent scroll container ref for infinite scroll
scrollElement?: HTMLElement | null; // Parent scroll container for infinite scroll
}
/**
@@ -23,7 +23,7 @@ const AgentGrid: React.FC<AgentGridProps> = ({
category,
searchQuery,
onSelectAgent,
scrollElementRef,
scrollElement,
}) => {
const localize = useLocalize();
@@ -87,7 +87,7 @@ const AgentGrid: React.FC<AgentGridProps> = ({
// Set up infinite scroll
const { setScrollElement } = useInfiniteScroll({
hasNextPage,
isLoading: isFetching || isFetchingNextPage,
isFetchingNextPage,
fetchNextPage: () => {
if (hasNextPage && !isFetching) {
fetchNextPage();
@@ -99,11 +99,10 @@ const AgentGrid: React.FC<AgentGridProps> = ({
// Connect the scroll element when it's provided
useEffect(() => {
const scrollElement = scrollElementRef?.current;
if (scrollElement) {
setScrollElement(scrollElement);
}
}, [scrollElementRef, setScrollElement]);
}, [scrollElement, setScrollElement]);
/**
* Get category display name from API data or use fallback

View File

@@ -197,21 +197,21 @@ const AgentMarketplace: React.FC<AgentMarketplaceProps> = ({ className = '' }) =
*/
const handleSearch = (query: string) => {
const newParams = new URLSearchParams(searchParams);
const currentCategory = displayCategory;
if (query.trim()) {
newParams.set('q', query.trim());
// Switch to "all" category when starting a new search
navigate(`/agents/all?${newParams.toString()}`);
} else {
newParams.delete('q');
}
// Always preserve current category when searching or clearing search
if (currentCategory === 'promoted') {
navigate(`/agents${newParams.toString() ? `?${newParams.toString()}` : ''}`);
} else {
navigate(
`/agents/${currentCategory}${newParams.toString() ? `?${newParams.toString()}` : ''}`,
);
// Preserve current category when clearing search
const currentCategory = displayCategory;
if (currentCategory === 'promoted') {
navigate(`/agents${newParams.toString() ? `?${newParams.toString()}` : ''}`);
} else {
navigate(
`/agents/${currentCategory}${newParams.toString() ? `?${newParams.toString()}` : ''}`,
);
}
}
};
@@ -427,7 +427,7 @@ const AgentMarketplace: React.FC<AgentMarketplaceProps> = ({ className = '' }) =
category={displayCategory}
searchQuery={searchQuery}
onSelectAgent={handleAgentSelect}
scrollElementRef={scrollContainerRef}
scrollElement={scrollContainerRef.current}
/>
</div>
@@ -507,7 +507,7 @@ const AgentMarketplace: React.FC<AgentMarketplaceProps> = ({ className = '' }) =
category={nextCategory}
searchQuery={searchQuery}
onSelectAgent={handleAgentSelect}
scrollElementRef={scrollContainerRef}
scrollElement={scrollContainerRef.current}
/>
</div>
)}

View File

@@ -20,7 +20,6 @@ jest.mock('react-router-dom', () => ({
jest.mock('~/hooks', () => ({
useMediaQuery: jest.fn(() => false), // Mock as desktop by default
useLocalize: jest.fn(),
useDefaultConvo: jest.fn(),
}));
jest.mock('@librechat/client', () => ({
@@ -48,12 +47,7 @@ const mockWriteText = jest.fn();
const mockNavigate = jest.fn();
const mockShowToast = jest.fn();
const mockLocalize = jest.fn((key: string, values?: Record<string, any>) => {
if (key === 'com_agents_chat_with' && values?.name) {
return `Chat with ${values.name}`;
}
return key;
});
const mockLocalize = jest.fn((key: string) => key);
const mockAgent: t.Agent = {
id: 'test-agent-id',
@@ -112,12 +106,8 @@ describe('AgentDetail', () => {
(useNavigate as jest.Mock).mockReturnValue(mockNavigate);
const { useToastContext } = require('@librechat/client');
(useToastContext as jest.Mock).mockReturnValue({ showToast: mockShowToast });
const { useLocalize, useDefaultConvo } = require('~/hooks');
const { useLocalize } = require('~/hooks');
(useLocalize as jest.Mock).mockReturnValue(mockLocalize);
(useDefaultConvo as jest.Mock).mockReturnValue(() => ({
conversationId: Constants.NEW_CONVO,
endpoint: EModelEndpoint.agents,
}));
// Mock useChatContext
const { useChatContext } = require('~/Providers');
@@ -237,10 +227,6 @@ describe('AgentDetail', () => {
template: {
conversationId: Constants.NEW_CONVO,
endpoint: EModelEndpoint.agents,
},
preset: {
conversationId: Constants.NEW_CONVO,
endpoint: EModelEndpoint.agents,
agent_id: 'test-agent-id',
title: 'Chat with Test Agent',
},

View File

@@ -1,6 +1,5 @@
import React from 'react';
import { render, screen, fireEvent, waitFor, act } from '@testing-library/react';
import { render, screen, fireEvent } from '@testing-library/react';
import '@testing-library/jest-dom';
import AgentGrid from '../AgentGrid';
import type t from 'librechat-data-provider';
@@ -82,115 +81,6 @@ import { useMarketplaceAgentsInfiniteQuery } from '~/data-provider/Agents';
const mockUseMarketplaceAgentsInfiniteQuery = jest.mocked(useMarketplaceAgentsInfiniteQuery);
// Helper to create mock API response
const createMockResponse = (
agentIds: string[],
hasMore: boolean,
afterCursor?: string,
): t.AgentListResponse => ({
object: 'list',
data: agentIds.map(
(id) =>
({
id,
name: `Agent ${id}`,
description: `Description for ${id}`,
created_at: Date.now(),
model: 'gpt-4',
tools: [],
instructions: '',
avatar: null,
provider: 'openai',
model_parameters: {
temperature: 0.7,
top_p: 1,
frequency_penalty: 0,
presence_penalty: 0,
maxContextTokens: 2000,
max_context_tokens: 2000,
max_output_tokens: 2000,
},
}) as t.Agent,
),
first_id: agentIds[0] || '',
last_id: agentIds[agentIds.length - 1] || '',
has_more: hasMore,
after: afterCursor,
});
// Helper to setup mock viewport
const setupViewport = (scrollHeight: number, clientHeight: number) => {
const listeners: { [key: string]: EventListener[] } = {};
return {
scrollHeight,
clientHeight,
scrollTop: 0,
addEventListener: jest.fn((event: string, listener: EventListener) => {
if (!listeners[event]) {
listeners[event] = [];
}
listeners[event].push(listener);
}),
removeEventListener: jest.fn((event: string, listener: EventListener) => {
if (listeners[event]) {
listeners[event] = listeners[event].filter((l) => l !== listener);
}
}),
dispatchEvent: jest.fn((event: Event) => {
const eventListeners = listeners[event.type];
if (eventListeners) {
eventListeners.forEach((listener) => listener(event));
}
return true;
}),
} as unknown as HTMLElement;
};
// Helper to create mock infinite query return value
const createMockInfiniteQuery = (
pages: t.AgentListResponse[],
options?: {
isLoading?: boolean;
hasNextPage?: boolean;
fetchNextPage?: jest.Mock;
isFetchingNextPage?: boolean;
},
) =>
({
data: {
pages,
pageParams: pages.map((_, i) => (i === 0 ? undefined : `cursor-${i * 6}`)),
},
isLoading: options?.isLoading ?? false,
error: null,
isFetching: false,
hasNextPage: options?.hasNextPage ?? pages[pages.length - 1]?.has_more ?? false,
isFetchingNextPage: options?.isFetchingNextPage ?? false,
fetchNextPage: options?.fetchNextPage ?? jest.fn(),
refetch: jest.fn(),
// Add missing required properties for UseInfiniteQueryResult
isError: false,
isLoadingError: false,
isRefetchError: false,
isSuccess: true,
status: 'success' as const,
dataUpdatedAt: Date.now(),
errorUpdateCount: 0,
errorUpdatedAt: 0,
failureCount: 0,
failureReason: null,
fetchStatus: 'idle' as const,
isFetched: true,
isFetchedAfterMount: true,
isInitialLoading: false,
isPaused: false,
isPlaceholderData: false,
isPending: false,
isRefetching: false,
isStale: false,
remove: jest.fn(),
}) as any;
describe('AgentGrid Integration with useGetMarketplaceAgentsQuery', () => {
const mockOnSelectAgent = jest.fn();
@@ -453,15 +343,6 @@ describe('AgentGrid Integration with useGetMarketplaceAgentsQuery', () => {
});
describe('Infinite Scroll Functionality', () => {
beforeEach(() => {
// Silence console.log in tests
jest.spyOn(console, 'log').mockImplementation(() => {});
});
afterEach(() => {
jest.restoreAllMocks();
});
it('should show loading indicator when fetching next page', () => {
mockUseMarketplaceAgentsInfiniteQuery.mockReturnValue({
...defaultMockQueryResult,
@@ -515,358 +396,5 @@ describe('AgentGrid Integration with useGetMarketplaceAgentsQuery', () => {
expect(screen.queryByText("You've reached the end of the results")).not.toBeInTheDocument();
});
describe('Auto-fetch to fill viewport', () => {
it('should NOT auto-fetch when viewport is filled (5 agents, has_more=false)', async () => {
const mockResponse = createMockResponse(['1', '2', '3', '4', '5'], false);
const fetchNextPage = jest.fn();
mockUseMarketplaceAgentsInfiniteQuery.mockReturnValue(
createMockInfiniteQuery([mockResponse], { fetchNextPage }),
);
const scrollElement = setupViewport(500, 1000); // Content smaller than viewport
const scrollElementRef = { current: scrollElement };
const Wrapper = createWrapper();
render(
<Wrapper>
<AgentGrid
category="all"
searchQuery=""
onSelectAgent={mockOnSelectAgent}
scrollElementRef={scrollElementRef}
/>
</Wrapper>,
);
// Wait for initial render
await waitFor(() => {
expect(screen.getAllByRole('gridcell')).toHaveLength(5);
});
// Wait to ensure no auto-fetch happens
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 200));
});
// fetchNextPage should NOT be called since has_more is false
expect(fetchNextPage).not.toHaveBeenCalled();
});
it('should auto-fetch when viewport not filled (7 agents, big viewport)', async () => {
const firstPage = createMockResponse(['1', '2', '3', '4', '5', '6'], true, 'cursor-6');
const secondPage = createMockResponse(['7'], false);
let currentPages = [firstPage];
const fetchNextPage = jest.fn();
// Mock that updates pages when fetchNextPage is called
mockUseMarketplaceAgentsInfiniteQuery.mockImplementation(() =>
createMockInfiniteQuery(currentPages, {
fetchNextPage: jest.fn().mockImplementation(() => {
fetchNextPage();
currentPages = [firstPage, secondPage];
return Promise.resolve();
}),
hasNextPage: true,
}),
);
const scrollElement = setupViewport(400, 1200); // Large viewport (content < viewport)
const scrollElementRef = { current: scrollElement };
const Wrapper = createWrapper();
const { rerender } = render(
<Wrapper>
<AgentGrid
category="all"
searchQuery=""
onSelectAgent={mockOnSelectAgent}
scrollElementRef={scrollElementRef}
/>
</Wrapper>,
);
// Wait for initial 6 agents
await waitFor(() => {
expect(screen.getAllByRole('gridcell')).toHaveLength(6);
});
// Wait for ResizeObserver and auto-fetch to trigger
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 150));
});
// Auto-fetch should have been triggered (multiple times due to reliability checks)
expect(fetchNextPage).toHaveBeenCalled();
expect(fetchNextPage.mock.calls.length).toBeGreaterThanOrEqual(1);
// Update mock data and re-render
currentPages = [firstPage, secondPage];
rerender(
<Wrapper>
<AgentGrid
category="all"
searchQuery=""
onSelectAgent={mockOnSelectAgent}
scrollElementRef={scrollElementRef}
/>
</Wrapper>,
);
// Should now show all 7 agents
await waitFor(() => {
expect(screen.getAllByRole('gridcell')).toHaveLength(7);
});
});
it('should NOT auto-fetch when viewport is filled (7 agents, small viewport)', async () => {
const firstPage = createMockResponse(['1', '2', '3', '4', '5', '6'], true, 'cursor-6');
const fetchNextPage = jest.fn();
mockUseMarketplaceAgentsInfiniteQuery.mockReturnValue(
createMockInfiniteQuery([firstPage], { fetchNextPage, hasNextPage: true }),
);
const scrollElement = setupViewport(1200, 600); // Small viewport, content fills it
const scrollElementRef = { current: scrollElement };
const Wrapper = createWrapper();
render(
<Wrapper>
<AgentGrid
category="all"
searchQuery=""
onSelectAgent={mockOnSelectAgent}
scrollElementRef={scrollElementRef}
/>
</Wrapper>,
);
// Wait for initial 6 agents
await waitFor(() => {
expect(screen.getAllByRole('gridcell')).toHaveLength(6);
});
// Wait to ensure no auto-fetch happens
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 200));
});
// Should NOT auto-fetch since viewport is filled
expect(fetchNextPage).not.toHaveBeenCalled();
});
it('should auto-fetch once to fill viewport then stop (20 agents)', async () => {
const allPages = [
createMockResponse(['1', '2', '3', '4', '5', '6'], true, 'cursor-6'),
createMockResponse(['7', '8', '9', '10', '11', '12'], true, 'cursor-12'),
createMockResponse(['13', '14', '15', '16', '17', '18'], true, 'cursor-18'),
createMockResponse(['19', '20'], false),
];
let currentPages = [allPages[0]];
let fetchCount = 0;
const fetchNextPage = jest.fn();
mockUseMarketplaceAgentsInfiniteQuery.mockImplementation(() =>
createMockInfiniteQuery(currentPages, {
fetchNextPage: jest.fn().mockImplementation(() => {
fetchCount++;
fetchNextPage();
if (currentPages.length < 2) {
currentPages = allPages.slice(0, 2);
}
return Promise.resolve();
}),
hasNextPage: currentPages.length < 2,
}),
);
const scrollElement = setupViewport(600, 1000); // Viewport fits ~12 agents
const scrollElementRef = { current: scrollElement };
const Wrapper = createWrapper();
const { rerender } = render(
<Wrapper>
<AgentGrid
category="all"
searchQuery=""
onSelectAgent={mockOnSelectAgent}
scrollElementRef={scrollElementRef}
/>
</Wrapper>,
);
// Wait for initial 6 agents
await waitFor(() => {
expect(screen.getAllByRole('gridcell')).toHaveLength(6);
});
// Should auto-fetch to fill viewport
await waitFor(
() => {
expect(fetchNextPage).toHaveBeenCalledTimes(1);
},
{ timeout: 500 },
);
// Simulate viewport being filled after 12 agents
Object.defineProperty(scrollElement, 'scrollHeight', {
value: 1200,
writable: true,
configurable: true,
});
currentPages = allPages.slice(0, 2);
rerender(
<Wrapper>
<AgentGrid
category="all"
searchQuery=""
onSelectAgent={mockOnSelectAgent}
scrollElementRef={scrollElementRef}
/>
</Wrapper>,
);
// Should show 12 agents
await waitFor(() => {
expect(screen.getAllByRole('gridcell')).toHaveLength(12);
});
// Wait to ensure no additional auto-fetch
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 200));
});
// Should only have fetched once (to fill viewport)
expect(fetchCount).toBe(1);
expect(fetchNextPage).toHaveBeenCalledTimes(1);
});
it('should auto-fetch when viewport resizes to be taller (window resize)', async () => {
const firstPage = createMockResponse(['1', '2', '3', '4', '5', '6'], true, 'cursor-6');
const secondPage = createMockResponse(['7', '8', '9', '10', '11', '12'], true, 'cursor-12');
let currentPages = [firstPage];
const fetchNextPage = jest.fn();
let resizeObserverCallback: ResizeObserverCallback | null = null;
// Mock that updates pages when fetchNextPage is called
mockUseMarketplaceAgentsInfiniteQuery.mockImplementation(() =>
createMockInfiniteQuery(currentPages, {
fetchNextPage: jest.fn().mockImplementation(() => {
fetchNextPage();
if (currentPages.length === 1) {
currentPages = [firstPage, secondPage];
}
return Promise.resolve();
}),
hasNextPage: currentPages.length === 1,
}),
);
// Mock ResizeObserver to capture the callback
const ResizeObserverMock = jest.fn().mockImplementation((callback) => {
resizeObserverCallback = callback;
return {
observe: jest.fn(),
disconnect: jest.fn(),
unobserve: jest.fn(),
};
});
global.ResizeObserver = ResizeObserverMock as any;
// Start with a small viewport that fits the content
const scrollElement = setupViewport(800, 600);
const scrollElementRef = { current: scrollElement };
const Wrapper = createWrapper();
const { rerender } = render(
<Wrapper>
<AgentGrid
category="all"
searchQuery=""
onSelectAgent={mockOnSelectAgent}
scrollElementRef={scrollElementRef}
/>
</Wrapper>,
);
// Wait for initial 6 agents
await waitFor(() => {
expect(screen.getAllByRole('gridcell')).toHaveLength(6);
});
// Verify ResizeObserver was set up
expect(ResizeObserverMock).toHaveBeenCalled();
expect(resizeObserverCallback).not.toBeNull();
// Initially no fetch should happen as viewport is filled
await act(async () => {
await new Promise((resolve) => setTimeout(resolve, 100));
});
expect(fetchNextPage).not.toHaveBeenCalled();
// Simulate window resize - make viewport taller
Object.defineProperty(scrollElement, 'clientHeight', {
value: 1200, // Now taller than content
writable: true,
configurable: true,
});
// Trigger ResizeObserver callback to simulate resize detection
act(() => {
if (resizeObserverCallback) {
resizeObserverCallback(
[
{
target: scrollElement,
contentRect: {
x: 0,
y: 0,
width: 800,
height: 1200,
top: 0,
right: 800,
bottom: 1200,
left: 0,
} as DOMRectReadOnly,
borderBoxSize: [],
contentBoxSize: [],
devicePixelContentBoxSize: [],
} as ResizeObserverEntry,
],
{} as ResizeObserver,
);
}
});
// Should trigger auto-fetch due to viewport now being larger than content
await waitFor(
() => {
expect(fetchNextPage).toHaveBeenCalledTimes(1);
},
{ timeout: 500 },
);
// Update the component with new data
rerender(
<Wrapper>
<AgentGrid
category="all"
searchQuery=""
onSelectAgent={mockOnSelectAgent}
scrollElementRef={scrollElementRef}
/>
</Wrapper>,
);
// Should now show 12 agents after fetching
await waitFor(() => {
expect(screen.getAllByRole('gridcell')).toHaveLength(12);
});
});
});
});
});

View File

@@ -4,7 +4,7 @@ import {
SandpackProvider,
SandpackProviderProps,
} from '@codesandbox/sandpack-react/unstyled';
import type { SandpackPreviewRef, PreviewProps } from '@codesandbox/sandpack-react/unstyled';
import type { SandpackPreviewRef } from '@codesandbox/sandpack-react/unstyled';
import type { TStartupConfig } from 'librechat-data-provider';
import type { ArtifactFiles } from '~/common';
import { sharedFiles, sharedOptions } from '~/utils/artifacts';
@@ -13,7 +13,6 @@ export const ArtifactPreview = memo(function ({
files,
fileKey,
template,
isMermaid,
sharedProps,
previewRef,
currentCode,
@@ -21,7 +20,6 @@ export const ArtifactPreview = memo(function ({
}: {
files: ArtifactFiles;
fileKey: string;
isMermaid: boolean;
template: SandpackProviderProps['template'];
sharedProps: Partial<SandpackProviderProps>;
previewRef: React.MutableRefObject<SandpackPreviewRef>;
@@ -56,15 +54,6 @@ export const ArtifactPreview = memo(function ({
return _options;
}, [startupConfig, template]);
const style: PreviewProps['style'] | undefined = useMemo(() => {
if (isMermaid) {
return {
backgroundColor: '#282C34',
};
}
return;
}, [isMermaid]);
if (Object.keys(artifactFiles).length === 0) {
return null;
}
@@ -84,7 +73,6 @@ export const ArtifactPreview = memo(function ({
showRefreshButton={false}
tabIndex={0}
ref={previewRef}
style={style}
/>
</SandpackProvider>
);

View File

@@ -8,7 +8,6 @@ import { useAutoScroll } from '~/hooks/Artifacts/useAutoScroll';
import { ArtifactCodeEditor } from './ArtifactCodeEditor';
import { useGetStartupConfig } from '~/data-provider';
import { ArtifactPreview } from './ArtifactPreview';
import { MermaidMarkdown } from './MermaidMarkdown';
import { cn } from '~/utils';
export default function ArtifactTabs({
@@ -45,25 +44,23 @@ export default function ArtifactTabs({
id="artifacts-code"
className={cn('flex-grow overflow-auto')}
>
{isMermaid ? (
<MermaidMarkdown content={content} isSubmitting={isSubmitting} />
) : (
<ArtifactCodeEditor
files={files}
fileKey={fileKey}
template={template}
artifact={artifact}
editorRef={editorRef}
sharedProps={sharedProps}
/>
)}
<ArtifactCodeEditor
files={files}
fileKey={fileKey}
template={template}
artifact={artifact}
editorRef={editorRef}
sharedProps={sharedProps}
/>
</Tabs.Content>
<Tabs.Content value="preview" className="flex-grow overflow-auto">
<Tabs.Content
value="preview"
className={cn('flex-grow overflow-auto', isMermaid ? 'bg-[#282C34]' : 'bg-white')}
>
<ArtifactPreview
files={files}
fileKey={fileKey}
template={template}
isMermaid={isMermaid}
previewRef={previewRef}
sharedProps={sharedProps}
currentCode={currentCode}

View File

@@ -9,7 +9,6 @@ import { useEditorContext } from '~/Providers';
import ArtifactTabs from './ArtifactTabs';
import { CopyCodeButton } from './Code';
import { useLocalize } from '~/hooks';
import { cn } from '~/utils';
import store from '~/store';
export default function Artifacts() {
@@ -59,10 +58,9 @@ export default function Artifacts() {
<div className="flex h-full w-full items-center justify-center">
{/* Main Container */}
<div
className={cn(
`flex h-full w-full flex-col overflow-hidden border border-border-medium bg-surface-primary text-xl text-text-primary shadow-xl transition-all duration-500 ease-in-out`,
isVisible ? 'scale-100 opacity-100 blur-0' : 'scale-105 opacity-0 blur-sm',
)}
className={`flex h-full w-full flex-col overflow-hidden border border-border-medium bg-surface-primary text-xl text-text-primary shadow-xl transition-all duration-500 ease-in-out ${
isVisible ? 'scale-100 opacity-100 blur-0' : 'scale-105 opacity-0 blur-sm'
}`}
>
{/* Header */}
<div className="flex items-center justify-between border-b border-border-medium bg-surface-primary-alt p-2">
@@ -76,17 +74,16 @@ export default function Artifacts() {
{/* Refresh button */}
{activeTab === 'preview' && (
<button
className={cn(
'mr-2 text-text-secondary transition-transform duration-500 ease-in-out',
isRefreshing ? 'rotate-180' : '',
)}
className={`mr-2 text-text-secondary transition-transform duration-500 ease-in-out ${
isRefreshing ? 'rotate-180' : ''
}`}
onClick={handleRefresh}
disabled={isRefreshing}
aria-label="Refresh"
>
<RefreshCw
size={16}
className={cn('transform', isRefreshing ? 'animate-spin' : '')}
className={`transform ${isRefreshing ? 'animate-spin' : ''}`}
/>
</button>
)}

View File

@@ -1,11 +0,0 @@
import { CodeMarkdown } from './Code';
export function MermaidMarkdown({
content,
isSubmitting,
}: {
content: string;
isSubmitting: boolean;
}) {
return <CodeMarkdown content={`\`\`\`mermaid\n${content}\`\`\``} isSubmitting={isSubmitting} />;
}
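A short usage sketch (not part of the diff) for the component removed above: it wraps the diagram source in a mermaid code fence and delegates rendering to CodeMarkdown. The call site and diagram source are invented for illustration.

import React from 'react';
import { MermaidMarkdown } from './MermaidMarkdown';

/** Hypothetical call site for the removed component. */
function ExampleDiagram() {
  return <MermaidMarkdown content={'flowchart TD\n  A --> B'} isSubmitting={false} />;
}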

View File

@@ -1,6 +1,6 @@
import React, { useRef, useState, useMemo } from 'react';
import * as Ariakit from '@ariakit/react';
import { useRecoilState } from 'recoil';
import { useSetRecoilState } from 'recoil';
import { FileSearch, ImageUpIcon, TerminalSquareIcon, FileType2Icon } from 'lucide-react';
import { EToolResources, EModelEndpoint, defaultAgentCapabilities } from 'librechat-data-provider';
import {
@@ -42,9 +42,7 @@ const AttachFileMenu = ({
const isUploadDisabled = disabled ?? false;
const inputRef = useRef<HTMLInputElement>(null);
const [isPopoverActive, setIsPopoverActive] = useState(false);
const [ephemeralAgent, setEphemeralAgent] = useRecoilState(
ephemeralAgentByConvoId(conversationId),
);
const setEphemeralAgent = useSetRecoilState(ephemeralAgentByConvoId(conversationId));
const [toolResource, setToolResource] = useState<EToolResources | undefined>();
const { handleFileChange } = useFileHandling({
overrideEndpoint: EModelEndpoint.agents,
@@ -66,10 +64,7 @@ const AttachFileMenu = ({
* */
const capabilities = useAgentCapabilities(agentsConfig?.capabilities ?? defaultAgentCapabilities);
const { fileSearchAllowedByAgent, codeAllowedByAgent } = useAgentToolPermissions(
agentId,
ephemeralAgent,
);
const { fileSearchAllowedByAgent, codeAllowedByAgent } = useAgentToolPermissions(agentId);
const handleUploadClick = (isImage?: boolean) => {
if (!inputRef.current) {
@@ -94,11 +89,11 @@ const AttachFileMenu = ({
},
];
if (capabilities.contextEnabled) {
if (capabilities.ocrEnabled) {
items.push({
label: localize('com_ui_upload_ocr_text'),
onClick: () => {
setToolResource(EToolResources.context);
setToolResource(EToolResources.ocr);
onAction();
},
icon: <FileType2Icon className="icon-md" />,

View File

@@ -1,16 +1,14 @@
import React, { useMemo } from 'react';
import { useRecoilValue } from 'recoil';
import { OGDialog, OGDialogTemplate } from '@librechat/client';
import { EToolResources, defaultAgentCapabilities } from 'librechat-data-provider';
import { ImageUpIcon, FileSearch, TerminalSquareIcon, FileType2Icon } from 'lucide-react';
import { EToolResources, defaultAgentCapabilities } from 'librechat-data-provider';
import {
useAgentToolPermissions,
useAgentCapabilities,
useGetAgentsConfig,
useLocalize,
} from '~/hooks';
import { ephemeralAgentByConvoId } from '~/store';
import { useDragDropContext } from '~/Providers';
import { useChatContext } from '~/Providers';
interface DragDropModalProps {
onOptionSelect: (option: EToolResources | undefined) => void;
@@ -34,11 +32,9 @@ const DragDropModal = ({ onOptionSelect, setShowModal, files, isVisible }: DragD
* Use definition for agents endpoint for ephemeral agents
* */
const capabilities = useAgentCapabilities(agentsConfig?.capabilities ?? defaultAgentCapabilities);
const { conversationId, agentId } = useDragDropContext();
const ephemeralAgent = useRecoilValue(ephemeralAgentByConvoId(conversationId ?? ''));
const { conversation } = useChatContext();
const { fileSearchAllowedByAgent, codeAllowedByAgent } = useAgentToolPermissions(
agentId,
ephemeralAgent,
conversation?.agent_id,
);
const options = useMemo(() => {
@@ -64,10 +60,10 @@ const DragDropModal = ({ onOptionSelect, setShowModal, files, isVisible }: DragD
icon: <TerminalSquareIcon className="icon-md" />,
});
}
if (capabilities.contextEnabled) {
if (capabilities.ocrEnabled) {
_options.push({
label: localize('com_ui_upload_ocr_text'),
value: EToolResources.context,
value: EToolResources.ocr,
icon: <FileType2Icon className="icon-md" />,
});
}

View File

@@ -1,7 +1,6 @@
import { useDragHelpers } from '~/hooks';
import DragDropOverlay from '~/components/Chat/Input/Files/DragDropOverlay';
import DragDropModal from '~/components/Chat/Input/Files/DragDropModal';
import { DragDropProvider } from '~/Providers';
import { cn } from '~/utils';
interface DragDropWrapperProps {
@@ -20,14 +19,12 @@ export default function DragDropWrapper({ children, className }: DragDropWrapper
{children}
{/** Always render overlay to avoid mount/unmount overhead */}
<DragDropOverlay isActive={isActive} />
<DragDropProvider>
<DragDropModal
files={draggedFiles}
isVisible={showModal}
setShowModal={setShowModal}
onOptionSelect={handleOptionSelect}
/>
</DragDropProvider>
<DragDropModal
files={draggedFiles}
isVisible={showModal}
setShowModal={setShowModal}
onOptionSelect={handleOptionSelect}
/>
</div>
);
}

View File

@@ -8,7 +8,6 @@ function MCPSelectContent() {
const { conversationId, mcpServerManager } = useBadgeRowContext();
const {
localize,
isPinned,
mcpValues,
isInitializing,
placeholderText,
@@ -69,10 +68,6 @@ function MCPSelectContent() {
[getServerStatusIconProps, isInitializing],
);
if (!isPinned && mcpValues?.length === 0) {
return null;
}
const configDialogProps = getConfigDialogProps();
return (

View File

@@ -2,13 +2,14 @@ import { useState, useRef, useEffect, useMemo, memo, useCallback } from 'react';
import { AutoSizer, List } from 'react-virtualized';
import { Spinner, useCombobox } from '@librechat/client';
import { useSetRecoilState, useRecoilValue } from 'recoil';
import type { TPromptGroup } from 'librechat-data-provider';
import { PermissionTypes, Permissions } from 'librechat-data-provider';
import type { TPromptGroup, AgentToolResources } from 'librechat-data-provider';
import type { PromptOption } from '~/common';
import { removeCharIfLast, detectVariables } from '~/utils';
import VariableDialog from '~/components/Prompts/Groups/VariableDialog';
import { usePromptGroupsContext } from '~/Providers';
import { useLocalize, useHasAccess } from '~/hooks';
import MentionItem from './MentionItem';
import { useLocalize } from '~/hooks';
import store from '~/store';
const commandChar = '/';
@@ -50,10 +51,15 @@ function PromptsCommand({
}: {
index: number;
textAreaRef: React.MutableRefObject<HTMLTextAreaElement | null>;
submitPrompt: (textPrompt: string) => void;
submitPrompt: (textPrompt: string, toolResources?: AgentToolResources) => void;
}) {
const localize = useLocalize();
const { allPromptGroups, hasAccess } = usePromptGroupsContext();
const hasAccess = useHasAccess({
permissionType: PermissionTypes.PROMPTS,
permission: Permissions.USE,
});
const { allPromptGroups } = usePromptGroupsContext();
const { data, isLoading } = allPromptGroups;
const [activeIndex, setActiveIndex] = useState(0);
@@ -89,7 +95,6 @@ function PromptsCommand({
if (!group) {
return;
}
const hasVariables = detectVariables(group.productionPrompt?.prompt ?? '');
if (hasVariables) {
if (e && e.key === 'Tab') {
@@ -99,7 +104,7 @@ function PromptsCommand({
setVariableDialogOpen(true);
return;
} else {
submitPrompt(group.productionPrompt?.prompt ?? '');
submitPrompt(group.productionPrompt?.prompt ?? '', group.productionPrompt?.tool_resources);
}
},
[setSearchValue, setOpen, setShowPromptsPopover, textAreaRef, promptsMap, submitPrompt],

View File

@@ -26,7 +26,6 @@ type ContentPartsProps = {
isCreatedByUser: boolean;
isLast: boolean;
isSubmitting: boolean;
isLatestMessage?: boolean;
edit?: boolean;
enterEdit?: (cancel?: boolean) => void | null | undefined;
siblingIdx?: number;
@@ -46,7 +45,6 @@ const ContentParts = memo(
isCreatedByUser,
isLast,
isSubmitting,
isLatestMessage,
edit,
enterEdit,
siblingIdx,
@@ -57,8 +55,6 @@ const ContentParts = memo(
const [isExpanded, setIsExpanded] = useState(showThinking);
const attachmentMap = useMemo(() => mapAttachments(attachments ?? []), [attachments]);
const effectiveIsSubmitting = isLatestMessage ? isSubmitting : false;
const hasReasoningParts = useMemo(() => {
const hasThinkPart = content?.some((part) => part?.type === ContentTypes.THINK) ?? false;
const allThinkPartsHaveContent =
@@ -138,9 +134,7 @@ const ContentParts = memo(
})
}
label={
effectiveIsSubmitting && isLast
? localize('com_ui_thinking')
: localize('com_ui_thoughts')
isSubmitting && isLast ? localize('com_ui_thinking') : localize('com_ui_thoughts')
}
/>
</div>
@@ -161,14 +155,12 @@ const ContentParts = memo(
conversationId,
partIndex: idx,
nextType: content[idx + 1]?.type,
isSubmitting: effectiveIsSubmitting,
isLatestMessage,
}}
>
<Part
part={part}
attachments={attachments}
isSubmitting={effectiveIsSubmitting}
isSubmitting={isSubmitting}
key={`part-${messageId}-${idx}`}
isCreatedByUser={isCreatedByUser}
isLast={idx === content.length - 1}

View File

@@ -4,7 +4,7 @@ import { useRecoilState, useRecoilValue } from 'recoil';
import { TextareaAutosize, TooltipAnchor } from '@librechat/client';
import { useUpdateMessageMutation } from 'librechat-data-provider/react-query';
import type { TEditProps } from '~/common';
import { useMessagesOperations, useMessagesConversation, useAddedChatContext } from '~/Providers';
import { useChatContext, useAddedChatContext } from '~/Providers';
import { cn, removeFocusRings } from '~/utils';
import { useLocalize } from '~/hooks';
import Container from './Container';
@@ -22,8 +22,7 @@ const EditMessage = ({
const { addedIndex } = useAddedChatContext();
const saveButtonRef = useRef<HTMLButtonElement | null>(null);
const submitButtonRef = useRef<HTMLButtonElement | null>(null);
const { conversation } = useMessagesConversation();
const { getMessages, setMessages } = useMessagesOperations();
const { getMessages, setMessages, conversation } = useChatContext();
const [latestMultiMessage, setLatestMultiMessage] = useRecoilState(
store.latestMessageFamily(addedIndex),
);

Some files were not shown because too many files have changed in this diff Show More