# Compare commits

**115 commits** · `refactor/m...` → `v0.8.0`

The author, date, and commit-message columns were not captured in this view; the abbreviated commit SHAs, in order:

```text
b7d13cec6f 37321ea10d 17ab91f1fd 4777bd22c5 dfe236acb5
c5d1861acf b8720a9b7a 0b2fde73e3 c19b8755a7 f6e19d8034
c0eb19730a a1471c2f37 712f0b3ca2 062d813b21 4b5b46604c
3d7eaf0fcc 823015160c 3219734b9e 4f3683fd9a 57f8b333bc
f9aebeba92 b85950aa9a bcec5bfceb e4f323e71a d83826b604
2153db2f5f de02892396 f61e057f7f 91e49d82aa 880c7b43a1
c99a29f8da 8a60e8990f a6bf2b6ce3 ff8dac570f 96870e0da0
f0599ad36c 5b1a31ef4d 386900fb4f 9d2aba5df5 a5195a57a4
2489670f54 0352067da2 fcaf55143d aae3694b11 68c9f668c1
8b2e1c6088 99135a3dc1 344e7c44b5 e5d2a932bc c40554c03b
98af4564e8 26a58fcabc 3fec63e597 81139046e5 89d12a8ccd
f6d34d78ca 48ca1bfd88 208be7c06c 02bfe32905 4499494aba
d04da60b3b 0e94d97bfb 45ab4d4503 0ceef12eea 6738360051
52b65492d5 7a9a99d2a0 5bfb06b417 2ce8f1f686 1a47601533
5245aeea8f dd93db40bc 136cf1d5a8 751522087a 7fe830acfc
cdfe686987 5b5723343c 30c24a66f6 ecf9733bc1 133312fb40
b62ffb533c d75fb76338 51f2d43fed e3a645e8fb 180046a3c5
916742ab9d d91f34dd42 5676976564 85aa3e7d9c a2ff6613c5
8d6cb5eee0 31445e391a 04c3a5a861 5667cc9702 c0f95f971a
f125f5bd32 f3eca8c7a7 f22e5f965e 749f539dfc 1247207afe
5c0e9d8fbb 957fa7a994 751c2e1d17 519645c0b0 0d0a318c3c
588e0c4611 79144a6365 ca53c20370 d635503f49 920966f895
c46e0d3ecc c6ecf0095b 7de6f6e44c 035f85c3ba 6f6a34d126
```

The file diffs from the compare view follow. File paths were only captured for the `api/cache` files; the remaining hunks are identified by their content.
```diff
@@ -163,10 +163,10 @@ GOOGLE_KEY=user_provided
 # GOOGLE_AUTH_HEADER=true
 
 # Gemini API (AI Studio)
-# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite-preview-06-17,gemini-2.0-flash,gemini-2.0-flash-lite
+# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite,gemini-2.0-flash,gemini-2.0-flash-lite
 
 # Vertex AI
-# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite-preview-06-17,gemini-2.0-flash-001,gemini-2.0-flash-lite-001
+# GOOGLE_MODELS=gemini-2.5-pro,gemini-2.5-flash,gemini-2.5-flash-lite,gemini-2.0-flash-001,gemini-2.0-flash-lite-001
 
 # GOOGLE_TITLE_MODEL=gemini-2.0-flash-lite-001
@@ -690,8 +690,8 @@ HELP_AND_FAQ_URL=https://librechat.ai
 # REDIS_PING_INTERVAL=300
 
 # Force specific cache namespaces to use in-memory storage even when Redis is enabled
-# Comma-separated list of CacheKeys (e.g., STATIC_CONFIG,ROLES,MESSAGES)
-# FORCED_IN_MEMORY_CACHE_NAMESPACES=STATIC_CONFIG,ROLES
+# Comma-separated list of CacheKeys (e.g., ROLES,MESSAGES)
+# FORCED_IN_MEMORY_CACHE_NAMESPACES=ROLES,MESSAGES
 
 #==================================================#
 #                      Others                      #
```
```diff
@@ -1,4 +1,4 @@
-# v0.8.0-rc3
+# v0.8.0
 
 # Base node image
 FROM node:20-alpine AS node
@@ -30,7 +30,7 @@ RUN \
     # Allow mounting of these files, which have no default
     touch .env ; \
     # Create directories for the volumes to inherit the correct permissions
-    mkdir -p /app/client/public/images /app/api/logs ; \
+    mkdir -p /app/client/public/images /app/api/logs /app/uploads ; \
     npm config set fetch-retry-maxtimeout 600000 ; \
     npm config set fetch-retries 5 ; \
     npm config set fetch-retry-mintimeout 15000 ; \
@@ -44,8 +44,6 @@ RUN \
     npm prune --production; \
     npm cache clean --force
 
-RUN mkdir -p /app/client/public/images /app/api/logs
-
 # Node API setup
 EXPOSE 3080
 ENV HOST=0.0.0.0
```
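The `/app/uploads` directory joins the images and logs directories that were already pre-created during the image build. Creating these paths inside the `RUN` layer means a bind mount or named volume attached at runtime inherits the ownership and permissions established by the Dockerfile, rather than being created on the fly by the Docker daemon as root; the now-redundant second `RUN mkdir` is dropped for the same reason.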
```diff
@@ -1,5 +1,5 @@
 # Dockerfile.multi
-# v0.8.0-rc3
+# v0.8.0
 
 # Base for all builds
 FROM node:20-alpine AS base-min
```
```diff
@@ -75,6 +75,7 @@
 - 🔍 **Web Search**:
   - Search the internet and retrieve relevant information to enhance your AI context
   - Combines search providers, content scrapers, and result rerankers for optimal results
+  - **Customizable Jina Reranking**: Configure custom Jina API URLs for reranking services
   - **[Learn More →](https://www.librechat.ai/docs/features/web_search)**
 
 - 🪄 **Generative UI with Code Artifacts**:
```
```diff
@@ -10,7 +10,17 @@ const {
   validateVisionModel,
 } = require('librechat-data-provider');
 const { SplitStreamHandler: _Handler } = require('@librechat/agents');
-const { Tokenizer, createFetch, createStreamEventHandlers } = require('@librechat/api');
+const {
+  Tokenizer,
+  createFetch,
+  matchModelName,
+  getClaudeHeaders,
+  getModelMaxTokens,
+  configureReasoning,
+  checkPromptCacheSupport,
+  getModelMaxOutputTokens,
+  createStreamEventHandlers,
+} = require('@librechat/api');
 const {
   truncateText,
   formatMessage,
@@ -19,12 +29,6 @@ const {
   parseParamFromPrompt,
   createContextHandlers,
 } = require('./prompts');
-const {
-  getClaudeHeaders,
-  configureReasoning,
-  checkPromptCacheSupport,
-} = require('~/server/services/Endpoints/anthropic/helpers');
-const { getModelMaxTokens, getModelMaxOutputTokens, matchModelName } = require('~/utils');
 const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
 const { encodeAndFormat } = require('~/server/services/Files/images/encode');
 const { sleep } = require('~/server/utils');
```
```diff
@@ -1,4 +1,5 @@
 const { google } = require('googleapis');
+const { getModelMaxTokens } = require('@librechat/api');
 const { concat } = require('@langchain/core/utils/stream');
 const { ChatVertexAI } = require('@langchain/google-vertexai');
 const { Tokenizer, getSafetySettings } = require('@librechat/api');
@@ -21,7 +22,6 @@
 } = require('librechat-data-provider');
 const { encodeAndFormat } = require('~/server/services/Files/images');
 const { spendTokens } = require('~/models/spendTokens');
-const { getModelMaxTokens } = require('~/utils');
 const { sleep } = require('~/server/utils');
 const { logger } = require('~/config');
 const {
```
```diff
@@ -7,7 +7,9 @@ const {
   createFetch,
   resolveHeaders,
   constructAzureURL,
+  getModelMaxTokens,
   genAzureChatCompletion,
+  getModelMaxOutputTokens,
   createStreamEventHandlers,
 } = require('@librechat/api');
 const {
@@ -31,13 +33,13 @@
   titleInstruction,
   createContextHandlers,
 } = require('./prompts');
-const { extractBaseURL, getModelMaxTokens, getModelMaxOutputTokens } = require('~/utils');
 const { encodeAndFormat } = require('~/server/services/Files/images/encode');
 const { addSpaceIfNeeded, sleep } = require('~/server/utils');
 const { spendTokens } = require('~/models/spendTokens');
 const { handleOpenAIErrors } = require('./tools/util');
 const { summaryBuffer } = require('./memory');
 const { runTitleChain } = require('./chains');
+const { extractBaseURL } = require('~/utils');
 const { tokenSplit } = require('./document');
 const BaseClient = require('./BaseClient');
 const { createLLM } = require('./llm');
```
```diff
@@ -1,5 +1,5 @@
+const { getModelMaxTokens } = require('@librechat/api');
 const BaseClient = require('../BaseClient');
-const { getModelMaxTokens } = require('../../../utils');
 
 class FakeClient extends BaseClient {
   constructor(apiKey, options = {}) {
```
```diff
@@ -68,19 +68,19 @@ const primeFiles = async (options) => {
 /**
  *
  * @param {Object} options
- * @param {ServerRequest} options.req
+ * @param {string} options.userId
  * @param {Array<{ file_id: string; filename: string }>} options.files
  * @param {string} [options.entity_id]
  * @param {boolean} [options.fileCitations=false] - Whether to include citation instructions
  * @returns
  */
-const createFileSearchTool = async ({ req, files, entity_id, fileCitations = false }) => {
+const createFileSearchTool = async ({ userId, files, entity_id, fileCitations = false }) => {
   return tool(
     async ({ query }) => {
       if (files.length === 0) {
         return 'No files to search. Instruct the user to add files for the search.';
       }
-      const jwtToken = generateShortLivedToken(req.user.id);
+      const jwtToken = generateShortLivedToken(userId);
       if (!jwtToken) {
         return 'There was an error authenticating the file search request.';
       }
```
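This hunk narrows `createFileSearchTool`'s dependency from the whole Express request to a plain `userId` string, which decouples tool construction from the HTTP layer. A minimal sketch of the new calling convention, mirroring the `loadTools` hunk later in this diff (the surrounding variables come from that caller):

```js
// Sketch: only a user id string crosses the boundary now,
// so the tool factory no longer needs a live Express `req`.
const fileSearchTool = await createFileSearchTool({
  userId: user,          // string id; previously the whole `req` was passed
  files,                 // [{ file_id, filename }, ...]
  entity_id: agent?.id,
  fileCitations,
});
```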
```diff
@@ -1,8 +1,13 @@
 const { logger } = require('@librechat/data-schemas');
 const { SerpAPI } = require('@langchain/community/tools/serpapi');
 const { Calculator } = require('@langchain/community/tools/calculator');
-const { mcpToolPattern, loadWebSearchAuth, checkAccess } = require('@librechat/api');
 const { EnvVar, createCodeExecutionTool, createSearchTool } = require('@librechat/agents');
+const {
+  checkAccess,
+  createSafeUser,
+  mcpToolPattern,
+  loadWebSearchAuth,
+} = require('@librechat/api');
 const {
   Tools,
   Constants,
@@ -33,7 +38,7 @@ const { createFileSearchTool, primeFiles: primeSearchFiles } = require('./fileSe
 const { getUserPluginAuthValue } = require('~/server/services/PluginService');
 const { createMCPTool, createMCPTools } = require('~/server/services/MCP');
 const { loadAuthValues } = require('~/server/services/Tools/credentials');
-const { getCachedTools } = require('~/server/services/Config');
+const { getMCPServerTools } = require('~/server/services/Config');
 const { getRoleByName } = require('~/models/Role');
 
 /**
@@ -250,7 +255,6 @@ const loadTools = async ({
 
   /** @type {Record<string, string>} */
   const toolContextMap = {};
-  const cachedTools = (await getCachedTools({ userId: user, includeGlobal: true })) ?? {};
   const requestedMCPTools = {};
 
   for (const tool of tools) {
@@ -307,7 +311,7 @@
     }
 
     return createFileSearchTool({
-      req: options.req,
+      userId: user,
       files,
       entity_id: agent?.id,
       fileCitations,
@@ -340,7 +344,7 @@ Current Date & Time: ${replaceSpecialVars({ text: '{{iso_datetime}}' })}
       });
     };
     continue;
-  } else if (tool && cachedTools && mcpToolPattern.test(tool)) {
+  } else if (tool && mcpToolPattern.test(tool)) {
     const [toolName, serverName] = tool.split(Constants.mcp_delimiter);
     if (toolName === Constants.mcp_server) {
       /** Placeholder used for UI purposes */
@@ -353,33 +357,21 @@ Current Date & Time: ${replaceSpecialVars({ text: '{{iso_datetime}}' })}
       continue;
     }
     if (toolName === Constants.mcp_all) {
-      const currentMCPGenerator = async (index) =>
-        createMCPTools({
-          req: options.req,
-          res: options.res,
-          index,
-          userMCPAuthMap,
-          model: agent?.model ?? model,
-          provider: agent?.provider ?? endpoint,
-          signal,
-        });
-      requestedMCPTools[serverName] = [currentMCPGenerator];
+      requestedMCPTools[serverName] = [
+        {
+          type: 'all',
+          serverName,
+        },
+      ];
       continue;
     }
-    const currentMCPGenerator = async (index) =>
-      createMCPTool({
-        index,
-        req: options.req,
-        res: options.res,
-        toolKey: tool,
-        userMCPAuthMap,
-        model: agent?.model ?? model,
-        provider: agent?.provider ?? endpoint,
-        signal,
-      });
-
     requestedMCPTools[serverName] = requestedMCPTools[serverName] || [];
-    requestedMCPTools[serverName].push(currentMCPGenerator);
+    requestedMCPTools[serverName].push({
+      type: 'single',
+      toolKey: tool,
+      serverName,
+    });
     continue;
   }
@@ -422,24 +414,65 @@ Current Date & Time: ${replaceSpecialVars({ text: '{{iso_datetime}}' })}
   const mcpToolPromises = [];
   /** MCP server tools are initialized sequentially by server */
   let index = -1;
-  for (const [serverName, generators] of Object.entries(requestedMCPTools)) {
+  const failedMCPServers = new Set();
+  const safeUser = createSafeUser(options.req?.user);
+  for (const [serverName, toolConfigs] of Object.entries(requestedMCPTools)) {
     index++;
-    for (const generator of generators) {
+    /** @type {LCAvailableTools} */
+    let availableTools;
+    for (const config of toolConfigs) {
       try {
-        if (generator && generators.length === 1) {
+        if (failedMCPServers.has(serverName)) {
+          continue;
+        }
+        const mcpParams = {
+          index,
+          signal,
+          user: safeUser,
+          userMCPAuthMap,
+          res: options.res,
+          model: agent?.model ?? model,
+          serverName: config.serverName,
+          provider: agent?.provider ?? endpoint,
+        };
+
+        if (config.type === 'all' && toolConfigs.length === 1) {
+          /** Handle async loading for single 'all' tool config */
           mcpToolPromises.push(
-            generator(index).catch((error) => {
+            createMCPTools(mcpParams).catch((error) => {
               logger.error(`Error loading ${serverName} tools:`, error);
               return null;
             }),
           );
           continue;
         }
-        const mcpTool = await generator(index);
+        if (!availableTools) {
+          try {
+            availableTools = await getMCPServerTools(serverName);
+          } catch (error) {
+            logger.error(`Error fetching available tools for MCP server ${serverName}:`, error);
+          }
+        }
+
+        /** Handle synchronous loading */
+        const mcpTool =
+          config.type === 'all'
+            ? await createMCPTools(mcpParams)
+            : await createMCPTool({
+                ...mcpParams,
+                availableTools,
+                toolKey: config.toolKey,
+              });
+
         if (Array.isArray(mcpTool)) {
           loadedTools.push(...mcpTool);
         } else if (mcpTool) {
           loadedTools.push(mcpTool);
+        } else {
+          failedMCPServers.add(serverName);
+          logger.warn(
+            `MCP tool creation failed for "${config.toolKey}", server may be unavailable or unauthenticated.`,
+          );
         }
       } catch (error) {
         logger.error(`Error loading MCP tool for server ${serverName}:`, error);
```
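The refactor above replaces per-tool generator closures with plain config records, so all MCP parameters are assembled in one place and per-server failures can short-circuit the rest of that server's tools. An illustrative shape of the intermediate `requestedMCPTools` map after the collection pass (server and tool names here are hypothetical):

```js
// Plain records per server instead of generator closures; `type: 'all'`
// requests every tool from a server, `type: 'single'` one named tool.
const requestedMCPTools = {
  filesystem: [{ type: 'all', serverName: 'filesystem' }],
  github: [
    { type: 'single', toolKey: `search_issues${Constants.mcp_delimiter}github`, serverName: 'github' },
    { type: 'single', toolKey: `create_issue${Constants.mcp_delimiter}github`, serverName: 'github' },
  ],
};
```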
**api/cache/cacheConfig.js** (25 changed lines, vendored)
```diff
@@ -1,4 +1,5 @@
 const fs = require('fs');
+const { logger } = require('@librechat/data-schemas');
 const { math, isEnabled } = require('@librechat/api');
 const { CacheKeys } = require('librechat-data-provider');
 
@@ -34,13 +35,35 @@ if (FORCED_IN_MEMORY_CACHE_NAMESPACES.length > 0) {
   }
 }
 
+/** Helper function to safely read Redis CA certificate from file
+ * @returns {string|null} The contents of the CA certificate file, or null if not set or on error
+ */
+const getRedisCA = () => {
+  const caPath = process.env.REDIS_CA;
+  if (!caPath) {
+    return null;
+  }
+
+  try {
+    if (fs.existsSync(caPath)) {
+      return fs.readFileSync(caPath, 'utf8');
+    } else {
+      logger.warn(`Redis CA certificate file not found: ${caPath}`);
+      return null;
+    }
+  } catch (error) {
+    logger.error(`Failed to read Redis CA certificate file '${caPath}':`, error);
+    return null;
+  }
+};
+
 const cacheConfig = {
   FORCED_IN_MEMORY_CACHE_NAMESPACES,
   USE_REDIS,
   REDIS_URI: process.env.REDIS_URI,
   REDIS_USERNAME: process.env.REDIS_USERNAME,
   REDIS_PASSWORD: process.env.REDIS_PASSWORD,
-  REDIS_CA: process.env.REDIS_CA ? fs.readFileSync(process.env.REDIS_CA, 'utf8') : null,
+  REDIS_CA: getRedisCA(),
   REDIS_KEY_PREFIX: process.env[REDIS_KEY_PREFIX_VAR] || REDIS_KEY_PREFIX || '',
   REDIS_MAX_LISTENERS: math(process.env.REDIS_MAX_LISTENERS, 40),
   REDIS_PING_INTERVAL: math(process.env.REDIS_PING_INTERVAL, 0),
```
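The `getRedisCA` change is defensive: the old inline expression ran `fs.readFileSync` at module load, so a bad `REDIS_CA` path crashed the process on startup, while the helper logs and degrades to `null`. A sketch of the resulting behavior (the path is hypothetical):

```js
// Sketch: a missing CA file now yields null plus a logged warning,
// instead of an uncaught exception at require time.
process.env.REDIS_CA = '/etc/ssl/certs/redis-ca.pem'; // hypothetical PEM path
const { cacheConfig } = require('./cacheConfig');
console.log(cacheConfig.REDIS_CA); // PEM contents, or null if the file is absent
```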
**api/cache/cacheConfig.spec.js** (3 changed lines, vendored)
```diff
@@ -157,12 +157,11 @@ describe('cacheConfig', () => {
 
   describe('FORCED_IN_MEMORY_CACHE_NAMESPACES validation', () => {
     test('should parse comma-separated cache keys correctly', () => {
-      process.env.FORCED_IN_MEMORY_CACHE_NAMESPACES = ' ROLES, STATIC_CONFIG ,MESSAGES ';
+      process.env.FORCED_IN_MEMORY_CACHE_NAMESPACES = ' ROLES, MESSAGES ';
 
       const { cacheConfig } = require('./cacheConfig');
       expect(cacheConfig.FORCED_IN_MEMORY_CACHE_NAMESPACES).toEqual([
         'ROLES',
-        'STATIC_CONFIG',
         'MESSAGES',
       ]);
     });
```
**api/cache/getLogStores.js** (2 changed lines, vendored)
```diff
@@ -31,8 +31,8 @@ const namespaces = {
   [CacheKeys.SAML_SESSION]: sessionCache(CacheKeys.SAML_SESSION),
 
   [CacheKeys.ROLES]: standardCache(CacheKeys.ROLES),
+  [CacheKeys.APP_CONFIG]: standardCache(CacheKeys.APP_CONFIG),
   [CacheKeys.CONFIG_STORE]: standardCache(CacheKeys.CONFIG_STORE),
-  [CacheKeys.STATIC_CONFIG]: standardCache(CacheKeys.STATIC_CONFIG),
   [CacheKeys.PENDING_REQ]: standardCache(CacheKeys.PENDING_REQ),
   [CacheKeys.ENCODED_DOMAINS]: new Keyv({ store: keyvMongo, namespace: CacheKeys.ENCODED_DOMAINS }),
   [CacheKeys.ABORT_KEYS]: standardCache(CacheKeys.ABORT_KEYS, Time.TEN_MINUTES),
```
```diff
@@ -1,6 +1,6 @@
-const { MCPManager, FlowStateManager } = require('@librechat/api');
 const { EventSource } = require('eventsource');
 const { Time } = require('librechat-data-provider');
+const { MCPManager, FlowStateManager, OAuthReconnectionManager } = require('@librechat/api');
 const logger = require('./winston');
 
 global.EventSource = EventSource;
@@ -26,4 +26,6 @@ module.exports = {
   createMCPManager: MCPManager.createInstance,
   getMCPManager: MCPManager.getInstance,
   getFlowStateManager,
+  createOAuthReconnectionManager: OAuthReconnectionManager.createInstance,
+  getOAuthReconnectionManager: OAuthReconnectionManager.getInstance,
 };
```
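`OAuthReconnectionManager` follows the same create-once/get-anywhere singleton pattern already used for `MCPManager`. A sketch of how a caller consumes it, lifted from the refresh-controller hunk later in this diff:

```js
// Best-effort reconnection of a user's OAuth-backed MCP servers;
// failures are logged rather than surfaced to the HTTP response.
const { logger } = require('@librechat/data-schemas');
const { getOAuthReconnectionManager } = require('~/config');

void getOAuthReconnectionManager()
  .reconnectServers(userId)
  .catch((err) => {
    logger.error('Error reconnecting OAuth MCP servers:', err);
  });
```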
```diff
@@ -1,10 +1,8 @@
 const mongoose = require('mongoose');
 const { MeiliSearch } = require('meilisearch');
 const { logger } = require('@librechat/data-schemas');
-const { FlowStateManager } = require('@librechat/api');
 const { CacheKeys } = require('librechat-data-provider');
-
-const { isEnabled } = require('~/server/utils');
+const { isEnabled, FlowStateManager } = require('@librechat/api');
 const { getLogStores } = require('~/cache');
 
 const Conversation = mongoose.models.Conversation;
@@ -31,6 +29,81 @@ class MeiliSearchClient {
   }
 }
 
+/**
+ * Ensures indexes have proper filterable attributes configured and checks if documents have user field
+ * @param {MeiliSearch} client - MeiliSearch client instance
+ * @returns {Promise<boolean>} - true if configuration was updated or re-sync is needed
+ */
+async function ensureFilterableAttributes(client) {
+  try {
+    // Check and update messages index
+    try {
+      const messagesIndex = client.index('messages');
+      const settings = await messagesIndex.getSettings();
+
+      if (!settings.filterableAttributes || !settings.filterableAttributes.includes('user')) {
+        logger.info('[indexSync] Configuring messages index to filter by user...');
+        await messagesIndex.updateSettings({
+          filterableAttributes: ['user'],
+        });
+        logger.info('[indexSync] Messages index configured for user filtering');
+        logger.info('[indexSync] Index configuration updated. Full re-sync will be triggered.');
+        return true;
+      }
+
+      // Check if existing documents have user field indexed
+      try {
+        const searchResult = await messagesIndex.search('', { limit: 1 });
+        if (searchResult.hits.length > 0 && !searchResult.hits[0].user) {
+          logger.info('[indexSync] Existing messages missing user field, re-sync needed');
+          return true;
+        }
+      } catch (searchError) {
+        logger.debug('[indexSync] Could not check message documents:', searchError.message);
+      }
+    } catch (error) {
+      if (error.code !== 'index_not_found') {
+        logger.warn('[indexSync] Could not check/update messages index settings:', error.message);
+      }
+    }
+
+    // Check and update conversations index
+    try {
+      const convosIndex = client.index('convos');
+      const settings = await convosIndex.getSettings();
+
+      if (!settings.filterableAttributes || !settings.filterableAttributes.includes('user')) {
+        logger.info('[indexSync] Configuring convos index to filter by user...');
+        await convosIndex.updateSettings({
+          filterableAttributes: ['user'],
+        });
+        logger.info('[indexSync] Convos index configured for user filtering');
+        logger.info('[indexSync] Index configuration updated. Full re-sync will be triggered.');
+        return true;
+      }
+
+      // Check if existing documents have user field indexed
+      try {
+        const searchResult = await convosIndex.search('', { limit: 1 });
+        if (searchResult.hits.length > 0 && !searchResult.hits[0].user) {
+          logger.info('[indexSync] Existing conversations missing user field, re-sync needed');
+          return true;
+        }
+      } catch (searchError) {
+        logger.debug('[indexSync] Could not check conversation documents:', searchError.message);
+      }
+    } catch (error) {
+      if (error.code !== 'index_not_found') {
+        logger.warn('[indexSync] Could not check/update convos index settings:', error.message);
+      }
+    }
+  } catch (error) {
+    logger.error('[indexSync] Error ensuring filterable attributes:', error);
+  }
+
+  return false;
+}
+
 /**
  * Performs the actual sync operations for messages and conversations
  */
@@ -47,12 +120,27 @@ async function performSync() {
     return { messagesSync: false, convosSync: false };
   }
 
+  /** Ensures indexes have proper filterable attributes configured */
+  const configUpdated = await ensureFilterableAttributes(client);
+
   let messagesSync = false;
   let convosSync = false;
 
+  // If configuration was just updated or documents are missing user field, force a full re-sync
+  if (configUpdated) {
+    logger.info('[indexSync] Forcing full re-sync to ensure user field is properly indexed...');
+
+    // Reset sync flags to force full re-sync
+    await Message.collection.updateMany({ _meiliIndex: true }, { $set: { _meiliIndex: false } });
+    await Conversation.collection.updateMany(
+      { _meiliIndex: true },
+      { $set: { _meiliIndex: false } },
+    );
+  }
+
   // Check if we need to sync messages
   const messageProgress = await Message.getSyncProgress();
-  if (!messageProgress.isComplete) {
+  if (!messageProgress.isComplete || configUpdated) {
     logger.info(
       `[indexSync] Messages need syncing: ${messageProgress.totalProcessed}/${messageProgress.totalDocuments} indexed`,
     );
@@ -79,7 +167,7 @@ async function performSync() {
 
   // Check if we need to sync conversations
   const convoProgress = await Conversation.getSyncProgress();
-  if (!convoProgress.isComplete) {
+  if (!convoProgress.isComplete || configUpdated) {
     logger.info(
       `[indexSync] Conversations need syncing: ${convoProgress.totalProcessed}/${convoProgress.totalDocuments} indexed`,
     );
```
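Meilisearch only honors a `filter` clause for attributes listed in an index's `filterableAttributes`, which is why the hunk above must reconfigure both indexes and force a full re-sync before the user-scoped queries (see the Conversation hunk below) can return anything. A minimal sketch of the resulting query shape, with an illustrative host and user id:

```js
// Once `user` is filterable and present on indexed documents,
// searches can be scoped to a single user in one round trip.
const { MeiliSearch } = require('meilisearch');

const client = new MeiliSearch({ host: 'http://localhost:7700' }); // illustrative host
const results = await client.index('convos').search('quarterly report', {
  filter: `user = "64f0c0ffee"`, // hypothetical user id
  limit: 20,
});
```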
```diff
@@ -11,7 +11,7 @@ const {
   getProjectByName,
 } = require('./Project');
 const { removeAllPermissions } = require('~/server/services/PermissionService');
-const { getCachedTools } = require('~/server/services/Config');
+const { getMCPServerTools } = require('~/server/services/Config');
 const { getActions } = require('./Action');
 const { Agent } = require('~/db/models');
 
@@ -49,6 +49,14 @@ const createAgent = async (agentData) => {
  */
 const getAgent = async (searchParameter) => await Agent.findOne(searchParameter).lean();
 
+/**
+ * Get multiple agent documents based on the provided search parameters.
+ *
+ * @param {Object} searchParameter - The search parameters to find agents.
+ * @returns {Promise<Agent[]>} Array of agent documents as plain objects.
+ */
+const getAgents = async (searchParameter) => await Agent.find(searchParameter).lean();
+
 /**
  * Load an agent based on the provided ID
  *
@@ -61,8 +69,6 @@ const getAgent = async (searchParameter) => await Agent.findOne(searchParameter)
  */
 const loadEphemeralAgent = async ({ req, agent_id, endpoint, model_parameters: _m }) => {
   const { model, ...model_parameters } = _m;
-  /** @type {Record<string, FunctionTool>} */
-  const availableTools = await getCachedTools({ userId: req.user.id, includeGlobal: true });
   /** @type {TEphemeralAgent | null} */
   const ephemeralAgent = req.body.ephemeralAgent;
   const mcpServers = new Set(ephemeralAgent?.mcp);
@@ -80,22 +86,18 @@ const loadEphemeralAgent = async ({ req, agent_id, endpoint, model_parameters: _
 
   const addedServers = new Set();
   if (mcpServers.size > 0) {
-    for (const toolName of Object.keys(availableTools)) {
-      if (!toolName.includes(mcp_delimiter)) {
-        continue;
-      }
-      const mcpServer = toolName.split(mcp_delimiter)?.[1];
-      if (mcpServer && mcpServers.has(mcpServer)) {
-        addedServers.add(mcpServer);
-        tools.push(toolName);
-      }
-    }
-
     for (const mcpServer of mcpServers) {
       if (addedServers.has(mcpServer)) {
         continue;
       }
-      tools.push(`${mcp_all}${mcp_delimiter}${mcpServer}`);
+      const serverTools = await getMCPServerTools(mcpServer);
+      if (!serverTools) {
+        tools.push(`${mcp_all}${mcp_delimiter}${mcpServer}`);
+        addedServers.add(mcpServer);
+        continue;
+      }
+      tools.push(...Object.keys(serverTools));
       addedServers.add(mcpServer);
     }
   }
@@ -835,6 +837,7 @@ const countPromotedAgents = async () => {
 
 module.exports = {
   getAgent,
+  getAgents,
   loadAgent,
   createAgent,
   updateAgent,
```
```diff
@@ -8,6 +8,7 @@ process.env.CREDS_IV = '0123456789abcdef';
 
 jest.mock('~/server/services/Config', () => ({
   getCachedTools: jest.fn(),
+  getMCPServerTools: jest.fn(),
 }));
 
 const mongoose = require('mongoose');
@@ -30,7 +31,7 @@ const {
   generateActionMetadataHash,
 } = require('./Agent');
 const permissionService = require('~/server/services/PermissionService');
-const { getCachedTools } = require('~/server/services/Config');
+const { getCachedTools, getMCPServerTools } = require('~/server/services/Config');
 const { AclEntry } = require('~/db/models');
 
 /**
@@ -1929,6 +1930,16 @@ describe('models/Agent', () => {
         another_tool: {},
       });
 
+      // Mock getMCPServerTools to return tools for each server
+      getMCPServerTools.mockImplementation(async (server) => {
+        if (server === 'server1') {
+          return { tool1_mcp_server1: {} };
+        } else if (server === 'server2') {
+          return { tool2_mcp_server2: {} };
+        }
+        return null;
+      });
+
       const mockReq = {
         user: { id: 'user123' },
         body: {
@@ -2113,6 +2124,14 @@ describe('models/Agent', () => {
 
       getCachedTools.mockResolvedValue(availableTools);
 
+      // Mock getMCPServerTools to return all tools for server1
+      getMCPServerTools.mockImplementation(async (server) => {
+        if (server === 'server1') {
+          return availableTools; // All 100 tools belong to server1
+        }
+        return null;
+      });
+
       const mockReq = {
         user: { id: 'user123' },
         body: {
@@ -2654,6 +2673,17 @@ describe('models/Agent', () => {
         tool_mcp_server2: {}, // Different server
       });
 
+      // Mock getMCPServerTools to return only tools matching the server
+      getMCPServerTools.mockImplementation(async (server) => {
+        if (server === 'server1') {
+          // Only return tool that correctly matches server1 format
+          return { tool_mcp_server1: {} };
+        } else if (server === 'server2') {
+          return { tool_mcp_server2: {} };
+        }
+        return null;
+      });
+
       const mockReq = {
         user: { id: 'user123' },
         body: {
```
```diff
@@ -174,7 +174,7 @@ module.exports = {
 
     if (search) {
       try {
-        const meiliResults = await Conversation.meiliSearch(search);
+        const meiliResults = await Conversation.meiliSearch(search, { filter: `user = "${user}"` });
         const matchingIds = Array.isArray(meiliResults.hits)
           ? meiliResults.hits.map((result) => result.conversationId)
           : [];
```
```diff
@@ -239,10 +239,46 @@ const updateTagsForConversation = async (user, conversationId, tags) => {
   }
 };
 
+/**
+ * Increments tag counts for existing tags only.
+ * @param {string} user - The user ID.
+ * @param {string[]} tags - Array of tag names to increment
+ * @returns {Promise<void>}
+ */
+const bulkIncrementTagCounts = async (user, tags) => {
+  if (!tags || tags.length === 0) {
+    return;
+  }
+
+  try {
+    const uniqueTags = [...new Set(tags.filter(Boolean))];
+    if (uniqueTags.length === 0) {
+      return;
+    }
+
+    const bulkOps = uniqueTags.map((tag) => ({
+      updateOne: {
+        filter: { user, tag },
+        update: { $inc: { count: 1 } },
+      },
+    }));
+
+    const result = await ConversationTag.bulkWrite(bulkOps);
+    if (result && result.modifiedCount > 0) {
+      logger.debug(
+        `user: ${user} | Incremented tag counts - modified ${result.modifiedCount} tags`,
+      );
+    }
+  } catch (error) {
+    logger.error('[bulkIncrementTagCounts] Error incrementing tag counts', error);
+  }
+};
+
 module.exports = {
   getConversationTags,
   createConversationTag,
   updateConversationTag,
   deleteConversationTag,
+  bulkIncrementTagCounts,
   updateTagsForConversation,
 };
```
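`bulkIncrementTagCounts` batches all the `$inc` updates into a single `bulkWrite` round trip instead of issuing one `updateOne` per tag, and because there is no upsert it only touches tags that already exist. A usage sketch with illustrative values:

```js
// One database round trip regardless of tag count; duplicate and
// falsy entries are filtered out before the bulk operation is built.
await bulkIncrementTagCounts('user123', ['work', 'ideas', 'work', null]);
// -> increments `count` once each for the existing 'work' and 'ideas' tags
```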
```diff
@@ -42,7 +42,7 @@ const getToolFilesByIds = async (fileIds, toolResourceSet) => {
     $or: [],
   };
 
-  if (toolResourceSet.has(EToolResources.ocr)) {
+  if (toolResourceSet.has(EToolResources.context)) {
     filter.$or.push({ text: { $exists: true, $ne: null }, context: FileContext.agents });
   }
   if (toolResourceSet.has(EToolResources.file_search)) {
```
```diff
@@ -1,4 +1,4 @@
-const { matchModelName } = require('../utils/tokens');
+const { matchModelName } = require('@librechat/api');
 const defaultRate = 6;
 
 /**
@@ -111,8 +111,8 @@ const tokenValues = Object.assign(
   'claude-': { prompt: 0.8, completion: 2.4 },
   'command-r-plus': { prompt: 3, completion: 15 },
   'command-r': { prompt: 0.5, completion: 1.5 },
-  'deepseek-reasoner': { prompt: 0.55, completion: 2.19 },
-  deepseek: { prompt: 0.14, completion: 0.28 },
+  'deepseek-reasoner': { prompt: 0.28, completion: 0.42 },
+  deepseek: { prompt: 0.28, completion: 0.42 },
   /* cohere doesn't have rates for the older command models,
   so this was from https://artificialanalysis.ai/models/command-light/providers */
   command: { prompt: 0.38, completion: 0.38 },
@@ -124,7 +124,8 @@ const tokenValues = Object.assign(
   'gemini-2.0-flash': { prompt: 0.1, completion: 0.4 },
   'gemini-2.0': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
   'gemini-2.5-pro': { prompt: 1.25, completion: 10 },
-  'gemini-2.5-flash': { prompt: 0.15, completion: 3.5 },
+  'gemini-2.5-flash': { prompt: 0.3, completion: 2.5 },
+  'gemini-2.5-flash-lite': { prompt: 0.075, completion: 0.4 },
   'gemini-2.5': { prompt: 0, completion: 0 }, // Free for a period of time
   'gemini-1.5-flash-8b': { prompt: 0.075, completion: 0.3 },
   'gemini-1.5-flash': { prompt: 0.15, completion: 0.6 },
```
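These prompt/completion pairs read naturally as USD per million tokens, which is how such tables are conventionally quoted (the updated `gemini-2.5-flash` values match Google's published per-1M pricing); the file itself does not state the unit here, so treat this as an assumption. Under it, a spend calculation looks like:

```js
// Sketch, assuming tokenValues entries are USD per 1M tokens.
const promptTokens = 120_000;
const completionTokens = 8_000;
const rate = { prompt: 0.3, completion: 2.5 }; // 'gemini-2.5-flash' after this diff
const costUSD = (promptTokens / 1e6) * rate.prompt + (completionTokens / 1e6) * rate.completion;
console.log(costUSD.toFixed(3)); // "0.056"
```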
```diff
@@ -571,6 +571,9 @@
 
 describe('Google Model Tests', () => {
   const googleModels = [
+    'gemini-2.5-pro',
+    'gemini-2.5-flash',
+    'gemini-2.5-flash-lite',
     'gemini-2.5-pro-preview-05-06',
     'gemini-2.5-flash-preview-04-17',
     'gemini-2.5-exp',
@@ -611,6 +614,9 @@ describe('Google Model Tests', () => {
 
   it('should map to the correct model keys', () => {
     const expected = {
+      'gemini-2.5-pro': 'gemini-2.5-pro',
+      'gemini-2.5-flash': 'gemini-2.5-flash',
+      'gemini-2.5-flash-lite': 'gemini-2.5-flash-lite',
       'gemini-2.5-pro-preview-05-06': 'gemini-2.5-pro',
       'gemini-2.5-flash-preview-04-17': 'gemini-2.5-flash',
       'gemini-2.5-exp': 'gemini-2.5',
```
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@librechat/backend",
-  "version": "v0.8.0-rc3",
+  "version": "v0.8.0",
   "description": "",
   "scripts": {
     "start": "echo 'please run this from the root directory'",
@@ -49,14 +49,14 @@
     "@langchain/google-vertexai": "^0.2.13",
     "@langchain/openai": "^0.5.18",
     "@langchain/textsplitters": "^0.1.0",
-    "@librechat/agents": "^2.4.76",
+    "@librechat/agents": "^2.4.82",
     "@librechat/api": "*",
     "@librechat/data-schemas": "*",
     "@microsoft/microsoft-graph-client": "^3.0.7",
     "@modelcontextprotocol/sdk": "^1.17.1",
     "@node-saml/passport-saml": "^5.1.0",
     "@waylaidwanderer/fetch-event-source": "^3.0.1",
-    "axios": "^1.8.2",
+    "axios": "^1.12.1",
     "bcryptjs": "^2.4.3",
     "compression": "^1.8.1",
     "connect-redis": "^8.1.0",
```
```diff
@@ -1,8 +1,8 @@
 const cookies = require('cookie');
 const jwt = require('jsonwebtoken');
 const openIdClient = require('openid-client');
-const { isEnabled } = require('@librechat/api');
 const { logger } = require('@librechat/data-schemas');
+const { isEnabled, findOpenIDUser } = require('@librechat/api');
 const {
   requestPasswordReset,
   setOpenIDAuthTokens,
@@ -11,8 +11,9 @@ const {
   registerUser,
 } = require('~/server/services/AuthService');
 const { findUser, getUserById, deleteAllUserSessions, findSession } = require('~/models');
-const { getOpenIdConfig } = require('~/strategies');
 const { getGraphApiToken } = require('~/server/services/GraphTokenService');
+const { getOAuthReconnectionManager } = require('~/config');
+const { getOpenIdConfig } = require('~/strategies');
 
 const registrationController = async (req, res) => {
   try {
@@ -71,8 +72,14 @@ const refreshController = async (req, res) => {
       const openIdConfig = getOpenIdConfig();
       const tokenset = await openIdClient.refreshTokenGrant(openIdConfig, refreshToken);
       const claims = tokenset.claims();
-      const user = await findUser({ email: claims.email });
-      if (!user) {
+      const { user, error } = await findOpenIDUser({
+        findUser,
+        email: claims.email,
+        openidId: claims.sub,
+        idOnTheSource: claims.oid,
+        strategyName: 'refreshController',
+      });
+      if (error || !user) {
         return res.status(401).redirect('/login');
       }
       const token = setOpenIDAuthTokens(tokenset, res, user._id.toString());
@@ -96,14 +103,25 @@ const refreshController = async (req, res) => {
       return res.status(200).send({ token, user });
     }
 
-    // Find the session with the hashed refresh token
-    const session = await findSession({
-      userId: userId,
-      refreshToken: refreshToken,
-    });
+    /** Session with the hashed refresh token */
+    const session = await findSession(
+      {
+        userId: userId,
+        refreshToken: refreshToken,
+      },
+      { lean: false },
+    );
 
     if (session && session.expiration > new Date()) {
-      const token = await setAuthTokens(userId, res, session._id);
+      const token = await setAuthTokens(userId, res, session);
+
+      // trigger OAuth MCP server reconnection asynchronously (best effort)
+      void getOAuthReconnectionManager()
+        .reconnectServers(userId)
+        .catch((err) => {
+          logger.error('Error reconnecting OAuth MCP servers:', err);
+        });
+
       res.status(200).send({ token, user });
     } else if (req?.query?.retry) {
       // Retrying from a refresh token request that failed (401)
@@ -114,7 +132,7 @@ const refreshController = async (req, res) => {
       res.status(401).send('Refresh token expired or not found for this user');
     }
   } catch (err) {
-    logger.error(`[refreshController] Refresh token: ${refreshToken}`, err);
+    logger.error(`[refreshController] Invalid refresh token:`, err);
     res.status(403).send('Invalid refresh token');
   }
 };
```
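Three details in the hunk above are worth noting. The old catch block logged the raw refresh token verbatim, which the new message avoids. `findOpenIDUser` matches on the OpenID subject (`claims.sub`) and source id (`claims.oid`) rather than trusting the email claim alone. And `findSession` is now called with `{ lean: false }` so the full Mongoose session document, not just its `_id`, can be handed to `setAuthTokens`.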
```diff
@@ -1,16 +1,9 @@
 const { logger } = require('@librechat/data-schemas');
-const { CacheKeys, Constants } = require('librechat-data-provider');
-const {
-  getToolkitKey,
-  checkPluginAuth,
-  filterUniquePlugins,
-  convertMCPToolToPlugin,
-  convertMCPToolsToPlugins,
-} = require('@librechat/api');
-const { getCachedTools, setCachedTools, mergeUserTools } = require('~/server/services/Config');
+const { CacheKeys } = require('librechat-data-provider');
+const { getToolkitKey, checkPluginAuth, filterUniquePlugins } = require('@librechat/api');
+const { getCachedTools, setCachedTools } = require('~/server/services/Config');
 const { availableTools, toolkits } = require('~/app/clients/tools');
 const { getAppConfig } = require('~/server/services/Config');
-const { getMCPManager } = require('~/config');
 const { getLogStores } = require('~/cache');
 
 const getAvailablePluginsController = async (req, res) => {
@@ -72,63 +65,27 @@ const getAvailableTools = async (req, res) => {
     }
     const cache = getLogStores(CacheKeys.CONFIG_STORE);
     const cachedToolsArray = await cache.get(CacheKeys.TOOLS);
-    const cachedUserTools = await getCachedTools({ userId });
 
     const appConfig = req.config ?? (await getAppConfig({ role: req.user?.role }));
 
-    /** @type {TPlugin[]} */
-    let mcpPlugins;
-    if (appConfig?.mcpConfig) {
-      const mcpManager = getMCPManager();
-      mcpPlugins =
-        cachedUserTools != null
-          ? convertMCPToolsToPlugins({ functionTools: cachedUserTools, mcpManager })
-          : undefined;
-    }
-
-    if (
-      cachedToolsArray != null &&
-      (appConfig?.mcpConfig != null ? mcpPlugins != null && mcpPlugins.length > 0 : true)
-    ) {
-      const dedupedTools = filterUniquePlugins([...(mcpPlugins ?? []), ...cachedToolsArray]);
-      res.status(200).json(dedupedTools);
+    // Return early if we have cached tools
+    if (cachedToolsArray != null) {
+      res.status(200).json(cachedToolsArray);
       return;
     }
 
     /** @type {Record<string, FunctionTool> | null} Get tool definitions to filter which tools are actually available */
-    let toolDefinitions = await getCachedTools({ includeGlobal: true });
-    let prelimCachedTools;
+    let toolDefinitions = await getCachedTools();
 
     if (toolDefinitions == null && appConfig?.availableTools != null) {
       logger.warn('[getAvailableTools] Tool cache was empty, re-initializing from app config');
       await setCachedTools(appConfig.availableTools);
       toolDefinitions = appConfig.availableTools;
     }
 
     /** @type {import('@librechat/api').LCManifestTool[]} */
     let pluginManifest = availableTools;
-
-    if (appConfig?.mcpConfig != null) {
-      try {
-        const mcpManager = getMCPManager();
-        const mcpTools = await mcpManager.getAllToolFunctions(userId);
-        prelimCachedTools = prelimCachedTools ?? {};
-        for (const [toolKey, toolData] of Object.entries(mcpTools)) {
-          const plugin = convertMCPToolToPlugin({
-            toolKey,
-            toolData,
-            mcpManager,
-          });
-          if (plugin) {
-            pluginManifest.push(plugin);
-          }
-          prelimCachedTools[toolKey] = toolData;
-        }
-        await mergeUserTools({ userId, cachedUserTools, userTools: prelimCachedTools });
-      } catch (error) {
-        logger.error(
-          '[getAvailableTools] Error loading MCP Tools, servers may still be initializing:',
-          error,
-        );
-      }
-    } else if (prelimCachedTools != null) {
-      await setCachedTools(prelimCachedTools, { isGlobal: true });
-    }
 
     /** @type {TPlugin[]} Deduplicate and authenticate plugins */
     const uniquePlugins = filterUniquePlugins(pluginManifest);
     const authenticatedPlugins = uniquePlugins.map((plugin) => {
@@ -139,13 +96,13 @@ const getAvailableTools = async (req, res) => {
       }
     });
 
-    /** Filter plugins based on availability and add MCP-specific auth config */
+    /** Filter plugins based on availability */
     const toolsOutput = [];
     for (const plugin of authenticatedPlugins) {
-      const isToolDefined = toolDefinitions[plugin.pluginKey] !== undefined;
+      const isToolDefined = toolDefinitions?.[plugin.pluginKey] !== undefined;
       const isToolkit =
         plugin.toolkit === true &&
-        Object.keys(toolDefinitions).some(
+        Object.keys(toolDefinitions ?? {}).some(
          (key) => getToolkitKey({ toolkits, toolName: key }) === plugin.pluginKey,
         );
 
@@ -153,39 +110,13 @@ const getAvailableTools = async (req, res) => {
         continue;
       }
 
-      const toolToAdd = { ...plugin };
-
-      if (plugin.pluginKey.includes(Constants.mcp_delimiter)) {
-        const parts = plugin.pluginKey.split(Constants.mcp_delimiter);
-        const serverName = parts[parts.length - 1];
-        const serverConfig = appConfig?.mcpConfig?.[serverName];
-
-        if (serverConfig?.customUserVars) {
-          const customVarKeys = Object.keys(serverConfig.customUserVars);
-          if (customVarKeys.length === 0) {
-            toolToAdd.authConfig = [];
-            toolToAdd.authenticated = true;
-          } else {
-            toolToAdd.authConfig = Object.entries(serverConfig.customUserVars).map(
-              ([key, value]) => ({
-                authField: key,
-                label: value.title || key,
-                description: value.description || '',
-              }),
-            );
-            toolToAdd.authenticated = false;
-          }
-        }
-      }
-
-      toolsOutput.push(toolToAdd);
+      toolsOutput.push(plugin);
     }
 
     const finalTools = filterUniquePlugins(toolsOutput);
     await cache.set(CacheKeys.TOOLS, finalTools);
 
-    const dedupedTools = filterUniquePlugins([...(mcpPlugins ?? []), ...finalTools]);
-    res.status(200).json(dedupedTools);
+    res.status(200).json(finalTools);
   } catch (error) {
     logger.error('[getAvailableTools]', error);
     res.status(500).json({ message: error.message });
```
```diff
@@ -1,4 +1,3 @@
-const { Constants } = require('librechat-data-provider');
 const { getCachedTools, getAppConfig } = require('~/server/services/Config');
 const { getLogStores } = require('~/cache');
 
@@ -17,18 +16,10 @@ jest.mock('~/server/services/Config', () => ({
     includedTools: [],
   }),
   setCachedTools: jest.fn(),
-  mergeUserTools: jest.fn(),
 }));
 
+// loadAndFormatTools mock removed - no longer used in PluginController
 
-jest.mock('~/config', () => ({
-  getMCPManager: jest.fn(() => ({
-    getAllToolFunctions: jest.fn().mockResolvedValue({}),
-    getRawConfig: jest.fn().mockReturnValue({}),
-  })),
-  getFlowStateManager: jest.fn(),
-}));
+// getMCPManager mock removed - no longer used in PluginController
 
 jest.mock('~/app/clients/tools', () => ({
   availableTools: [],
@@ -159,52 +150,6 @@ describe('PluginController', () => {
   });
 
   describe('getAvailableTools', () => {
-    it('should use convertMCPToolsToPlugins for user-specific MCP tools', async () => {
-      const mockUserTools = {
-        [`tool1${Constants.mcp_delimiter}server1`]: {
-          type: 'function',
-          function: {
-            name: `tool1${Constants.mcp_delimiter}server1`,
-            description: 'Tool 1',
-            parameters: { type: 'object', properties: {} },
-          },
-        },
-      };
-
-      mockCache.get.mockResolvedValue(null);
-      getCachedTools.mockResolvedValueOnce(mockUserTools);
-      mockReq.config = {
-        mcpConfig: {
-          server1: {},
-        },
-        paths: { structuredTools: '/mock/path' },
-      };
-
-      // Mock MCP manager to return empty tools initially (since getAllToolFunctions is called)
-      const mockMCPManager = {
-        getAllToolFunctions: jest.fn().mockResolvedValue({}),
-        getRawConfig: jest.fn().mockReturnValue({}),
-      };
-      require('~/config').getMCPManager.mockReturnValue(mockMCPManager);
-
-      // Mock second call to return tool definitions (includeGlobal: true)
-      getCachedTools.mockResolvedValueOnce(mockUserTools);
-
-      await getAvailableTools(mockReq, mockRes);
-
-      expect(mockRes.status).toHaveBeenCalledWith(200);
-      const responseData = mockRes.json.mock.calls[0][0];
-      expect(responseData).toBeDefined();
-      expect(Array.isArray(responseData)).toBe(true);
-      expect(responseData.length).toBeGreaterThan(0);
-      const convertedTool = responseData.find(
-        (tool) => tool.pluginKey === `tool1${Constants.mcp_delimiter}server1`,
-      );
-      expect(convertedTool).toBeDefined();
-      // The real convertMCPToolsToPlugins extracts the name from the delimiter
-      expect(convertedTool.name).toBe('tool1');
-    });
-
     it('should use filterUniquePlugins to deduplicate combined tools', async () => {
       const mockUserTools = {
         'user-tool': {
@@ -229,9 +174,6 @@ describe('PluginController', () => {
         paths: { structuredTools: '/mock/path' },
       };
 
-      // Mock second call to return tool definitions
-      getCachedTools.mockResolvedValueOnce(mockUserTools);
-
       await getAvailableTools(mockReq, mockRes);
 
       expect(mockRes.status).toHaveBeenCalledWith(200);
@@ -254,14 +196,7 @@ describe('PluginController', () => {
       require('~/app/clients/tools').availableTools.push(mockPlugin);
 
       mockCache.get.mockResolvedValue(null);
-      // First call returns null for user tools
-      getCachedTools.mockResolvedValueOnce(null);
-      mockReq.config = {
-        mcpConfig: null,
-        paths: { structuredTools: '/mock/path' },
-      };
-
-      // Second call (with includeGlobal: true) returns the tool definitions
+      // getCachedTools returns the tool definitions
       getCachedTools.mockResolvedValueOnce({
         tool1: {
           type: 'function',
@@ -272,6 +207,10 @@ describe('PluginController', () => {
           },
         },
       });
+      mockReq.config = {
+        mcpConfig: null,
+        paths: { structuredTools: '/mock/path' },
+      };
 
       await getAvailableTools(mockReq, mockRes);
 
@@ -302,14 +241,7 @@ describe('PluginController', () => {
       });
 
       mockCache.get.mockResolvedValue(null);
-      // First call returns null for user tools
-      getCachedTools.mockResolvedValueOnce(null);
-      mockReq.config = {
-        mcpConfig: null,
-        paths: { structuredTools: '/mock/path' },
-      };
-
-      // Second call (with includeGlobal: true) returns the tool definitions
+      // getCachedTools returns the tool definitions
       getCachedTools.mockResolvedValueOnce({
         toolkit1_function: {
           type: 'function',
@@ -320,6 +252,10 @@ describe('PluginController', () => {
           },
         },
       });
+      mockReq.config = {
+        mcpConfig: null,
+        paths: { structuredTools: '/mock/path' },
+      };
 
       await getAvailableTools(mockReq, mockRes);
 
@@ -331,126 +267,7 @@ describe('PluginController', () => {
     });
   });
 
-  describe('plugin.icon behavior', () => {
-    const callGetAvailableToolsWithMCPServer = async (serverConfig) => {
-      mockCache.get.mockResolvedValue(null);
-
-      const functionTools = {
-        [`test-tool${Constants.mcp_delimiter}test-server`]: {
-          type: 'function',
-          function: {
-            name: `test-tool${Constants.mcp_delimiter}test-server`,
-            description: 'A test tool',
-            parameters: { type: 'object', properties: {} },
-          },
-        },
-      };
-
-      // Mock the MCP manager to return tools and server config
-      const mockMCPManager = {
-        getAllToolFunctions: jest.fn().mockResolvedValue(functionTools),
-        getRawConfig: jest.fn().mockReturnValue(serverConfig),
-      };
-      require('~/config').getMCPManager.mockReturnValue(mockMCPManager);
-
-      // First call returns empty user tools
-      getCachedTools.mockResolvedValueOnce({});
-
-      // Mock getAppConfig to return the mcpConfig
-      mockReq.config = {
-        mcpConfig: {
-          'test-server': serverConfig,
-        },
-      };
-
-      // Second call (with includeGlobal: true) returns the tool definitions
-      getCachedTools.mockResolvedValueOnce(functionTools);
-
-      await getAvailableTools(mockReq, mockRes);
-      const responseData = mockRes.json.mock.calls[0][0];
-      return responseData.find(
-        (tool) => tool.pluginKey === `test-tool${Constants.mcp_delimiter}test-server`,
-      );
-    };
-
-    it('should set plugin.icon when iconPath is defined', async () => {
-      const serverConfig = {
-        iconPath: '/path/to/icon.png',
-      };
-      const testTool = await callGetAvailableToolsWithMCPServer(serverConfig);
-      expect(testTool.icon).toBe('/path/to/icon.png');
-    });
-
-    it('should set plugin.icon to undefined when iconPath is not defined', async () => {
-      const serverConfig = {};
-      const testTool = await callGetAvailableToolsWithMCPServer(serverConfig);
-      expect(testTool.icon).toBeUndefined();
-    });
-  });
-
   describe('helper function integration', () => {
-    it('should properly handle MCP tools with custom user variables', async () => {
-      const appConfig = {
-        mcpConfig: {
-          'test-server': {
-            customUserVars: {
-              API_KEY: { title: 'API Key', description: 'Your API key' },
-            },
-          },
-        },
-      };
-
-      // Mock MCP tools returned by getAllToolFunctions
-      const mcpToolFunctions = {
-        [`tool1${Constants.mcp_delimiter}test-server`]: {
-          type: 'function',
-          function: {
-            name: `tool1${Constants.mcp_delimiter}test-server`,
-            description: 'Tool 1',
-            parameters: {},
-          },
-        },
-      };
-
-      // Mock the MCP manager to return tools
-      const mockMCPManager = {
-        getAllToolFunctions: jest.fn().mockResolvedValue(mcpToolFunctions),
-        getRawConfig: jest.fn().mockReturnValue({
-          customUserVars: {
-            API_KEY: { title: 'API Key', description: 'Your API key' },
-          },
-        }),
-      };
-      require('~/config').getMCPManager.mockReturnValue(mockMCPManager);
-
-      mockCache.get.mockResolvedValue(null);
-      mockReq.config = appConfig;
-
-      // First call returns user tools (empty in this case)
-      getCachedTools.mockResolvedValueOnce({});
-
-      // Second call (with includeGlobal: true) returns tool definitions including our MCP tool
-      getCachedTools.mockResolvedValueOnce(mcpToolFunctions);
-
-      await getAvailableTools(mockReq, mockRes);
-
-      expect(mockRes.status).toHaveBeenCalledWith(200);
-      const responseData = mockRes.json.mock.calls[0][0];
-      expect(Array.isArray(responseData)).toBe(true);
-
-      // Find the MCP tool in the response
-      const mcpTool = responseData.find(
-        (tool) => tool.pluginKey === `tool1${Constants.mcp_delimiter}test-server`,
-      );
-
-      // The actual implementation adds authConfig and sets authenticated to false when customUserVars exist
-      expect(mcpTool).toBeDefined();
-      expect(mcpTool.authConfig).toEqual([
-        { authField: 'API_KEY', label: 'API Key', description: 'Your API key' },
-      ]);
-      expect(mcpTool.authenticated).toBe(false);
-    });
-
     it('should handle error cases gracefully', async () => {
       mockCache.get.mockRejectedValue(new Error('Cache error'));
 
@@ -472,23 +289,13 @@ describe('PluginController', () => {
 
     it('should handle null cachedTools and cachedUserTools', async () => {
       mockCache.get.mockResolvedValue(null);
-      // First call returns null for user tools
-      getCachedTools.mockResolvedValueOnce(null);
+      // getCachedTools returns empty object instead of null
+      getCachedTools.mockResolvedValueOnce({});
       mockReq.config = {
         mcpConfig: null,
         paths: { structuredTools: '/mock/path' },
       };
 
-      // Mock MCP manager to return no tools
-      const mockMCPManager = {
-        getAllToolFunctions: jest.fn().mockResolvedValue({}),
-        getRawConfig: jest.fn().mockReturnValue({}),
-      };
-      require('~/config').getMCPManager.mockReturnValue(mockMCPManager);
-
-      // Second call (with includeGlobal: true) returns empty object instead of null
-      getCachedTools.mockResolvedValueOnce({});
-
       await getAvailableTools(mockReq, mockRes);
 
       // Should handle null values gracefully
@@ -503,9 +310,9 @@ describe('PluginController', () => {
         paths: { structuredTools: '/mock/path' },
       };
 
-      // Mock getCachedTools to return undefined for both calls
+      // Mock getCachedTools to return undefined
       getCachedTools.mockReset();
-      getCachedTools.mockResolvedValueOnce(undefined).mockResolvedValueOnce(undefined);
+      getCachedTools.mockResolvedValueOnce(undefined);
 
       await getAvailableTools(mockReq, mockRes);
 
@@ -514,51 +321,6 @@ describe('PluginController', () => {
       expect(mockRes.json).toHaveBeenCalledWith([]);
     });
 
-    it('should handle `cachedToolsArray` and `mcpPlugins` both being defined', async () => {
-      const cachedTools = [{ name: 'CachedTool', pluginKey: 'cached-tool', description: 'Cached' }];
-      // Use MCP delimiter for the user tool so convertMCPToolsToPlugins works
-      const userTools = {
-        [`user-tool${Constants.mcp_delimiter}server1`]: {
-          type: 'function',
-          function: {
-            name: `user-tool${Constants.mcp_delimiter}server1`,
-            description: 'User tool',
-            parameters: {},
-          },
-        },
-      };
-
-      mockCache.get.mockResolvedValue(cachedTools);
-      getCachedTools.mockResolvedValueOnce(userTools);
-      mockReq.config = {
-        mcpConfig: {
-          server1: {},
-        },
-        paths: { structuredTools: '/mock/path' },
-      };
-
-      // Mock MCP manager to return empty tools initially
-      const mockMCPManager = {
-        getAllToolFunctions: jest.fn().mockResolvedValue({}),
-        getRawConfig: jest.fn().mockReturnValue({}),
-      };
-      require('~/config').getMCPManager.mockReturnValue(mockMCPManager);
-
-      // The controller expects a second call to getCachedTools
-      getCachedTools.mockResolvedValueOnce({
-        'cached-tool': { type: 'function', function: { name: 'cached-tool' } },
-        [`user-tool${Constants.mcp_delimiter}server1`]:
-          userTools[`user-tool${Constants.mcp_delimiter}server1`],
-      });
-
-      await getAvailableTools(mockReq, mockRes);
-
-      expect(mockRes.status).toHaveBeenCalledWith(200);
-      const responseData = mockRes.json.mock.calls[0][0];
-      // Should have both cached and user tools
-      expect(responseData.length).toBeGreaterThanOrEqual(2);
-    });
-
     it('should handle empty toolDefinitions object', async () => {
       mockCache.get.mockResolvedValue(null);
       // Reset getCachedTools to ensure clean state
@@ -569,76 +331,12 @@ describe('PluginController', () => {
       // Ensure no plugins are available
       require('~/app/clients/tools').availableTools.length = 0;
 
-      // Reset MCP manager to default state
-      const mockMCPManager = {
-        getAllToolFunctions: jest.fn().mockResolvedValue({}),
-        getRawConfig: jest.fn().mockReturnValue({}),
-      };
-      require('~/config').getMCPManager.mockReturnValue(mockMCPManager);
-
       await getAvailableTools(mockReq, mockRes);
 
       // With empty tool definitions, no tools should be in the final output
       expect(mockRes.json).toHaveBeenCalledWith([]);
     });
 
-    it('should handle MCP tools without customUserVars', async () => {
-      const appConfig = {
-        mcpConfig: {
-          'test-server': {
-            // No customUserVars defined
-          },
-        },
-      };
-
-      const mockUserTools = {
-        [`tool1${Constants.mcp_delimiter}test-server`]: {
-          type: 'function',
-          function: {
-            name: `tool1${Constants.mcp_delimiter}test-server`,
-            description: 'Tool 1',
-            parameters: { type: 'object', properties: {} },
-          },
-        },
-      };
-
-      // Mock the MCP manager to return the tools
-      const mockMCPManager = {
-        getAllToolFunctions: jest.fn().mockResolvedValue(mockUserTools),
-        getRawConfig: jest.fn().mockReturnValue({
-          // No customUserVars defined
-        }),
-      };
-      require('~/config').getMCPManager.mockReturnValue(mockMCPManager);
-
-      mockCache.get.mockResolvedValue(null);
-      mockReq.config = appConfig;
-      // First call returns empty user tools
-      getCachedTools.mockResolvedValueOnce({});
-
-      // Second call (with includeGlobal: true) returns the tool definitions
-      getCachedTools.mockResolvedValueOnce(mockUserTools);
-
-      // Ensure no plugins in availableTools for clean test
-      require('~/app/clients/tools').availableTools.length = 0;
-
-      await getAvailableTools(mockReq, mockRes);
-
-      expect(mockRes.status).toHaveBeenCalledWith(200);
-      const responseData = mockRes.json.mock.calls[0][0];
-      expect(Array.isArray(responseData)).toBe(true);
-      expect(responseData.length).toBeGreaterThan(0);
```
|
||||
|
||||
const mcpTool = responseData.find(
|
||||
(tool) => tool.pluginKey === `tool1${Constants.mcp_delimiter}test-server`,
|
||||
);
|
||||
|
||||
expect(mcpTool).toBeDefined();
|
||||
expect(mcpTool.authenticated).toBe(true);
|
||||
// The actual implementation sets authConfig to empty array when no customUserVars
|
||||
expect(mcpTool.authConfig).toEqual([]);
|
||||
});
|
||||
|
||||
it('should handle undefined filteredTools and includedTools', async () => {
|
||||
mockReq.config = {};
|
||||
mockCache.get.mockResolvedValue(null);
|
||||
@@ -667,20 +365,129 @@ describe('PluginController', () => {
|
||||
require('~/app/clients/tools').availableTools.push(mockToolkit);
|
||||
|
||||
mockCache.get.mockResolvedValue(null);
|
||||
// First call returns empty object
|
||||
// getCachedTools returns empty object to avoid null reference error
|
||||
getCachedTools.mockResolvedValueOnce({});
|
||||
mockReq.config = {
|
||||
mcpConfig: null,
|
||||
paths: { structuredTools: '/mock/path' },
|
||||
};
|
||||
|
||||
// Second call (with includeGlobal: true) returns empty object to avoid null reference error
|
||||
getCachedTools.mockResolvedValueOnce({});
|
||||
|
||||
await getAvailableTools(mockReq, mockRes);
|
||||
|
||||
// Should handle null toolDefinitions gracefully
|
||||
expect(mockRes.status).toHaveBeenCalledWith(200);
|
||||
});
|
||||
|
||||
it('should handle undefined toolDefinitions when checking isToolDefined (traversaal_search bug)', async () => {
|
||||
// This test reproduces the bug where toolDefinitions is undefined
|
||||
// and accessing toolDefinitions[plugin.pluginKey] causes a TypeError
|
||||
const mockPlugin = {
|
||||
name: 'Traversaal Search',
|
||||
pluginKey: 'traversaal_search',
|
||||
description: 'Search plugin',
|
||||
};
|
||||
|
||||
// Add the plugin to availableTools
|
||||
require('~/app/clients/tools').availableTools.push(mockPlugin);
|
||||
|
||||
mockCache.get.mockResolvedValue(null);
|
||||
|
||||
mockReq.config = {
|
||||
mcpConfig: null,
|
||||
paths: { structuredTools: '/mock/path' },
|
||||
};
|
||||
|
||||
// CRITICAL: getCachedTools returns undefined
|
||||
// This is what causes the bug when trying to access toolDefinitions[plugin.pluginKey]
|
||||
getCachedTools.mockResolvedValueOnce(undefined);
|
||||
|
||||
// This should not throw an error with the optional chaining fix
|
||||
await getAvailableTools(mockReq, mockRes);
|
||||
|
||||
// Should handle undefined toolDefinitions gracefully and return empty array
|
||||
expect(mockRes.status).toHaveBeenCalledWith(200);
|
||||
expect(mockRes.json).toHaveBeenCalledWith([]);
|
||||
});
|
||||
|
||||
it('should re-initialize tools from appConfig when cache returns null', async () => {
|
||||
// Setup: Initial state with tools in appConfig
|
||||
const mockAppTools = {
|
||||
tool1: {
|
||||
type: 'function',
|
||||
function: {
|
||||
name: 'tool1',
|
||||
description: 'Tool 1',
|
||||
parameters: {},
|
||||
},
|
||||
},
|
||||
tool2: {
|
||||
type: 'function',
|
||||
function: {
|
||||
name: 'tool2',
|
||||
description: 'Tool 2',
|
||||
parameters: {},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
// Add matching plugins to availableTools
|
||||
require('~/app/clients/tools').availableTools.push(
|
||||
{ name: 'Tool 1', pluginKey: 'tool1', description: 'Tool 1' },
|
||||
{ name: 'Tool 2', pluginKey: 'tool2', description: 'Tool 2' },
|
||||
);
|
||||
|
||||
// Simulate cache cleared state (returns null)
|
||||
mockCache.get.mockResolvedValue(null);
|
||||
getCachedTools.mockResolvedValueOnce(null); // Global tools (cache cleared)
|
||||
|
||||
mockReq.config = {
|
||||
filteredTools: [],
|
||||
includedTools: [],
|
||||
availableTools: mockAppTools,
|
||||
};
|
||||
|
||||
// Mock setCachedTools to verify it's called to re-initialize
|
||||
const { setCachedTools } = require('~/server/services/Config');
|
||||
|
||||
await getAvailableTools(mockReq, mockRes);
|
||||
|
||||
// Should have re-initialized the cache with tools from appConfig
|
||||
expect(setCachedTools).toHaveBeenCalledWith(mockAppTools);
|
||||
|
||||
// Should still return tools successfully
|
||||
expect(mockRes.status).toHaveBeenCalledWith(200);
|
||||
const responseData = mockRes.json.mock.calls[0][0];
|
||||
expect(responseData).toHaveLength(2);
|
||||
expect(responseData.find((t) => t.pluginKey === 'tool1')).toBeDefined();
|
||||
expect(responseData.find((t) => t.pluginKey === 'tool2')).toBeDefined();
|
||||
});
|
||||
|
||||
it('should handle cache clear without appConfig.availableTools gracefully', async () => {
|
||||
// Setup: appConfig without availableTools
|
||||
getAppConfig.mockResolvedValue({
|
||||
filteredTools: [],
|
||||
includedTools: [],
|
||||
// No availableTools property
|
||||
});
|
||||
|
||||
// Clear availableTools array
|
||||
require('~/app/clients/tools').availableTools.length = 0;
|
||||
|
||||
// Cache returns null (cleared state)
|
||||
mockCache.get.mockResolvedValue(null);
|
||||
getCachedTools.mockResolvedValueOnce(null); // Global tools (cache cleared)
|
||||
|
||||
mockReq.config = {
|
||||
filteredTools: [],
|
||||
includedTools: [],
|
||||
// No availableTools
|
||||
};
|
||||
|
||||
await getAvailableTools(mockReq, mockRes);
|
||||
|
||||
// Should handle gracefully without crashing
|
||||
expect(mockRes.status).toHaveBeenCalledWith(200);
|
||||
expect(mockRes.json).toHaveBeenCalledWith([]);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
@@ -1,26 +1,34 @@
|
||||
const { logger } = require('@librechat/data-schemas');
|
||||
const { webSearchKeys, extractWebSearchEnvVars, normalizeHttpError } = require('@librechat/api');
|
||||
const { Tools, CacheKeys, Constants, FileSources } = require('librechat-data-provider');
|
||||
const {
|
||||
webSearchKeys,
|
||||
MCPOAuthHandler,
|
||||
MCPTokenStorage,
|
||||
normalizeHttpError,
|
||||
extractWebSearchEnvVars,
|
||||
} = require('@librechat/api');
|
||||
const {
|
||||
getFiles,
|
||||
findToken,
|
||||
updateUser,
|
||||
deleteFiles,
|
||||
deleteConvos,
|
||||
deletePresets,
|
||||
deleteMessages,
|
||||
deleteUserById,
|
||||
deleteAllSharedLinks,
|
||||
deleteAllUserSessions,
|
||||
} = require('~/models');
|
||||
const { updateUserPluginAuth, deleteUserPluginAuth } = require('~/server/services/PluginService');
|
||||
const { updateUserPluginsService, deleteUserKey } = require('~/server/services/UserService');
|
||||
const { verifyEmail, resendVerificationEmail } = require('~/server/services/AuthService');
|
||||
const { needsRefresh, getNewS3URL } = require('~/server/services/Files/S3/crud');
|
||||
const { Tools, Constants, FileSources } = require('librechat-data-provider');
|
||||
const { processDeleteRequest } = require('~/server/services/Files/process');
|
||||
const { Transaction, Balance, User } = require('~/db/models');
|
||||
const { Transaction, Balance, User, Token } = require('~/db/models');
|
||||
const { getMCPManager, getFlowStateManager } = require('~/config');
|
||||
const { getAppConfig } = require('~/server/services/Config');
|
||||
const { deleteToolCalls } = require('~/models/ToolCall');
|
||||
const { deleteAllSharedLinks } = require('~/models');
|
||||
const { getMCPManager } = require('~/config');
|
||||
const { getLogStores } = require('~/cache');
|
||||
|
||||
const getUserController = async (req, res) => {
|
||||
const appConfig = await getAppConfig({ role: req.user?.role });
|
||||
@@ -162,6 +170,15 @@ const updateUserPluginsController = async (req, res) => {
|
||||
);
|
||||
({ status, message } = normalizeHttpError(authService));
|
||||
}
|
||||
try {
|
||||
// if the MCP server uses OAuth, perform a full cleanup and token revocation
|
||||
await maybeUninstallOAuthMCP(user.id, pluginKey, appConfig);
|
||||
} catch (error) {
|
||||
logger.error(
|
||||
`[updateUserPluginsController] Error uninstalling OAuth MCP for ${pluginKey}:`,
|
||||
error,
|
||||
);
|
||||
}
|
||||
} else {
|
||||
// This handles:
|
||||
// 1. Web_search uninstall (keys will be populated with all webSearchKeys if auth was {}).
|
||||
@@ -269,6 +286,94 @@ const resendVerificationController = async (req, res) => {
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* OAuth MCP specific uninstall logic
|
||||
*/
|
||||
const maybeUninstallOAuthMCP = async (userId, pluginKey, appConfig) => {
|
||||
if (!pluginKey.startsWith(Constants.mcp_prefix)) {
|
||||
// this is not an MCP server, so nothing to do here
|
||||
return;
|
||||
}
|
||||
|
||||
const serverName = pluginKey.replace(Constants.mcp_prefix, '');
|
||||
const mcpManager = getMCPManager(userId);
|
||||
const serverConfig = mcpManager.getRawConfig(serverName) ?? appConfig?.mcpServers?.[serverName];
|
||||
|
||||
if (!mcpManager.getOAuthServers().has(serverName)) {
|
||||
// this server does not use OAuth, so nothing to do here as well
|
||||
return;
|
||||
}
|
||||
|
||||
// 1. get client info used for revocation (client id, secret)
|
||||
const clientTokenData = await MCPTokenStorage.getClientInfoAndMetadata({
|
||||
userId,
|
||||
serverName,
|
||||
findToken,
|
||||
});
|
||||
if (clientTokenData == null) {
|
||||
return;
|
||||
}
|
||||
const { clientInfo, clientMetadata } = clientTokenData;
|
||||
|
||||
// 2. get decrypted tokens before deletion
|
||||
const tokens = await MCPTokenStorage.getTokens({
|
||||
userId,
|
||||
serverName,
|
||||
findToken,
|
||||
});
|
||||
|
||||
// 3. revoke OAuth tokens at the provider
|
||||
const revocationEndpoint =
|
||||
serverConfig.oauth?.revocation_endpoint ?? clientMetadata.revocation_endpoint;
|
||||
const revocationEndpointAuthMethodsSupported =
|
||||
serverConfig.oauth?.revocation_endpoint_auth_methods_supported ??
|
||||
clientMetadata.revocation_endpoint_auth_methods_supported;
|
||||
|
||||
if (tokens?.access_token) {
|
||||
try {
|
||||
await MCPOAuthHandler.revokeOAuthToken(serverName, tokens.access_token, 'access', {
|
||||
serverUrl: serverConfig.url,
|
||||
clientId: clientInfo.client_id,
|
||||
clientSecret: clientInfo.client_secret ?? '',
|
||||
revocationEndpoint,
|
||||
revocationEndpointAuthMethodsSupported,
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error(`Error revoking OAuth access token for ${serverName}:`, error);
|
||||
}
|
||||
}
|
||||
|
||||
if (tokens?.refresh_token) {
|
||||
try {
|
||||
await MCPOAuthHandler.revokeOAuthToken(serverName, tokens.refresh_token, 'refresh', {
|
||||
serverUrl: serverConfig.url,
|
||||
clientId: clientInfo.client_id,
|
||||
clientSecret: clientInfo.client_secret ?? '',
|
||||
revocationEndpoint,
|
||||
revocationEndpointAuthMethodsSupported,
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error(`Error revoking OAuth refresh token for ${serverName}:`, error);
|
||||
}
|
||||
}
|
||||
|
||||
// 4. delete tokens from the DB after revocation attempts
|
||||
await MCPTokenStorage.deleteUserTokens({
|
||||
userId,
|
||||
serverName,
|
||||
deleteToken: async (filter) => {
|
||||
await Token.deleteOne(filter);
|
||||
},
|
||||
});
|
||||
|
||||
// 5. clear the flow state for the OAuth tokens
|
||||
const flowsCache = getLogStores(CacheKeys.FLOWS);
|
||||
const flowManager = getFlowStateManager(flowsCache);
|
||||
const flowId = MCPOAuthHandler.generateFlowId(userId, serverName);
|
||||
await flowManager.deleteFlow(flowId, 'mcp_get_tokens');
|
||||
await flowManager.deleteFlow(flowId, 'mcp_oauth');
|
||||
};
|
||||
|
||||
module.exports = {
|
||||
getUserController,
|
||||
getTermsStatusController,
|
||||
|
||||
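Note on the revocation step above: MCPOAuthHandler.revokeOAuthToken is not shown in this diff, but the call amounts to an RFC 7009 token revocation request against the provider. A minimal sketch of such a request follows; the fetch-based transport and client_secret_basic auth are illustrative assumptions, not the actual implementation inside @librechat/api:

// Illustrative sketch of an RFC 7009 revocation request (assumption:
// client_secret_basic auth; the real logic lives in MCPOAuthHandler).
async function revokeTokenSketch({ revocationEndpoint, token, tokenTypeHint, clientId, clientSecret }) {
  const body = new URLSearchParams({ token, token_type_hint: tokenTypeHint });
  const credentials = Buffer.from(`${clientId}:${clientSecret}`).toString('base64');
  const response = await fetch(revocationEndpoint, {
    method: 'POST',
    headers: {
      Authorization: `Basic ${credentials}`,
      'Content-Type': 'application/x-www-form-urlencoded',
    },
    body,
  });
  // Per RFC 7009, HTTP 200 means the token is revoked (or was already invalid)
  return response.ok;
}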
api/server/controllers/agents/__tests__/callbacks.spec.js (new file, 342 lines)
@@ -0,0 +1,342 @@
const { Tools } = require('librechat-data-provider');

// Mock all dependencies before requiring the module
jest.mock('nanoid', () => ({
  nanoid: jest.fn(() => 'mock-id'),
}));

jest.mock('@librechat/api', () => ({
  sendEvent: jest.fn(),
}));

jest.mock('@librechat/data-schemas', () => ({
  logger: {
    error: jest.fn(),
  },
}));

jest.mock('@librechat/agents', () => ({
  EnvVar: { CODE_API_KEY: 'CODE_API_KEY' },
  Providers: { GOOGLE: 'google' },
  GraphEvents: {},
  getMessageId: jest.fn(),
  ToolEndHandler: jest.fn(),
  handleToolCalls: jest.fn(),
  ChatModelStreamHandler: jest.fn(),
}));

jest.mock('~/server/services/Files/Citations', () => ({
  processFileCitations: jest.fn(),
}));

jest.mock('~/server/services/Files/Code/process', () => ({
  processCodeOutput: jest.fn(),
}));

jest.mock('~/server/services/Tools/credentials', () => ({
  loadAuthValues: jest.fn(),
}));

jest.mock('~/server/services/Files/process', () => ({
  saveBase64Image: jest.fn(),
}));

describe('createToolEndCallback', () => {
  let req, res, artifactPromises, createToolEndCallback;
  let logger;

  beforeEach(() => {
    jest.clearAllMocks();

    // Get the mocked logger
    logger = require('@librechat/data-schemas').logger;

    // Now require the module after all mocks are set up
    const callbacks = require('../callbacks');
    createToolEndCallback = callbacks.createToolEndCallback;

    req = {
      user: { id: 'user123' },
    };
    res = {
      headersSent: false,
      write: jest.fn(),
    };
    artifactPromises = [];
  });

  describe('ui_resources artifact handling', () => {
    it('should process ui_resources artifact and return attachment when headers not sent', async () => {
      const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });

      const output = {
        tool_call_id: 'tool123',
        artifact: {
          [Tools.ui_resources]: {
            data: {
              0: { type: 'button', label: 'Click me' },
              1: { type: 'input', placeholder: 'Enter text' },
            },
          },
        },
      };

      const metadata = {
        run_id: 'run456',
        thread_id: 'thread789',
      };

      await toolEndCallback({ output }, metadata);

      // Wait for all promises to resolve
      const results = await Promise.all(artifactPromises);

      // When headers are not sent, it returns attachment without writing
      expect(res.write).not.toHaveBeenCalled();

      const attachment = results[0];
      expect(attachment).toEqual({
        type: Tools.ui_resources,
        messageId: 'run456',
        toolCallId: 'tool123',
        conversationId: 'thread789',
        [Tools.ui_resources]: {
          0: { type: 'button', label: 'Click me' },
          1: { type: 'input', placeholder: 'Enter text' },
        },
      });
    });

    it('should write to response when headers are already sent', async () => {
      res.headersSent = true;
      const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });

      const output = {
        tool_call_id: 'tool123',
        artifact: {
          [Tools.ui_resources]: {
            data: {
              0: { type: 'carousel', items: [] },
            },
          },
        },
      };

      const metadata = {
        run_id: 'run456',
        thread_id: 'thread789',
      };

      await toolEndCallback({ output }, metadata);
      const results = await Promise.all(artifactPromises);

      expect(res.write).toHaveBeenCalled();
      expect(results[0]).toEqual({
        type: Tools.ui_resources,
        messageId: 'run456',
        toolCallId: 'tool123',
        conversationId: 'thread789',
        [Tools.ui_resources]: {
          0: { type: 'carousel', items: [] },
        },
      });
    });

    it('should handle errors when processing ui_resources', async () => {
      const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });

      // Mock res.write to throw an error
      res.headersSent = true;
      res.write.mockImplementation(() => {
        throw new Error('Write failed');
      });

      const output = {
        tool_call_id: 'tool123',
        artifact: {
          [Tools.ui_resources]: {
            data: {
              0: { type: 'test' },
            },
          },
        },
      };

      const metadata = {
        run_id: 'run456',
        thread_id: 'thread789',
      };

      await toolEndCallback({ output }, metadata);
      const results = await Promise.all(artifactPromises);

      expect(logger.error).toHaveBeenCalledWith(
        'Error processing artifact content:',
        expect.any(Error),
      );
      expect(results[0]).toBeNull();
    });

    it('should handle multiple artifacts including ui_resources', async () => {
      const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });

      const output = {
        tool_call_id: 'tool123',
        artifact: {
          [Tools.ui_resources]: {
            data: {
              0: { type: 'chart', data: [] },
            },
          },
          [Tools.web_search]: {
            results: ['result1', 'result2'],
          },
        },
      };

      const metadata = {
        run_id: 'run456',
        thread_id: 'thread789',
      };

      await toolEndCallback({ output }, metadata);
      const results = await Promise.all(artifactPromises);

      // Both ui_resources and web_search should be processed
      expect(artifactPromises).toHaveLength(2);
      expect(results).toHaveLength(2);

      // Check ui_resources attachment
      const uiResourceAttachment = results.find((r) => r?.type === Tools.ui_resources);
      expect(uiResourceAttachment).toBeTruthy();
      expect(uiResourceAttachment[Tools.ui_resources]).toEqual({
        0: { type: 'chart', data: [] },
      });

      // Check web_search attachment
      const webSearchAttachment = results.find((r) => r?.type === Tools.web_search);
      expect(webSearchAttachment).toBeTruthy();
      expect(webSearchAttachment[Tools.web_search]).toEqual({
        results: ['result1', 'result2'],
      });
    });

    it('should not process artifacts when output has no artifacts', async () => {
      const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });

      const output = {
        tool_call_id: 'tool123',
        content: 'Some regular content',
        // No artifact property
      };

      const metadata = {
        run_id: 'run456',
        thread_id: 'thread789',
      };

      await toolEndCallback({ output }, metadata);

      expect(artifactPromises).toHaveLength(0);
      expect(res.write).not.toHaveBeenCalled();
    });
  });

  describe('edge cases', () => {
    it('should handle empty ui_resources data object', async () => {
      const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });

      const output = {
        tool_call_id: 'tool123',
        artifact: {
          [Tools.ui_resources]: {
            data: {},
          },
        },
      };

      const metadata = {
        run_id: 'run456',
        thread_id: 'thread789',
      };

      await toolEndCallback({ output }, metadata);
      const results = await Promise.all(artifactPromises);

      expect(results[0]).toEqual({
        type: Tools.ui_resources,
        messageId: 'run456',
        toolCallId: 'tool123',
        conversationId: 'thread789',
        [Tools.ui_resources]: {},
      });
    });

    it('should handle ui_resources with complex nested data', async () => {
      const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });

      const complexData = {
        0: {
          type: 'form',
          fields: [
            { name: 'field1', type: 'text', required: true },
            { name: 'field2', type: 'select', options: ['a', 'b', 'c'] },
          ],
          nested: {
            deep: {
              value: 123,
              array: [1, 2, 3],
            },
          },
        },
      };

      const output = {
        tool_call_id: 'tool123',
        artifact: {
          [Tools.ui_resources]: {
            data: complexData,
          },
        },
      };

      const metadata = {
        run_id: 'run456',
        thread_id: 'thread789',
      };

      await toolEndCallback({ output }, metadata);
      const results = await Promise.all(artifactPromises);

      expect(results[0][Tools.ui_resources]).toEqual(complexData);
    });

    it('should handle when output is undefined', async () => {
      const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });

      const metadata = {
        run_id: 'run456',
        thread_id: 'thread789',
      };

      await toolEndCallback({ output: undefined }, metadata);

      expect(artifactPromises).toHaveLength(0);
      expect(res.write).not.toHaveBeenCalled();
    });

    it('should handle when data parameter is undefined', async () => {
      const toolEndCallback = createToolEndCallback({ req, res, artifactPromises });

      const metadata = {
        run_id: 'run456',
        thread_id: 'thread789',
      };

      await toolEndCallback(undefined, metadata);

      expect(artifactPromises).toHaveLength(0);
      expect(res.write).not.toHaveBeenCalled();
    });
  });
});
@@ -158,7 +158,7 @@ describe('duplicateAgent', () => {
    });
  });

-  it('should handle tool_resources.ocr correctly', async () => {
+  it('should convert `tool_resources.ocr` to `tool_resources.context`', async () => {
    const mockAgent = {
      id: 'agent_123',
      name: 'Test Agent',
@@ -178,7 +178,7 @@ describe('duplicateAgent', () => {
    expect(createAgent).toHaveBeenCalledWith(
      expect.objectContaining({
        tool_resources: {
-          ocr: { enabled: true, config: 'test' },
+          context: { enabled: true, config: 'test' },
        },
      }),
    );

@@ -265,6 +265,30 @@ function createToolEndCallback({ req, res, artifactPromises }) {
      );
    }

    // TODO: a lot of duplicated code in createToolEndCallback
    // we should refactor this to use a helper function in a follow-up PR
    if (output.artifact[Tools.ui_resources]) {
      artifactPromises.push(
        (async () => {
          const attachment = {
            type: Tools.ui_resources,
            messageId: metadata.run_id,
            toolCallId: output.tool_call_id,
            conversationId: metadata.thread_id,
            [Tools.ui_resources]: output.artifact[Tools.ui_resources].data,
          };
          if (!res.headersSent) {
            return attachment;
          }
          res.write(`event: attachment\ndata: ${JSON.stringify(attachment)}\n\n`);
          return attachment;
        })().catch((error) => {
          logger.error('Error processing artifact content:', error);
          return null;
        }),
      );
    }

    if (output.artifact[Tools.web_search]) {
      artifactPromises.push(
        (async () => {
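Note: when res.headersSent is true, the ui_resources handler above streams the attachment as a server-sent event rather than returning it for the final response body. Assuming Tools.ui_resources serializes to the string 'ui_resources', the emitted frame would look roughly like this (field values taken from the test fixtures above):

event: attachment
data: {"type":"ui_resources","messageId":"run456","toolCallId":"tool123","conversationId":"thread789","ui_resources":{"0":{"type":"button","label":"Click me"}}}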
@@ -872,11 +872,10 @@ class AgentClient extends BaseClient {
    if (agent.useLegacyContent === true) {
      messages = formatContentStrings(messages);
    }
-    if (
-      agent.model_parameters?.clientOptions?.defaultHeaders?.['anthropic-beta']?.includes(
-        'prompt-caching',
-      )
-    ) {
+    const defaultHeaders =
+      agent.model_parameters?.clientOptions?.defaultHeaders ??
+      agent.model_parameters?.configuration?.defaultHeaders;
+    if (defaultHeaders?.['anthropic-beta']?.includes('prompt-caching')) {
      messages = addCacheControl(messages);
    }

@@ -1122,6 +1121,13 @@ class AgentClient extends BaseClient {
      );
    }

    if (endpointConfig?.titleConvo === false) {
      logger.debug(
        `[api/server/controllers/agents/client.js #titleConvo] Title generation disabled for endpoint "${endpoint}"`,
      );
      return;
    }

    if (endpointConfig?.titleEndpoint && endpointConfig.titleEndpoint !== endpoint) {
      try {
        titleProviderConfig = getProviderConfig({
@@ -1131,7 +1137,7 @@ class AgentClient extends BaseClient {
        endpoint = endpointConfig.titleEndpoint;
      } catch (error) {
        logger.warn(
-          `[api/server/controllers/agents/client.js #titleConvo] Error getting title endpoint config for ${endpointConfig.titleEndpoint}, falling back to default`,
+          `[api/server/controllers/agents/client.js #titleConvo] Error getting title endpoint config for "${endpointConfig.titleEndpoint}", falling back to default`,
          error,
        );
        // Fall back to original provider config
@@ -263,6 +263,125 @@ describe('AgentClient - titleConvo', () => {
    expect(result).toBeUndefined();
  });

  it('should skip title generation when titleConvo is set to false', async () => {
    // Set titleConvo to false in endpoint config
    mockReq.config = {
      endpoints: {
        [EModelEndpoint.openAI]: {
          titleConvo: false,
          titleModel: 'gpt-3.5-turbo',
          titlePrompt: 'Custom title prompt',
          titleMethod: 'structured',
          titlePromptTemplate: 'Template: {{content}}',
        },
      },
    };

    const text = 'Test conversation text';
    const abortController = new AbortController();

    const result = await client.titleConvo({ text, abortController });

    // Should return undefined without generating title
    expect(result).toBeUndefined();

    // generateTitle should NOT have been called
    expect(mockRun.generateTitle).not.toHaveBeenCalled();

    // recordCollectedUsage should NOT have been called
    expect(client.recordCollectedUsage).not.toHaveBeenCalled();
  });

  it('should skip title generation when titleConvo is false in all config', async () => {
    // Set titleConvo to false in "all" config
    mockReq.config = {
      endpoints: {
        all: {
          titleConvo: false,
          titleModel: 'gpt-4o-mini',
          titlePrompt: 'All config title prompt',
          titleMethod: 'completion',
          titlePromptTemplate: 'All config template',
        },
      },
    };

    const text = 'Test conversation text';
    const abortController = new AbortController();

    const result = await client.titleConvo({ text, abortController });

    // Should return undefined without generating title
    expect(result).toBeUndefined();

    // generateTitle should NOT have been called
    expect(mockRun.generateTitle).not.toHaveBeenCalled();

    // recordCollectedUsage should NOT have been called
    expect(client.recordCollectedUsage).not.toHaveBeenCalled();
  });

  it('should skip title generation when titleConvo is false for custom endpoint scenario', async () => {
    // This test validates the behavior when customEndpointConfig (retrieved via
    // getProviderConfig for custom endpoints) has titleConvo: false.
    //
    // The code path is:
    // 1. endpoints?.all is checked (undefined in this test)
    // 2. endpoints?.[endpoint] is checked (our test config)
    // 3. Would fall back to titleProviderConfig.customEndpointConfig (for real custom endpoints)
    //
    // We simulate a custom endpoint scenario using a dynamically named endpoint config

    // Create a unique endpoint name that represents a custom endpoint
    const customEndpointName = 'customEndpoint';

    // Configure the endpoint to have titleConvo: false
    // This simulates what would be in customEndpointConfig for a real custom endpoint
    mockReq.config = {
      endpoints: {
        // No 'all' config - so it will check endpoints[endpoint]
        // This config represents what customEndpointConfig would contain
        [customEndpointName]: {
          titleConvo: false,
          titleModel: 'custom-model-v1',
          titlePrompt: 'Custom endpoint title prompt',
          titleMethod: 'completion',
          titlePromptTemplate: 'Custom template: {{content}}',
          baseURL: 'https://api.custom-llm.com/v1',
          apiKey: 'test-custom-key',
          // Additional custom endpoint properties
          models: {
            default: ['custom-model-v1', 'custom-model-v2'],
          },
        },
      },
    };

    // Set up agent to use our custom endpoint
    // Use openAI as base but override with custom endpoint name for this test
    mockAgent.endpoint = EModelEndpoint.openAI;
    mockAgent.provider = EModelEndpoint.openAI;

    // Override the endpoint in the config to point to our custom config
    mockReq.config.endpoints[EModelEndpoint.openAI] =
      mockReq.config.endpoints[customEndpointName];
    delete mockReq.config.endpoints[customEndpointName];

    const text = 'Test custom endpoint conversation';
    const abortController = new AbortController();

    const result = await client.titleConvo({ text, abortController });

    // Should return undefined without generating title because titleConvo is false
    expect(result).toBeUndefined();

    // generateTitle should NOT have been called
    expect(mockRun.generateTitle).not.toHaveBeenCalled();

    // recordCollectedUsage should NOT have been called
    expect(client.recordCollectedUsage).not.toHaveBeenCalled();
  });

  it('should pass titleEndpoint configuration to generateTitle', async () => {
    // Mock the API key just for this test
    const originalApiKey = process.env.ANTHROPIC_API_KEY;
@@ -2,7 +2,12 @@ const { z } = require('zod');
const fs = require('fs').promises;
const { nanoid } = require('nanoid');
const { logger } = require('@librechat/data-schemas');
-const { agentCreateSchema, agentUpdateSchema } = require('@librechat/api');
+const {
+  agentCreateSchema,
+  agentUpdateSchema,
+  mergeAgentOcrConversion,
+  convertOcrToContextInPlace,
+} = require('@librechat/api');
const {
  Tools,
  Constants,
@@ -66,7 +71,7 @@ const createAgentHandler = async (req, res) => {
  agentData.author = userId;
  agentData.tools = [];

-  const availableTools = await getCachedTools({ includeGlobal: true });
+  const availableTools = await getCachedTools();
  for (const tool of tools) {
    if (availableTools[tool]) {
      agentData.tools.push(tool);
@@ -198,19 +203,32 @@ const getAgentHandler = async (req, res, expandProperties = false) => {
 * @param {object} req.params - Request params
 * @param {string} req.params.id - Agent identifier.
 * @param {AgentUpdateParams} req.body - The Agent update parameters.
- * @returns {Agent} 200 - success response - application/json
+ * @returns {Promise<Agent>} 200 - success response - application/json
 */
const updateAgentHandler = async (req, res) => {
  try {
    const id = req.params.id;
    const validatedData = agentUpdateSchema.parse(req.body);
    const { _id, ...updateData } = removeNullishValues(validatedData);

    // Convert OCR to context in incoming updateData
    convertOcrToContextInPlace(updateData);

    const existingAgent = await getAgent({ id });

    if (!existingAgent) {
      return res.status(404).json({ error: 'Agent not found' });
    }

    // Convert legacy OCR tool resource to context format in existing agent
    const ocrConversion = mergeAgentOcrConversion(existingAgent, updateData);
    if (ocrConversion.tool_resources) {
      updateData.tool_resources = ocrConversion.tool_resources;
    }
    if (ocrConversion.tools) {
      updateData.tools = ocrConversion.tools;
    }

    let updatedAgent =
      Object.keys(updateData).length > 0
        ? await updateAgent({ id }, updateData, {
@@ -255,7 +273,7 @@ const updateAgentHandler = async (req, res) => {
 * @param {object} req - Express Request
 * @param {object} req.params - Request params
 * @param {string} req.params.id - Agent identifier.
- * @returns {Agent} 201 - success response - application/json
+ * @returns {Promise<Agent>} 201 - success response - application/json
 */
const duplicateAgentHandler = async (req, res) => {
  const { id } = req.params;
@@ -288,9 +306,19 @@ const duplicateAgentHandler = async (req, res) => {
    hour12: false,
  })})`;

  if (_tool_resources?.[EToolResources.context]) {
    cloneData.tool_resources = {
      [EToolResources.context]: _tool_resources[EToolResources.context],
    };
  }

  if (_tool_resources?.[EToolResources.ocr]) {
    cloneData.tool_resources = {
      [EToolResources.ocr]: _tool_resources[EToolResources.ocr],
      /** Legacy conversion from `ocr` to `context` */
      [EToolResources.context]: {
        ...(_tool_resources[EToolResources.context] ?? {}),
        ..._tool_resources[EToolResources.ocr],
      },
    };
  }

@@ -382,7 +410,7 @@ const duplicateAgentHandler = async (req, res) => {
 * @param {object} req - Express Request
 * @param {object} req.params - Request params
 * @param {string} req.params.id - Agent identifier.
- * @returns {Agent} 200 - success response - application/json
+ * @returns {Promise<Agent>} 200 - success response - application/json
 */
const deleteAgentHandler = async (req, res) => {
  try {
@@ -484,7 +512,7 @@ const getListAgentsHandler = async (req, res) => {
 * @param {Express.Multer.File} req.file - The avatar image file.
 * @param {object} req.body - Request body
 * @param {string} [req.body.avatar] - Optional avatar for the agent's avatar.
- * @returns {Object} 200 - success response - application/json
+ * @returns {Promise<void>} 200 - success response - application/json
 */
const uploadAgentAvatarHandler = async (req, res) => {
  try {
@@ -512,6 +512,7 @@ describe('Agent Controllers - Mass Assignment Protection', () => {
      mockReq.params.id = existingAgentId;
      mockReq.body = {
        tool_resources: {
+          /** Legacy conversion from `ocr` to `context` */
          ocr: {
            file_ids: ['ocr1', 'ocr2'],
          },
@@ -531,7 +532,8 @@ describe('Agent Controllers - Mass Assignment Protection', () => {

      const updatedAgent = mockRes.json.mock.calls[0][0];
      expect(updatedAgent.tool_resources).toBeDefined();
-      expect(updatedAgent.tool_resources.ocr).toBeDefined();
+      expect(updatedAgent.tool_resources.ocr).toBeUndefined();
+      expect(updatedAgent.tool_resources.context).toBeDefined();
      expect(updatedAgent.tool_resources.execute_code).toBeDefined();
      expect(updatedAgent.tool_resources.invalid_tool).toBeUndefined();
    });
@@ -1,7 +1,7 @@
const { v4 } = require('uuid');
const { sleep } = require('@librechat/agents');
const { logger } = require('@librechat/data-schemas');
-const { sendEvent, getBalanceConfig } = require('@librechat/api');
+const { sendEvent, getBalanceConfig, getModelMaxTokens } = require('@librechat/api');
const {
  Time,
  Constants,
@@ -34,7 +34,6 @@ const { checkBalance } = require('~/models/balanceMethods');
const { getConvo } = require('~/models/Conversation');
const getLogStores = require('~/cache/getLogStores');
const { countTokens } = require('~/server/utils');
-const { getModelMaxTokens } = require('~/utils');
const { getOpenAIClient } = require('./helpers');

/**
@@ -1,7 +1,7 @@
const { v4 } = require('uuid');
const { sleep } = require('@librechat/agents');
const { logger } = require('@librechat/data-schemas');
-const { sendEvent, getBalanceConfig } = require('@librechat/api');
+const { sendEvent, getBalanceConfig, getModelMaxTokens } = require('@librechat/api');
const {
  Time,
  Constants,
@@ -31,7 +31,6 @@ const { checkBalance } = require('~/models/balanceMethods');
const { getConvo } = require('~/models/Conversation');
const getLogStores = require('~/cache/getLogStores');
const { countTokens } = require('~/server/utils');
-const { getModelMaxTokens } = require('~/utils');
const { getOpenAIClient } = require('./helpers');

/**
@@ -31,7 +31,7 @@ const createAssistant = async (req, res) => {
  delete assistantData.conversation_starters;
  delete assistantData.append_current_datetime;

-  const toolDefinitions = await getCachedTools({ includeGlobal: true });
+  const toolDefinitions = await getCachedTools();

  assistantData.tools = tools
    .map((tool) => {
@@ -136,7 +136,7 @@ const patchAssistant = async (req, res) => {
    ...updateData
  } = req.body;

-  const toolDefinitions = await getCachedTools({ includeGlobal: true });
+  const toolDefinitions = await getCachedTools();

  updateData.tools = (updateData.tools ?? [])
    .map((tool) => {

@@ -28,7 +28,7 @@ const createAssistant = async (req, res) => {
  delete assistantData.conversation_starters;
  delete assistantData.append_current_datetime;

-  const toolDefinitions = await getCachedTools({ includeGlobal: true });
+  const toolDefinitions = await getCachedTools();

  assistantData.tools = tools
    .map((tool) => {
@@ -125,7 +125,7 @@ const updateAssistant = async ({ req, openai, assistant_id, updateData }) => {

  let hasFileSearch = false;
  for (const tool of updateData.tools ?? []) {
-    const toolDefinitions = await getCachedTools({ includeGlobal: true });
+    const toolDefinitions = await getCachedTools();
    let actualTool = typeof tool === 'string' ? toolDefinitions[tool] : tool;

    if (!actualTool && manifestToolMap[tool] && manifestToolMap[tool].toolkit === true) {
api/server/controllers/mcp.js (new file, 126 lines)
@@ -0,0 +1,126 @@
/**
 * MCP Tools Controller
 * Handles MCP-specific tool endpoints, decoupled from regular LibreChat tools
 */
const { logger } = require('@librechat/data-schemas');
const { Constants } = require('librechat-data-provider');
const {
  cacheMCPServerTools,
  getMCPServerTools,
  getAppConfig,
} = require('~/server/services/Config');
const { getMCPManager } = require('~/config');

/**
 * Get all MCP tools available to the user
 */
const getMCPTools = async (req, res) => {
  try {
    const userId = req.user?.id;
    if (!userId) {
      logger.warn('[getMCPTools] User ID not found in request');
      return res.status(401).json({ message: 'Unauthorized' });
    }

    const appConfig = req.config ?? (await getAppConfig({ role: req.user?.role }));
    if (!appConfig?.mcpConfig) {
      return res.status(200).json({ servers: {} });
    }

    const mcpManager = getMCPManager();
    const configuredServers = Object.keys(appConfig.mcpConfig);
    const mcpServers = {};

    const cachePromises = configuredServers.map((serverName) =>
      getMCPServerTools(serverName).then((tools) => ({ serverName, tools })),
    );
    const cacheResults = await Promise.all(cachePromises);

    const serverToolsMap = new Map();
    for (const { serverName, tools } of cacheResults) {
      if (tools) {
        serverToolsMap.set(serverName, tools);
        continue;
      }

      const serverTools = await mcpManager.getServerToolFunctions(userId, serverName);
      if (!serverTools) {
        logger.debug(`[getMCPTools] No tools found for server ${serverName}`);
        continue;
      }
      serverToolsMap.set(serverName, serverTools);

      if (Object.keys(serverTools).length > 0) {
        // Cache asynchronously without blocking
        cacheMCPServerTools({ serverName, serverTools }).catch((err) =>
          logger.error(`[getMCPTools] Failed to cache tools for ${serverName}:`, err),
        );
      }
    }

    // Process each configured server
    for (const serverName of configuredServers) {
      try {
        const serverTools = serverToolsMap.get(serverName);

        // Get server config once
        const serverConfig = appConfig.mcpConfig[serverName];
        const rawServerConfig = mcpManager.getRawConfig(serverName);

        // Initialize server object with all server-level data
        const server = {
          name: serverName,
          icon: rawServerConfig?.iconPath || '',
          authenticated: true,
          authConfig: [],
          tools: [],
        };

        // Set authentication config once for the server
        if (serverConfig?.customUserVars) {
          const customVarKeys = Object.keys(serverConfig.customUserVars);
          if (customVarKeys.length > 0) {
            server.authConfig = Object.entries(serverConfig.customUserVars).map(([key, value]) => ({
              authField: key,
              label: value.title || key,
              description: value.description || '',
            }));
            server.authenticated = false;
          }
        }

        // Process tools efficiently - no need for convertMCPToolToPlugin
        if (serverTools) {
          for (const [toolKey, toolData] of Object.entries(serverTools)) {
            if (!toolData.function || !toolKey.includes(Constants.mcp_delimiter)) {
              continue;
            }

            const toolName = toolKey.split(Constants.mcp_delimiter)[0];
            server.tools.push({
              name: toolName,
              pluginKey: toolKey,
              description: toolData.function.description || '',
            });
          }
        }

        // Only add server if it has tools or is configured
        if (server.tools.length > 0 || serverConfig) {
          mcpServers[serverName] = server;
        }
      } catch (error) {
        logger.error(`[getMCPTools] Error loading tools for server ${serverName}:`, error);
      }
    }

    res.status(200).json({ servers: mcpServers });
  } catch (error) {
    logger.error('[getMCPTools]', error);
    res.status(500).json({ message: error.message });
  }
};

module.exports = {
  getMCPTools,
};
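Note: a minimal sketch of consuming this controller's response from a client follows. The '/api/mcp/tools' route path is an assumption for illustration; only the { servers } payload shape comes from the code above:

// Illustrative consumer of the getMCPTools response shape
// ({ servers: { [name]: { name, icon, authenticated, authConfig, tools } } }).
async function listMCPServerTools() {
  const res = await fetch('/api/mcp/tools', { credentials: 'include' });
  const { servers } = await res.json();
  for (const server of Object.values(servers)) {
    for (const tool of server.tools) {
      console.log(`${server.name}: ${tool.name} (${tool.pluginKey})`);
    }
  }
}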
@@ -12,6 +12,7 @@ const { logger } = require('@librechat/data-schemas');
const mongoSanitize = require('express-mongo-sanitize');
const { isEnabled, ErrorController } = require('@librechat/api');
const { connectDb, indexSync } = require('~/db');
+const initializeOAuthReconnectManager = require('./services/initializeOAuthReconnectManager');
const createValidateImageRequest = require('./middleware/validateImageRequest');
const { jwtLogin, ldapLogin, passportLogin } = require('~/strategies');
const { updateInterfacePermissions } = require('~/models/interface');
@@ -154,7 +155,7 @@ const startServer = async () => {
    res.send(updatedIndexHtml);
  });

-  app.listen(port, host, () => {
+  app.listen(port, host, async () => {
    if (host === '0.0.0.0') {
      logger.info(
        `Server listening on all interfaces at port ${port}. Use http://localhost:${port} to access it`,
@@ -163,7 +164,9 @@ const startServer = async () => {
      logger.info(`Server listening at http://${host == '0.0.0.0' ? 'localhost' : host}:${port}`);
    }

-    initializeMCPs().then(() => checkMigrations());
+    await initializeMCPs();
+    await initializeOAuthReconnectManager();
+    await checkMigrations();
  });
};
@@ -1,7 +1,7 @@
const { logger } = require('@librechat/data-schemas');
const { PermissionBits, hasPermissions, ResourceType } = require('librechat-data-provider');
const { getEffectivePermissions } = require('~/server/services/PermissionService');
-const { getAgent } = require('~/models/Agent');
+const { getAgents } = require('~/models/Agent');
const { getFiles } = require('~/models/File');

/**
@@ -10,11 +10,12 @@ const { getFiles } = require('~/models/File');
 */
const checkAgentBasedFileAccess = async ({ userId, role, fileId }) => {
  try {
-    // Find agents that have this file in their tool_resources
-    const agentsWithFile = await getAgent({
+    /** Agents that have this file in their tool_resources */
+    const agentsWithFile = await getAgents({
      $or: [
-        { 'tool_resources.file_search.file_ids': fileId },
        { 'tool_resources.execute_code.file_ids': fileId },
+        { 'tool_resources.file_search.file_ids': fileId },
+        { 'tool_resources.context.file_ids': fileId },
        { 'tool_resources.ocr.file_ids': fileId },
      ],
    });
@@ -24,7 +25,7 @@ const checkAgentBasedFileAccess = async ({ userId, role, fileId }) => {
    }

    // Check if user has access to any of these agents
-    for (const agent of Array.isArray(agentsWithFile) ? agentsWithFile : [agentsWithFile]) {
+    for (const agent of agentsWithFile) {
      // Check if user is the agent author
      if (agent.author && agent.author.toString() === userId) {
        logger.debug(`[fileAccess] User is author of agent ${agent.id}`);
@@ -83,7 +84,6 @@ const fileAccess = async (req, res, next) => {
    });
  }

-  // Get the file
  const [file] = await getFiles({ file_id: fileId });
  if (!file) {
    return res.status(404).json({
@@ -92,20 +92,18 @@ const fileAccess = async (req, res, next) => {
    });
  }

  // Check if user owns the file
  if (file.user && file.user.toString() === userId) {
    req.fileAccess = { file };
    return next();
  }

-  // Check agent-based access (file inherits agent permissions)
+  /** Agent-based access (file inherits agent permissions) */
  const hasAgentAccess = await checkAgentBasedFileAccess({ userId, role: userRole, fileId });
  if (hasAgentAccess) {
    req.fileAccess = { file };
    return next();
  }

  // No access
  logger.warn(`[fileAccess] User ${userId} denied access to file ${fileId}`);
  return res.status(403).json({
    error: 'Forbidden',
api/server/middleware/accessResources/fileAccess.spec.js (new file, 483 lines)
@@ -0,0 +1,483 @@
|
||||
const mongoose = require('mongoose');
|
||||
const { ResourceType, PrincipalType, PrincipalModel } = require('librechat-data-provider');
|
||||
const { MongoMemoryServer } = require('mongodb-memory-server');
|
||||
const { fileAccess } = require('./fileAccess');
|
||||
const { User, Role, AclEntry } = require('~/db/models');
|
||||
const { createAgent } = require('~/models/Agent');
|
||||
const { createFile } = require('~/models/File');
|
||||
|
||||
describe('fileAccess middleware', () => {
|
||||
let mongoServer;
|
||||
let req, res, next;
|
||||
let testUser, otherUser, thirdUser;
|
||||
|
||||
beforeAll(async () => {
|
||||
mongoServer = await MongoMemoryServer.create();
|
||||
const mongoUri = mongoServer.getUri();
|
||||
await mongoose.connect(mongoUri);
|
||||
});
|
||||
|
||||
afterAll(async () => {
|
||||
await mongoose.disconnect();
|
||||
await mongoServer.stop();
|
||||
});
|
||||
|
||||
beforeEach(async () => {
|
||||
await mongoose.connection.dropDatabase();
|
||||
|
||||
// Create test role
|
||||
await Role.create({
|
||||
name: 'test-role',
|
||||
permissions: {
|
||||
AGENTS: {
|
||||
USE: true,
|
||||
CREATE: true,
|
||||
SHARED_GLOBAL: false,
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
// Create test users
|
||||
testUser = await User.create({
|
||||
email: 'test@example.com',
|
||||
name: 'Test User',
|
||||
username: 'testuser',
|
||||
role: 'test-role',
|
||||
});
|
||||
|
||||
otherUser = await User.create({
|
||||
email: 'other@example.com',
|
||||
name: 'Other User',
|
||||
username: 'otheruser',
|
||||
role: 'test-role',
|
||||
});
|
||||
|
||||
thirdUser = await User.create({
|
||||
email: 'third@example.com',
|
||||
name: 'Third User',
|
||||
username: 'thirduser',
|
||||
role: 'test-role',
|
||||
});
|
||||
|
||||
// Setup request/response objects
|
||||
req = {
|
||||
user: { id: testUser._id.toString(), role: testUser.role },
|
||||
params: {},
|
||||
};
|
||||
res = {
|
||||
status: jest.fn().mockReturnThis(),
|
||||
json: jest.fn(),
|
||||
};
|
||||
next = jest.fn();
|
||||
|
||||
jest.clearAllMocks();
|
||||
});
|
||||
|
||||
describe('basic file access', () => {
|
||||
test('should allow access when user owns the file', async () => {
|
||||
// Create a file owned by testUser
|
||||
await createFile({
|
||||
user: testUser._id.toString(),
|
||||
file_id: 'file_owned_by_user',
|
||||
filepath: '/test/file.txt',
|
||||
filename: 'file.txt',
|
||||
type: 'text/plain',
|
||||
size: 100,
|
||||
});
|
||||
|
||||
req.params.file_id = 'file_owned_by_user';
|
||||
await fileAccess(req, res, next);
|
||||
|
||||
expect(next).toHaveBeenCalled();
|
||||
expect(req.fileAccess).toBeDefined();
|
||||
expect(req.fileAccess.file).toBeDefined();
|
||||
expect(res.status).not.toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should deny access when user does not own the file and no agent access', async () => {
|
||||
// Create a file owned by otherUser
|
||||
await createFile({
|
||||
user: otherUser._id.toString(),
|
||||
file_id: 'file_owned_by_other',
|
||||
filepath: '/test/file.txt',
|
||||
filename: 'file.txt',
|
||||
type: 'text/plain',
|
||||
size: 100,
|
||||
});
|
||||
|
||||
req.params.file_id = 'file_owned_by_other';
|
||||
await fileAccess(req, res, next);
|
||||
|
||||
expect(next).not.toHaveBeenCalled();
|
||||
expect(res.status).toHaveBeenCalledWith(403);
|
||||
expect(res.json).toHaveBeenCalledWith({
|
||||
error: 'Forbidden',
|
||||
message: 'Insufficient permissions to access this file',
|
||||
});
|
||||
});
|
||||
|
||||
test('should return 404 when file does not exist', async () => {
|
||||
req.params.file_id = 'non_existent_file';
|
||||
await fileAccess(req, res, next);
|
||||
|
||||
expect(next).not.toHaveBeenCalled();
|
||||
expect(res.status).toHaveBeenCalledWith(404);
|
||||
expect(res.json).toHaveBeenCalledWith({
|
||||
error: 'Not Found',
|
||||
message: 'File not found',
|
||||
});
|
||||
});
|
||||
|
||||
test('should return 400 when file_id is missing', async () => {
|
||||
// Don't set file_id in params
|
||||
await fileAccess(req, res, next);
|
||||
|
||||
expect(next).not.toHaveBeenCalled();
|
||||
expect(res.status).toHaveBeenCalledWith(400);
|
||||
expect(res.json).toHaveBeenCalledWith({
|
||||
error: 'Bad Request',
|
||||
message: 'file_id is required',
|
||||
});
|
||||
});
|
||||
|
||||
test('should return 401 when user is not authenticated', async () => {
|
||||
req.user = null;
|
||||
req.params.file_id = 'some_file';
|
||||
|
||||
await fileAccess(req, res, next);
|
||||
|
||||
expect(next).not.toHaveBeenCalled();
|
||||
expect(res.status).toHaveBeenCalledWith(401);
|
||||
expect(res.json).toHaveBeenCalledWith({
|
||||
error: 'Unauthorized',
|
||||
message: 'Authentication required',
|
||||
});
|
||||
});
|
||||
});

describe('agent-based file access', () => {
beforeEach(async () => {
// Create a file owned by otherUser (not testUser)
await createFile({
user: otherUser._id.toString(),
file_id: 'shared_file_via_agent',
filepath: '/test/shared.txt',
filename: 'shared.txt',
type: 'text/plain',
size: 100,
});
});

test('should allow access when user is author of agent with file', async () => {
// Create agent owned by testUser with the file
await createAgent({
id: `agent_${Date.now()}`,
name: 'Test Agent',
provider: 'openai',
model: 'gpt-4',
author: testUser._id,
tool_resources: {
file_search: {
file_ids: ['shared_file_via_agent'],
},
},
});

req.params.file_id = 'shared_file_via_agent';
await fileAccess(req, res, next);

expect(next).toHaveBeenCalled();
expect(req.fileAccess).toBeDefined();
expect(req.fileAccess.file).toBeDefined();
});

test('should allow access when user has VIEW permission on agent with file', async () => {
// Create agent owned by otherUser
const agent = await createAgent({
id: `agent_${Date.now()}`,
name: 'Shared Agent',
provider: 'openai',
model: 'gpt-4',
author: otherUser._id,
tool_resources: {
execute_code: {
file_ids: ['shared_file_via_agent'],
},
},
});

// Grant VIEW permission to testUser
await AclEntry.create({
principalType: PrincipalType.USER,
principalId: testUser._id,
principalModel: PrincipalModel.USER,
resourceType: ResourceType.AGENT,
resourceId: agent._id,
permBits: 1, // VIEW permission
grantedBy: otherUser._id,
});

req.params.file_id = 'shared_file_via_agent';
await fileAccess(req, res, next);

expect(next).toHaveBeenCalled();
expect(req.fileAccess).toBeDefined();
});

test('should check file in ocr tool_resources', async () => {
await createAgent({
id: `agent_ocr_${Date.now()}`,
name: 'OCR Agent',
provider: 'openai',
model: 'gpt-4',
author: testUser._id,
tool_resources: {
ocr: {
file_ids: ['shared_file_via_agent'],
},
},
});

req.params.file_id = 'shared_file_via_agent';
await fileAccess(req, res, next);

expect(next).toHaveBeenCalled();
expect(req.fileAccess).toBeDefined();
});

test('should deny access when user has no permission on agent with file', async () => {
// Create agent owned by otherUser without granting permission to testUser
const agent = await createAgent({
id: `agent_${Date.now()}`,
name: 'Private Agent',
provider: 'openai',
model: 'gpt-4',
author: otherUser._id,
tool_resources: {
file_search: {
file_ids: ['shared_file_via_agent'],
},
},
});

// Create ACL entry for otherUser only (owner)
await AclEntry.create({
principalType: PrincipalType.USER,
principalId: otherUser._id,
principalModel: PrincipalModel.USER,
resourceType: ResourceType.AGENT,
resourceId: agent._id,
permBits: 15, // All permissions
grantedBy: otherUser._id,
});

req.params.file_id = 'shared_file_via_agent';
await fileAccess(req, res, next);

expect(next).not.toHaveBeenCalled();
expect(res.status).toHaveBeenCalledWith(403);
});
});
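The pattern these tests encode: a user who neither owns a file nor has a direct grant can still read it when the file is attached to an agent they authored or can at least VIEW, in any `tool_resources` bucket. A hedged sketch of that fallback check — the Mongoose-style queries and helper name are assumptions for illustration:

```js
// Illustrative only: find every agent referencing the file, then test agent-level access.
async function hasAgentFileAccess(userId, fileId) {
  // Any tool_resources bucket (file_search, execute_code, ocr, ...) may hold the file.
  const agents = await Agent.find({
    $or: [
      { 'tool_resources.file_search.file_ids': fileId },
      { 'tool_resources.execute_code.file_ids': fileId },
      { 'tool_resources.ocr.file_ids': fileId },
    ],
  }).lean();
  for (const agent of agents) {
    if (agent.author.toString() === userId) {
      return true; // agent authors always have access
    }
    const entry = await AclEntry.findOne({
      principalId: userId,
      resourceType: ResourceType.AGENT,
      resourceId: agent._id,
    }).lean();
    if (entry && entry.permBits & 1) {
      return true; // VIEW bit set on this agent
    }
  }
  return false; // checked every agent, none grants access
}
```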

describe('multiple agents with same file', () => {
/**
* This test suite verifies that when multiple agents have the same file,
* all agents are checked for permissions, not just the first one found.
* This ensures users can access files through any agent they have permission for.
*/

test('should check ALL agents with file, not just first one', async () => {
// Create a file owned by someone else
await createFile({
user: otherUser._id.toString(),
file_id: 'multi_agent_file',
filepath: '/test/multi.txt',
filename: 'multi.txt',
type: 'text/plain',
size: 100,
});

// Create first agent (owned by otherUser, no access for testUser)
const agent1 = await createAgent({
id: 'agent_no_access',
name: 'No Access Agent',
provider: 'openai',
model: 'gpt-4',
author: otherUser._id,
tool_resources: {
file_search: {
file_ids: ['multi_agent_file'],
},
},
});

// Create ACL for agent1 - only otherUser has access
await AclEntry.create({
principalType: PrincipalType.USER,
principalId: otherUser._id,
principalModel: PrincipalModel.USER,
resourceType: ResourceType.AGENT,
resourceId: agent1._id,
permBits: 15,
grantedBy: otherUser._id,
});

// Create second agent (owned by thirdUser, but testUser has VIEW access)
const agent2 = await createAgent({
id: 'agent_with_access',
name: 'Accessible Agent',
provider: 'openai',
model: 'gpt-4',
author: thirdUser._id,
tool_resources: {
file_search: {
file_ids: ['multi_agent_file'],
},
},
});

// Grant testUser VIEW access to agent2
await AclEntry.create({
principalType: PrincipalType.USER,
principalId: testUser._id,
principalModel: PrincipalModel.USER,
resourceType: ResourceType.AGENT,
resourceId: agent2._id,
permBits: 1, // VIEW permission
grantedBy: thirdUser._id,
});

req.params.file_id = 'multi_agent_file';
await fileAccess(req, res, next);

/**
* Should succeed because testUser has access to agent2,
* even though they don't have access to agent1.
* The fix ensures all agents are checked, not just the first one.
*/
expect(next).toHaveBeenCalled();
expect(req.fileAccess).toBeDefined();
expect(res.status).not.toHaveBeenCalled();
});

test('should find file in any agent tool_resources type', async () => {
// Create a file
await createFile({
user: otherUser._id.toString(),
file_id: 'multi_tool_file',
filepath: '/test/tool.txt',
filename: 'tool.txt',
type: 'text/plain',
size: 100,
});

// Agent 1: file in file_search (no access for testUser)
await createAgent({
id: 'agent_file_search',
name: 'File Search Agent',
provider: 'openai',
model: 'gpt-4',
author: otherUser._id,
tool_resources: {
file_search: {
file_ids: ['multi_tool_file'],
},
},
});

// Agent 2: same file in execute_code (testUser has access)
await createAgent({
id: 'agent_execute_code',
name: 'Execute Code Agent',
provider: 'openai',
model: 'gpt-4',
author: thirdUser._id,
tool_resources: {
execute_code: {
file_ids: ['multi_tool_file'],
},
},
});

// Agent 3: same file in ocr (testUser also has access)
await createAgent({
id: 'agent_ocr',
name: 'OCR Agent',
provider: 'openai',
model: 'gpt-4',
author: testUser._id, // testUser owns this one
tool_resources: {
ocr: {
file_ids: ['multi_tool_file'],
},
},
});

req.params.file_id = 'multi_tool_file';
await fileAccess(req, res, next);

/**
* Should succeed because testUser owns agent3,
* even if other agents with the file are found first.
*/
expect(next).toHaveBeenCalled();
expect(req.fileAccess).toBeDefined();
});
});
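A note on the `permBits` values used throughout: `1` is commented as VIEW and `15` as all permissions, which reads naturally as a four-bit mask (`PermissionBits` is imported from `librechat-data-provider` in a later hunk). Only those two values are confirmed here; the remaining bit names are an assumption for illustration:

```js
// Assumed bit layout; only VIEW = 1 and "15 = all permissions" are confirmed above.
const PermissionBits = {
  VIEW: 1,   // 0b0001
  EDIT: 2,   // 0b0010 (assumed)
  DELETE: 4, // 0b0100 (assumed)
  SHARE: 8,  // 0b1000 (assumed)
};

const hasPermission = (permBits, required) => (permBits & required) === required;

hasPermission(15, PermissionBits.VIEW); // true: the owner grant includes VIEW
hasPermission(1, PermissionBits.EDIT);  // false: a VIEW-only grant cannot edit
```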

describe('edge cases', () => {
test('should handle agent with empty tool_resources', async () => {
await createFile({
user: otherUser._id.toString(),
file_id: 'orphan_file',
filepath: '/test/orphan.txt',
filename: 'orphan.txt',
type: 'text/plain',
size: 100,
});

// Create agent with no files in tool_resources
await createAgent({
id: `agent_empty_${Date.now()}`,
name: 'Empty Resources Agent',
provider: 'openai',
model: 'gpt-4',
author: testUser._id,
tool_resources: {},
});

req.params.file_id = 'orphan_file';
await fileAccess(req, res, next);

expect(next).not.toHaveBeenCalled();
expect(res.status).toHaveBeenCalledWith(403);
});

test('should handle agent with null tool_resources', async () => {
await createFile({
user: otherUser._id.toString(),
file_id: 'another_orphan_file',
filepath: '/test/orphan2.txt',
filename: 'orphan2.txt',
type: 'text/plain',
size: 100,
});

// Create agent with null tool_resources
await createAgent({
id: `agent_null_${Date.now()}`,
name: 'Null Resources Agent',
provider: 'openai',
model: 'gpt-4',
author: testUser._id,
tool_resources: null,
});

req.params.file_id = 'another_orphan_file';
await fileAccess(req, res, next);

expect(next).not.toHaveBeenCalled();
expect(res.status).toHaveBeenCalledWith(403);
});
});
});

@@ -1,5 +1,5 @@
const { logger } = require('@librechat/data-schemas');
const { isEmailDomainAllowed } = require('~/server/services/domains');
const { isEmailDomainAllowed } = require('@librechat/api');
const { getAppConfig } = require('~/server/services/Config');

/**
@@ -11,18 +11,25 @@ const { getAppConfig } = require('~/server/services/Config');
* @param {Object} res - Express response object.
* @param {Function} next - Next middleware function.
*
* @returns {Promise<function|Object>} - Returns a Promise which when resolved calls next middleware if the domain's email is allowed
* @returns {Promise<void>} - Calls next middleware if the domain's email is allowed, otherwise redirects to login
*/
const checkDomainAllowed = async (req, res, next = () => {}) => {
const email = req?.user?.email;
const appConfig = await getAppConfig({
role: req?.user?.role,
});
if (email && !isEmailDomainAllowed(email, appConfig?.registration?.allowedDomains)) {
logger.error(`[Social Login] [Social Login not allowed] [Email: ${email}]`);
return res.redirect('/login');
} else {
return next();
const checkDomainAllowed = async (req, res, next) => {
try {
const email = req?.user?.email;
const appConfig = await getAppConfig({
role: req?.user?.role,
});

if (email && !isEmailDomainAllowed(email, appConfig?.registration?.allowedDomains)) {
logger.error(`[Social Login] [Social Login not allowed] [Email: ${email}]`);
res.redirect('/login');
return;
}

next();
} catch (error) {
logger.error('[checkDomainAllowed] Error checking domain:', error);
res.redirect('/login');
}
};
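Two behavioral changes in this rewrite: the `next = () => {}` default is gone, so a route that forgets to pass `next` now fails loudly instead of silently swallowing the continuation, and the config lookup is wrapped in try/catch so a thrown error redirects to `/login` rather than leaving the request hanging. For context, a domain allow-list predicate along the lines of `isEmailDomainAllowed` might look like this — a sketch, not the actual `@librechat/api` implementation:

```js
// Sketch only: treats an empty or undefined allow-list as "allow everything".
function isEmailDomainAllowed(email, allowedDomains) {
  if (!allowedDomains || allowedDomains.length === 0) {
    return true;
  }
  const domain = email?.split('@')[1]?.toLowerCase();
  if (!domain) {
    return false; // malformed address: fail closed
  }
  return allowedDomains.some((allowed) => allowed.toLowerCase() === domain);
}
```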

@@ -11,6 +11,9 @@ jest.mock('@librechat/api', () => ({
completeOAuthFlow: jest.fn(),
generateFlowId: jest.fn(),
},
MCPTokenStorage: {
storeTokens: jest.fn(),
},
getUserMCPAuthMap: jest.fn(),
}));

@@ -47,8 +50,8 @@ jest.mock('~/server/services/Config', () => ({
loadCustomConfig: jest.fn(),
}));

jest.mock('~/server/services/Config/mcpToolsCache', () => ({
updateMCPUserTools: jest.fn(),
jest.mock('~/server/services/Config/mcp', () => ({
updateMCPServerTools: jest.fn(),
}));

jest.mock('~/server/services/MCP', () => ({
@@ -234,7 +237,7 @@ describe('MCP Routes', () => {
});

describe('GET /:serverName/oauth/callback', () => {
const { MCPOAuthHandler } = require('@librechat/api');
const { MCPOAuthHandler, MCPTokenStorage } = require('@librechat/api');
const { getLogStores } = require('~/cache');

it('should redirect to error page when OAuth error is received', async () => {
@@ -280,6 +283,7 @@ describe('MCP Routes', () => {
it('should handle OAuth callback successfully', async () => {
const mockFlowManager = {
completeFlow: jest.fn().mockResolvedValue(),
deleteFlow: jest.fn().mockResolvedValue(true),
};
const mockFlowState = {
serverName: 'test-server',
@@ -295,6 +299,7 @@ describe('MCP Routes', () => {

MCPOAuthHandler.getFlowState.mockResolvedValue(mockFlowState);
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
MCPTokenStorage.storeTokens.mockResolvedValue();
getLogStores.mockReturnValue({});
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);

@@ -332,11 +337,24 @@ describe('MCP Routes', () => {
'test-auth-code',
mockFlowManager,
);
expect(MCPTokenStorage.storeTokens).toHaveBeenCalledWith(
expect.objectContaining({
userId: 'test-user-id',
serverName: 'test-server',
tokens: mockTokens,
clientInfo: mockFlowState.clientInfo,
metadata: mockFlowState.metadata,
}),
);
const storeInvocation = MCPTokenStorage.storeTokens.mock.invocationCallOrder[0];
const connectInvocation = mockMcpManager.getUserConnection.mock.invocationCallOrder[0];
expect(storeInvocation).toBeLessThan(connectInvocation);
expect(mockFlowManager.completeFlow).toHaveBeenCalledWith(
'tool-flow-123',
'mcp_oauth',
mockTokens,
);
expect(mockFlowManager.deleteFlow).toHaveBeenCalledWith('test-flow-id', 'mcp_get_tokens');
});
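The ordering assertion added here leans on Jest's `mock.invocationCallOrder`, which records a process-wide sequence number for every call, so two independent mocks can be compared without any shared spy. A minimal standalone illustration of the same pattern:

```js
// Each jest.fn() invocation receives a global sequence number in mock.invocationCallOrder.
const store = jest.fn();
const connect = jest.fn();

store();
connect();

// Proves store() ran before connect(), even though they are separate mocks.
expect(store.mock.invocationCallOrder[0]).toBeLessThan(connect.mock.invocationCallOrder[0]);
```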

it('should redirect to error page when callback processing fails', async () => {
@@ -354,6 +372,7 @@ describe('MCP Routes', () => {
it('should handle system-level OAuth completion', async () => {
const mockFlowManager = {
completeFlow: jest.fn().mockResolvedValue(),
deleteFlow: jest.fn().mockResolvedValue(true),
};
const mockFlowState = {
serverName: 'test-server',
@@ -369,6 +388,7 @@ describe('MCP Routes', () => {

MCPOAuthHandler.getFlowState.mockResolvedValue(mockFlowState);
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
MCPTokenStorage.storeTokens.mockResolvedValue();
getLogStores.mockReturnValue({});
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);

@@ -379,11 +399,13 @@ describe('MCP Routes', () => {

expect(response.status).toBe(302);
expect(response.headers.location).toBe('/oauth/success?serverName=test-server');
expect(mockFlowManager.deleteFlow).toHaveBeenCalledWith('test-flow-id', 'mcp_get_tokens');
});

it('should handle reconnection failure after OAuth', async () => {
const mockFlowManager = {
completeFlow: jest.fn().mockResolvedValue(),
deleteFlow: jest.fn().mockResolvedValue(true),
};
const mockFlowState = {
serverName: 'test-server',
@@ -399,6 +421,7 @@ describe('MCP Routes', () => {

MCPOAuthHandler.getFlowState.mockResolvedValue(mockFlowState);
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
MCPTokenStorage.storeTokens.mockResolvedValue();
getLogStores.mockReturnValue({});
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);

@@ -418,6 +441,46 @@ describe('MCP Routes', () => {

expect(response.status).toBe(302);
expect(response.headers.location).toBe('/oauth/success?serverName=test-server');
expect(MCPTokenStorage.storeTokens).toHaveBeenCalled();
expect(mockFlowManager.deleteFlow).toHaveBeenCalledWith('test-flow-id', 'mcp_get_tokens');
});

it('should redirect to error page if token storage fails', async () => {
const mockFlowManager = {
completeFlow: jest.fn().mockResolvedValue(),
deleteFlow: jest.fn().mockResolvedValue(true),
};
const mockFlowState = {
serverName: 'test-server',
userId: 'test-user-id',
metadata: { toolFlowId: 'tool-flow-123' },
clientInfo: {},
codeVerifier: 'test-verifier',
};
const mockTokens = {
access_token: 'test-access-token',
refresh_token: 'test-refresh-token',
};

MCPOAuthHandler.getFlowState.mockResolvedValue(mockFlowState);
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
MCPTokenStorage.storeTokens.mockRejectedValue(new Error('store failed'));
getLogStores.mockReturnValue({});
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);

const mockMcpManager = {
getUserConnection: jest.fn(),
};
require('~/config').getMCPManager.mockReturnValue(mockMcpManager);

const response = await request(app).get('/api/mcp/test-server/oauth/callback').query({
code: 'test-auth-code',
state: 'test-flow-id',
});

expect(response.status).toBe(302);
expect(response.headers.location).toBe('/oauth/error?error=callback_failed');
expect(mockMcpManager.getUserConnection).not.toHaveBeenCalled();
});
});

@@ -778,10 +841,10 @@ describe('MCP Routes', () => {
require('~/cache').getLogStores.mockReturnValue({});

const { getCachedTools, setCachedTools } = require('~/server/services/Config');
const { updateMCPUserTools } = require('~/server/services/Config/mcpToolsCache');
const { updateMCPServerTools } = require('~/server/services/Config/mcp');
getCachedTools.mockResolvedValue({});
setCachedTools.mockResolvedValue();
updateMCPUserTools.mockResolvedValue();
updateMCPServerTools.mockResolvedValue();

require('~/server/services/Tools/mcp').reinitMCPServer.mockResolvedValue({
success: true,
@@ -836,10 +899,10 @@ describe('MCP Routes', () => {
]);

const { getCachedTools, setCachedTools } = require('~/server/services/Config');
const { updateMCPUserTools } = require('~/server/services/Config/mcpToolsCache');
const { updateMCPServerTools } = require('~/server/services/Config/mcp');
getCachedTools.mockResolvedValue({});
setCachedTools.mockResolvedValue();
updateMCPUserTools.mockResolvedValue();
updateMCPServerTools.mockResolvedValue();

require('~/server/services/Tools/mcp').reinitMCPServer.mockResolvedValue({
success: true,
@@ -1143,7 +1206,11 @@ describe('MCP Routes', () => {

describe('GET /:serverName/oauth/callback - Edge Cases', () => {
it('should handle OAuth callback without toolFlowId (falsy toolFlowId)', async () => {
const { MCPOAuthHandler } = require('@librechat/api');
const { MCPOAuthHandler, MCPTokenStorage } = require('@librechat/api');
const mockTokens = {
access_token: 'edge-access-token',
refresh_token: 'edge-refresh-token',
};
MCPOAuthHandler.getFlowState = jest.fn().mockResolvedValue({
id: 'test-flow-id',
userId: 'test-user-id',
@@ -1155,6 +1222,8 @@ describe('MCP Routes', () => {
clientInfo: {},
codeVerifier: 'test-verifier',
});
MCPOAuthHandler.completeOAuthFlow = jest.fn().mockResolvedValue(mockTokens);
MCPTokenStorage.storeTokens.mockResolvedValue();

const mockFlowManager = {
completeFlow: jest.fn(),
@@ -1179,6 +1248,11 @@ describe('MCP Routes', () => {
it('should handle null cached tools in OAuth callback (triggers || {} fallback)', async () => {
const { getCachedTools } = require('~/server/services/Config');
getCachedTools.mockResolvedValue(null);
const { MCPOAuthHandler, MCPTokenStorage } = require('@librechat/api');
const mockTokens = {
access_token: 'edge-access-token',
refresh_token: 'edge-refresh-token',
};

const mockFlowManager = {
getFlowState: jest.fn().mockResolvedValue({
@@ -1191,6 +1265,15 @@ describe('MCP Routes', () => {
completeFlow: jest.fn(),
};
require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);
MCPOAuthHandler.getFlowState.mockResolvedValue({
serverName: 'test-server',
userId: 'test-user-id',
metadata: { serverUrl: 'https://example.com', oauth: {} },
clientInfo: {},
codeVerifier: 'test-verifier',
});
MCPOAuthHandler.completeOAuthFlow.mockResolvedValue(mockTokens);
MCPTokenStorage.storeTokens.mockResolvedValue();

const mockMcpManager = {
getUserConnection: jest.fn().mockResolvedValue({

@@ -1,20 +1,19 @@
const express = require('express');
const { nanoid } = require('nanoid');
const { logger } = require('@librechat/data-schemas');
const { generateCheckAccess } = require('@librechat/api');
const { generateCheckAccess, isActionDomainAllowed } = require('@librechat/api');
const {
Permissions,
ResourceType,
PermissionBits,
PermissionTypes,
actionDelimiter,
PermissionBits,
removeNullishValues,
} = require('librechat-data-provider');
const { encryptMetadata, domainParser } = require('~/server/services/ActionService');
const { findAccessibleResources } = require('~/server/services/PermissionService');
const { getAgent, updateAgent, getListAgentsByAccess } = require('~/models/Agent');
const { updateAction, getActions, deleteAction } = require('~/models/Action');
const { isActionDomainAllowed } = require('~/server/services/domains');
const { canAccessAgentResource } = require('~/server/middleware');
const { getRoleByName } = require('~/models/Role');

@@ -1,12 +1,12 @@
const express = require('express');
const { nanoid } = require('nanoid');
const { logger } = require('@librechat/data-schemas');
const { isActionDomainAllowed } = require('@librechat/api');
const { actionDelimiter, EModelEndpoint, removeNullishValues } = require('librechat-data-provider');
const { encryptMetadata, domainParser } = require('~/server/services/ActionService');
const { getOpenAIClient } = require('~/server/controllers/assistants/helpers');
const { updateAction, getActions, deleteAction } = require('~/models/Action');
const { updateAssistantDoc, getAssistant } = require('~/models/Assistant');
const { isActionDomainAllowed } = require('~/server/services/domains');

const router = express.Router();

@@ -1,19 +1,33 @@
const { Router } = require('express');
const { logger } = require('@librechat/data-schemas');
const { MCPOAuthHandler, getUserMCPAuthMap } = require('@librechat/api');
const { CacheKeys, Constants } = require('librechat-data-provider');
const {
createSafeUser,
MCPOAuthHandler,
MCPTokenStorage,
getUserMCPAuthMap,
} = require('@librechat/api');
const { getMCPManager, getFlowStateManager, getOAuthReconnectionManager } = require('~/config');
const { getMCPSetupData, getServerConnectionStatus } = require('~/server/services/MCP');
const { findToken, updateToken, createToken, deleteTokens } = require('~/models');
const { updateMCPUserTools } = require('~/server/services/Config/mcpToolsCache');
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const { CacheKeys, Constants } = require('librechat-data-provider');
const { getMCPManager, getFlowStateManager } = require('~/config');
const { updateMCPServerTools } = require('~/server/services/Config/mcp');
const { reinitMCPServer } = require('~/server/services/Tools/mcp');
const { getMCPTools } = require('~/server/controllers/mcp');
const { requireJwtAuth } = require('~/server/middleware');
const { findPluginAuthsByKeys } = require('~/models');
const { getLogStores } = require('~/cache');

const router = Router();

/**
* Get all MCP tools available to the user
* Returns only MCP tools, completely decoupled from regular LibreChat tools
*/
router.get('/tools', requireJwtAuth, async (req, res) => {
return getMCPTools(req, res);
});

/**
* Initiate OAuth flow
* This endpoint is called when the user clicks the auth link in the UI
@@ -121,6 +135,41 @@ router.get('/:serverName/oauth/callback', async (req, res) => {
const tokens = await MCPOAuthHandler.completeOAuthFlow(flowId, code, flowManager);
logger.info('[MCP OAuth] OAuth flow completed, tokens received in callback route');

/** Persist tokens immediately so reconnection uses fresh credentials */
if (flowState?.userId && tokens) {
try {
await MCPTokenStorage.storeTokens({
userId: flowState.userId,
serverName,
tokens,
createToken,
updateToken,
findToken,
clientInfo: flowState.clientInfo,
metadata: flowState.metadata,
});
logger.debug('[MCP OAuth] Stored OAuth tokens prior to reconnection', {
serverName,
userId: flowState.userId,
});
} catch (error) {
logger.error('[MCP OAuth] Failed to store OAuth tokens after callback', error);
throw error;
}

/**
* Clear any cached `mcp_get_tokens` flow result so subsequent lookups
* re-fetch the freshly stored credentials instead of returning stale nulls.
*/
if (typeof flowManager?.deleteFlow === 'function') {
try {
await flowManager.deleteFlow(flowId, 'mcp_get_tokens');
} catch (error) {
logger.warn('[MCP OAuth] Failed to clear cached token flow state', error);
}
}
}

try {
const mcpManager = getMCPManager(flowState.userId);
logger.debug(`[MCP OAuth] Attempting to reconnect ${serverName} with new OAuth tokens`);
@@ -144,9 +193,12 @@ router.get('/:serverName/oauth/callback', async (req, res) => {
`[MCP OAuth] Successfully reconnected ${serverName} for user ${flowState.userId}`,
);

// clear any reconnection attempts
const oauthReconnectionManager = getOAuthReconnectionManager();
oauthReconnectionManager.clearReconnection(flowState.userId, serverName);

const tools = await userConnection.fetchTools();
await updateMCPUserTools({
userId: flowState.userId,
await updateMCPServerTools({
serverName,
tools,
});
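The control flow in this callback is the whole point of the change: persist tokens first, invalidate the stale `mcp_get_tokens` flow result, and only then reconnect and refresh the per-server tool cache, so a reconnect that reads credentials can never observe the pre-OAuth state. Reduced to a skeleton (names taken from the diff, structure paraphrased rather than quoted):

```js
// Paraphrased ordering of the callback, not a drop-in excerpt.
const tokens = await MCPOAuthHandler.completeOAuthFlow(flowId, code, flowManager);
await MCPTokenStorage.storeTokens({ userId, serverName, tokens /* , ...model helpers */ });
await flowManager.deleteFlow(flowId, 'mcp_get_tokens'); // drop the stale cached lookup
const connection = await mcpManager.getUserConnection(/* reads the fresh tokens */);
await updateMCPServerTools({ serverName, tools: await connection.fetchTools() });
```

This is exactly the ordering the earlier `invocationCallOrder` assertion locks in: a regression that reconnects before storing would fail that test.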

@@ -288,9 +340,9 @@ router.post('/oauth/cancel/:serverName', requireJwtAuth, async (req, res) => {
router.post('/:serverName/reinitialize', requireJwtAuth, async (req, res) => {
try {
const { serverName } = req.params;
const user = req.user;
const user = createSafeUser(req.user);

if (!user?.id) {
if (!user.id) {
return res.status(401).json({ error: 'User not authenticated' });
}

@@ -320,7 +372,7 @@ router.post('/:serverName/reinitialize', requireJwtAuth, async (req, res) => {
}

const result = await reinitMCPServer({
req,
user,
serverName,
userMCPAuthMap,
});

@@ -3,8 +3,8 @@ const { logger } = require('@librechat/data-schemas');
const { ContentTypes } = require('librechat-data-provider');
const {
saveConvo,
saveMessage,
getMessage,
saveMessage,
getMessages,
updateMessage,
deleteMessages,
@@ -58,34 +58,51 @@ router.get('/', async (req, res) => {
const nextCursor = messages.length > pageSize ? messages.pop()[sortField] : null;
response = { messages, nextCursor };
} else if (search) {
const searchResults = await Message.meiliSearch(search, undefined, true);
const searchResults = await Message.meiliSearch(search, { filter: `user = "${user}"` }, true);

const messages = searchResults.hits || [];

const result = await getConvosQueried(req.user.id, messages, cursor);

const activeMessages = [];
const messageIds = [];
const cleanedMessages = [];
for (let i = 0; i < messages.length; i++) {
let message = messages[i];
if (message.conversationId.includes('--')) {
message.conversationId = cleanUpPrimaryKeyValue(message.conversationId);
}
if (result.convoMap[message.conversationId]) {
const convo = result.convoMap[message.conversationId];

const dbMessage = await getMessage({ user, messageId: message.messageId });
activeMessages.push({
...message,
title: convo.title,
conversationId: message.conversationId,
model: convo.model,
isCreatedByUser: dbMessage?.isCreatedByUser,
endpoint: dbMessage?.endpoint,
iconURL: dbMessage?.iconURL,
});
messageIds.push(message.messageId);
cleanedMessages.push(message);
}
}

const dbMessages = await getMessages({
user,
messageId: { $in: messageIds },
});

const dbMessageMap = {};
for (const dbMessage of dbMessages) {
dbMessageMap[dbMessage.messageId] = dbMessage;
}

const activeMessages = [];
for (const message of cleanedMessages) {
const convo = result.convoMap[message.conversationId];
const dbMessage = dbMessageMap[message.messageId];

activeMessages.push({
...message,
title: convo.title,
conversationId: message.conversationId,
model: convo.model,
isCreatedByUser: dbMessage?.isCreatedByUser,
endpoint: dbMessage?.endpoint,
iconURL: dbMessage?.iconURL,
});
}

response = { messages: activeMessages, nextCursor: null };
} else {
response = { messages: [], nextCursor: null };
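Two distinct fixes land in this search branch. The Meilisearch call now passes `{ filter: `user = "${user}"` }`, scoping hits to the requesting user instead of searching every user's messages; and the per-hit `await getMessage(...)` inside the loop, an N+1 query pattern, is replaced by one batched `getMessages` with `$in` plus an in-memory map. The batching idiom in isolation:

```js
// One round-trip instead of N: collect ids, fetch once, index by id, then enrich.
const ids = hits.map((hit) => hit.messageId);
const docs = await getMessages({ user, messageId: { $in: ids } });
const byId = Object.fromEntries(docs.map((doc) => [doc.messageId, doc]));
const enriched = hits.map((hit) => ({ ...hit, endpoint: byId[hit.messageId]?.endpoint }));
```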

@@ -26,9 +26,12 @@ const domains = {
router.use(logHeaders);
router.use(loginLimiter);

const oauthHandler = async (req, res) => {
const oauthHandler = async (req, res, next) => {
try {
await checkDomainAllowed(req, res);
if (res.headersSent) {
return;
}

await checkBan(req, res);
if (req.banned) {
return;
@@ -46,6 +49,7 @@ const oauthHandler = async (req, res) => {
res.redirect(domains.client);
} catch (err) {
logger.error('Error in setting authentication tokens:', err);
next(err);
}
};

@@ -79,6 +83,7 @@ router.get(
scope: ['openid', 'profile', 'email'],
}),
setBalanceConfig,
checkDomainAllowed,
oauthHandler,
);

@@ -104,6 +109,7 @@ router.get(
profileFields: ['id', 'email', 'name'],
}),
setBalanceConfig,
checkDomainAllowed,
oauthHandler,
);

@@ -125,6 +131,7 @@ router.get(
session: false,
}),
setBalanceConfig,
checkDomainAllowed,
oauthHandler,
);

@@ -148,6 +155,7 @@ router.get(
scope: ['user:email', 'read:user'],
}),
setBalanceConfig,
checkDomainAllowed,
oauthHandler,
);

@@ -171,6 +179,7 @@ router.get(
scope: ['identify', 'email'],
}),
setBalanceConfig,
checkDomainAllowed,
oauthHandler,
);

@@ -192,6 +201,7 @@ router.post(
session: false,
}),
setBalanceConfig,
checkDomainAllowed,
oauthHandler,
);
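The same edit repeats across every social-login route: `checkDomainAllowed` becomes an explicit middleware between `setBalanceConfig` and `oauthHandler`, rather than being awaited inside the handler. Express runs the chain left to right and stops as soon as one middleware responds, so a disallowed domain never reaches token issuance. Schematically (provider name and strategy options are illustrative, the shape follows the hunks above):

```js
// Illustrative route shape; each provider's authenticate options differ.
router.get(
  '/google/callback',
  passport.authenticate('google', { session: false }),
  setBalanceConfig,   // runs after authentication succeeds
  checkDomainAllowed, // may res.redirect('/login') and end the chain
  oauthHandler,       // only reached when the domain check calls next()
);
```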

@@ -1,16 +1,12 @@
const { FileSources, EModelEndpoint, getConfigDefaults } = require('librechat-data-provider');
const {
isEnabled,
loadOCRConfig,
loadMemoryConfig,
agentsConfigSetup,
loadWebSearchConfig,
loadDefaultInterface,
} = require('@librechat/api');
const {
FileSources,
loadOCRConfig,
EModelEndpoint,
getConfigDefaults,
} = require('librechat-data-provider');
const {
checkWebSearchConfig,
checkVariables,

@@ -142,7 +142,6 @@ describe('AppService', () => {
turnstileConfig: mockedTurnstileConfig,
modelSpecs: undefined,
paths: expect.anything(),
ocr: expect.anything(),
imageOutputType: expect.any(String),
fileConfig: undefined,
secureImageLinks: undefined,
@@ -152,6 +151,7 @@ describe('AppService', () => {
webSearch: expect.objectContaining({
safeSearch: 1,
jinaApiKey: '${JINA_API_KEY}',
jinaApiUrl: '${JINA_API_URL}',
cohereApiKey: '${COHERE_API_KEY}',
serperApiKey: '${SERPER_API_KEY}',
searxngApiKey: '${SEARXNG_API_KEY}',

@@ -2,13 +2,13 @@ const bcrypt = require('bcryptjs');
const jwt = require('jsonwebtoken');
const { webcrypto } = require('node:crypto');
const { logger } = require('@librechat/data-schemas');
const { isEnabled, checkEmailConfig } = require('@librechat/api');
const { SystemRoles, errorsToString } = require('librechat-data-provider');
const { isEnabled, checkEmailConfig, isEmailDomainAllowed } = require('@librechat/api');
const { ErrorTypes, SystemRoles, errorsToString } = require('librechat-data-provider');
const {
findUser,
findToken,
createUser,
updateUser,
findToken,
countUsers,
getUserById,
findSession,
@@ -20,7 +20,6 @@ const {
deleteUserById,
generateRefreshToken,
} = require('~/models');
const { isEmailDomainAllowed } = require('~/server/services/domains');
const { registerSchema } = require('~/strategies/validators');
const { getAppConfig } = require('~/server/services/Config');
const { sendEmail } = require('~/server/utils');
@@ -130,7 +129,7 @@ const verifyEmail = async (req) => {
return { message: 'Email already verified', status: 'success' };
}

let emailVerificationData = await findToken({ email: decodedEmail });
let emailVerificationData = await findToken({ email: decodedEmail }, { sort: { createdAt: -1 } });

if (!emailVerificationData) {
logger.warn(`[verifyEmail] [No email verification data found] [Email: ${decodedEmail}]`);
@@ -181,6 +180,14 @@ const registerUser = async (user, additionalData = {}) => {

let newUserId;
try {
const appConfig = await getAppConfig();
if (!isEmailDomainAllowed(email, appConfig?.registration?.allowedDomains)) {
const errorMessage =
'The email address provided cannot be used. Please use a different email address.';
logger.error(`[registerUser] [Registration not allowed] [Email: ${user.email}]`);
return { status: 403, message: errorMessage };
}

const existingUser = await findUser({ email }, 'email _id');

if (existingUser) {
@@ -195,14 +202,6 @@ const registerUser = async (user, additionalData = {}) => {
return { status: 200, message: genericVerificationMessage };
}

const appConfig = await getAppConfig({ role: user.role });
if (!isEmailDomainAllowed(email, appConfig?.registration?.allowedDomains)) {
const errorMessage =
'The email address provided cannot be used. Please use a different email address.';
logger.error(`[registerUser] [Registration not allowed] [Email: ${user.email}]`);
return { status: 403, message: errorMessage };
}

//determine if this is the first registered user (not counting anonymous_user)
const isFirstRegisteredUser = (await countUsers()) === 0;

@@ -252,6 +251,13 @@ const registerUser = async (user, additionalData = {}) => {
*/
const requestPasswordReset = async (req) => {
const { email } = req.body;
const appConfig = await getAppConfig();
if (!isEmailDomainAllowed(email, appConfig?.registration?.allowedDomains)) {
const error = new Error(ErrorTypes.AUTH_FAILED);
error.code = ErrorTypes.AUTH_FAILED;
error.message = 'Email domain not allowed';
return error;
}
const user = await findUser({ email }, 'email _id');
const emailEnabled = checkEmailConfig();

@@ -313,9 +319,12 @@ const requestPasswordReset = async (req) => {
* @returns
*/
const resetPassword = async (userId, token, password) => {
let passwordResetToken = await findToken({
userId,
});
let passwordResetToken = await findToken(
{
userId,
},
{ sort: { createdAt: -1 } },
);

if (!passwordResetToken) {
return new Error('Invalid or expired password reset token');
@@ -350,23 +359,18 @@ const resetPassword = async (userId, token, password) => {

/**
* Set Auth Tokens
*
* @param {String | ObjectId} userId
* @param {Object} res
* @param {String} sessionId
* @param {ServerResponse} res
* @param {ISession | null} [session=null]
* @returns
*/
const setAuthTokens = async (userId, res, sessionId = null) => {
const setAuthTokens = async (userId, res, _session = null) => {
try {
const user = await getUserById(userId);
const token = await generateToken(user);

let session;
let session = _session;
let refreshToken;
let refreshTokenExpires;

if (sessionId) {
session = await findSession({ sessionId: sessionId }, { lean: false });
if (session && session._id && session.expiration != null) {
refreshTokenExpires = session.expiration.getTime();
refreshToken = await generateRefreshToken(session);
} else {
@@ -376,6 +380,9 @@ const setAuthTokens = async (userId, res, sessionId = null) => {
refreshTokenExpires = session.expiration.getTime();
}

const user = await getUserById(userId);
const token = await generateToken(user);

res.cookie('refreshToken', refreshToken, {
expires: new Date(refreshTokenExpires),
httpOnly: true,

@@ -4,6 +4,8 @@ const AppService = require('~/server/services/AppService');
const { setCachedTools } = require('./getCachedTools');
const getLogStores = require('~/cache/getLogStores');

const BASE_CONFIG_KEY = '_BASE_';

/**
* Get the app configuration based on user context
* @param {Object} [options]
@@ -14,8 +16,8 @@ const getLogStores = require('~/cache/getLogStores');
async function getAppConfig(options = {}) {
const { role, refresh } = options;

const cache = getLogStores(CacheKeys.CONFIG_STORE);
const cacheKey = role ? `${CacheKeys.APP_CONFIG}:${role}` : CacheKeys.APP_CONFIG;
const cache = getLogStores(CacheKeys.APP_CONFIG);
const cacheKey = role ? role : BASE_CONFIG_KEY;

if (!refresh) {
const cached = await cache.get(cacheKey);
@@ -24,7 +26,7 @@ async function getAppConfig(options = {}) {
}
}

let baseConfig = await cache.get(CacheKeys.APP_CONFIG);
let baseConfig = await cache.get(BASE_CONFIG_KEY);
if (!baseConfig) {
logger.info('[getAppConfig] App configuration not initialized. Initializing AppService...');
baseConfig = await AppService();
@@ -34,10 +36,10 @@ async function getAppConfig(options = {}) {
}

if (baseConfig.availableTools) {
await setCachedTools(baseConfig.availableTools, { isGlobal: true });
await setCachedTools(baseConfig.availableTools);
}

await cache.set(CacheKeys.APP_CONFIG, baseConfig);
await cache.set(BASE_CONFIG_KEY, baseConfig);
}

// For now, return the base config
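The cache addressing flips in this hunk: instead of composite keys inside the shared `CONFIG_STORE` namespace, app config gets its own `APP_CONFIG` store where the key is the role itself, with `_BASE_` reserved for the role-less entry. Side by side (role value invented for the example):

```js
// Before: one shared namespace, composite keys.
//   getLogStores(CacheKeys.CONFIG_STORE).get(`${CacheKeys.APP_CONFIG}:ADMIN`)
// After: a dedicated namespace keyed directly by role.
const cache = getLogStores(CacheKeys.APP_CONFIG);
const cacheKey = role ? role : BASE_CONFIG_KEY; // e.g. 'ADMIN' or '_BASE_'
await cache.set(cacheKey, config);
```

A dedicated namespace also means the whole app-config cache can be dropped or forced in-memory as one unit, without touching unrelated `CONFIG_STORE` entries.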

@@ -3,89 +3,32 @@ const getLogStores = require('~/cache/getLogStores');

/**
* Cache key generators for different tool access patterns
* These will support future permission-based caching
*/
const ToolCacheKeys = {
/** Global tools available to all users */
GLOBAL: 'tools:global',
/** Tools available to a specific user */
USER: (userId) => `tools:user:${userId}`,
/** Tools available to a specific role */
ROLE: (roleId) => `tools:role:${roleId}`,
/** Tools available to a specific group */
GROUP: (groupId) => `tools:group:${groupId}`,
/** Combined effective tools for a user (computed from all sources) */
EFFECTIVE: (userId) => `tools:effective:${userId}`,
/** MCP tools cached by server name */
MCP_SERVER: (serverName) => `tools:mcp:${serverName}`,
};

/**
* Retrieves available tools from cache
* @function getCachedTools
* @param {Object} options - Options for retrieving tools
* @param {string} [options.userId] - User ID for user-specific tools
* @param {string[]} [options.roleIds] - Role IDs for role-based tools
* @param {string[]} [options.groupIds] - Group IDs for group-based tools
* @param {boolean} [options.includeGlobal=true] - Whether to include global tools
* @param {string} [options.serverName] - MCP server name to get cached tools for
* @returns {Promise<LCAvailableTools|null>} The available tools object or null if not cached
*/
async function getCachedTools(options = {}) {
const cache = getLogStores(CacheKeys.CONFIG_STORE);
const { userId, roleIds = [], groupIds = [], includeGlobal = true } = options;
const { serverName } = options;

// For now, return global tools (current behavior)
// This will be expanded to merge tools from different sources
if (!userId && includeGlobal) {
return await cache.get(ToolCacheKeys.GLOBAL);
// Return MCP server-specific tools if requested
if (serverName) {
return await cache.get(ToolCacheKeys.MCP_SERVER(serverName));
}

// Future implementation will merge tools from multiple sources
// based on user permissions, roles, and groups
if (userId) {
/** @type {LCAvailableTools | null} Check if we have pre-computed effective tools for this user */
const effectiveTools = await cache.get(ToolCacheKeys.EFFECTIVE(userId));
if (effectiveTools) {
return effectiveTools;
}

/** @type {LCAvailableTools | null} Otherwise, compute from individual sources */
const toolSources = [];

if (includeGlobal) {
const globalTools = await cache.get(ToolCacheKeys.GLOBAL);
if (globalTools) {
toolSources.push(globalTools);
}
}

// User-specific tools
const userTools = await cache.get(ToolCacheKeys.USER(userId));
if (userTools) {
toolSources.push(userTools);
}

// Role-based tools
for (const roleId of roleIds) {
const roleTools = await cache.get(ToolCacheKeys.ROLE(roleId));
if (roleTools) {
toolSources.push(roleTools);
}
}

// Group-based tools
for (const groupId of groupIds) {
const groupTools = await cache.get(ToolCacheKeys.GROUP(groupId));
if (groupTools) {
toolSources.push(groupTools);
}
}

// Merge all tool sources (for now, simple merge - future will handle conflicts)
if (toolSources.length > 0) {
return mergeToolSources(toolSources);
}
}

return null;
// Default to global tools
return await cache.get(ToolCacheKeys.GLOBAL);
}

/**
@@ -93,49 +36,34 @@ async function getCachedTools(options = {}) {
* @function setCachedTools
* @param {Object} tools - The tools object to cache
* @param {Object} options - Options for caching tools
* @param {string} [options.userId] - User ID for user-specific tools
* @param {string} [options.roleId] - Role ID for role-based tools
* @param {string} [options.groupId] - Group ID for group-based tools
* @param {boolean} [options.isGlobal=false] - Whether these are global tools
* @param {string} [options.serverName] - MCP server name for server-specific tools
* @param {number} [options.ttl] - Time to live in milliseconds
* @returns {Promise<boolean>} Whether the operation was successful
*/
async function setCachedTools(tools, options = {}) {
const cache = getLogStores(CacheKeys.CONFIG_STORE);
const { userId, roleId, groupId, isGlobal = false, ttl } = options;
const { serverName, ttl } = options;

let cacheKey;
if (isGlobal || (!userId && !roleId && !groupId)) {
cacheKey = ToolCacheKeys.GLOBAL;
} else if (userId) {
cacheKey = ToolCacheKeys.USER(userId);
} else if (roleId) {
cacheKey = ToolCacheKeys.ROLE(roleId);
} else if (groupId) {
cacheKey = ToolCacheKeys.GROUP(groupId);
// Cache by MCP server if specified
if (serverName) {
return await cache.set(ToolCacheKeys.MCP_SERVER(serverName), tools, ttl);
}

if (!cacheKey) {
throw new Error('Invalid cache key options provided');
}

return await cache.set(cacheKey, tools, ttl);
// Default to global cache
return await cache.set(ToolCacheKeys.GLOBAL, tools, ttl);
}

/**
* Invalidates cached tools
* @function invalidateCachedTools
* @param {Object} options - Options for invalidating tools
* @param {string} [options.userId] - User ID to invalidate
* @param {string} [options.roleId] - Role ID to invalidate
* @param {string} [options.groupId] - Group ID to invalidate
* @param {string} [options.serverName] - MCP server name to invalidate
* @param {boolean} [options.invalidateGlobal=false] - Whether to invalidate global tools
* @param {boolean} [options.invalidateEffective=true] - Whether to invalidate effective tools
* @returns {Promise<void>}
*/
async function invalidateCachedTools(options = {}) {
const cache = getLogStores(CacheKeys.CONFIG_STORE);
const { userId, roleId, groupId, invalidateGlobal = false, invalidateEffective = true } = options;
const { serverName, invalidateGlobal = false } = options;

const keysToDelete = [];

@@ -143,116 +71,34 @@ async function invalidateCachedTools(options = {}) {
keysToDelete.push(ToolCacheKeys.GLOBAL);
}

if (userId) {
keysToDelete.push(ToolCacheKeys.USER(userId));
if (invalidateEffective) {
keysToDelete.push(ToolCacheKeys.EFFECTIVE(userId));
}
}

if (roleId) {
keysToDelete.push(ToolCacheKeys.ROLE(roleId));
// TODO: In future, invalidate all users with this role
}

if (groupId) {
keysToDelete.push(ToolCacheKeys.GROUP(groupId));
// TODO: In future, invalidate all users in this group
if (serverName) {
keysToDelete.push(ToolCacheKeys.MCP_SERVER(serverName));
}

await Promise.all(keysToDelete.map((key) => cache.delete(key)));
}

/**
* Computes and caches effective tools for a user
* @function computeEffectiveTools
* @param {string} userId - The user ID
* @param {Object} context - Context containing user's roles and groups
* @param {string[]} [context.roleIds=[]] - User's role IDs
* @param {string[]} [context.groupIds=[]] - User's group IDs
* @param {number} [ttl] - Time to live for the computed result
* @returns {Promise<Object>} The computed effective tools
* Gets MCP tools for a specific server from cache or merges with global tools
* @function getMCPServerTools
* @param {string} serverName - The MCP server name
* @returns {Promise<LCAvailableTools|null>} The available tools for the server
*/
async function computeEffectiveTools(userId, context = {}, ttl) {
const { roleIds = [], groupIds = [] } = context;
async function getMCPServerTools(serverName) {
const cache = getLogStores(CacheKeys.CONFIG_STORE);
const serverTools = await cache.get(ToolCacheKeys.MCP_SERVER(serverName));

// Get all tool sources
const tools = await getCachedTools({
userId,
roleIds,
groupIds,
includeGlobal: true,
});

if (tools) {
// Cache the computed result
const cache = getLogStores(CacheKeys.CONFIG_STORE);
await cache.set(ToolCacheKeys.EFFECTIVE(userId), tools, ttl);
if (serverTools) {
return serverTools;
}

return tools;
}

/**
* Merges multiple tool sources into a single tools object
* @function mergeToolSources
* @param {Object[]} sources - Array of tool objects to merge
* @returns {Object} Merged tools object
*/
function mergeToolSources(sources) {
// For now, simple merge that combines all tools
// Future implementation will handle:
// - Permission precedence (deny > allow)
// - Tool property conflicts
// - Metadata merging
const merged = {};

for (const source of sources) {
if (!source || typeof source !== 'object') {
continue;
}

for (const [toolId, toolConfig] of Object.entries(source)) {
// Simple last-write-wins for now
// Future: merge based on permission levels
merged[toolId] = toolConfig;
}
}

return merged;
}

/**
* Middleware-friendly function to get tools for a request
* @function getToolsForRequest
* @param {Object} req - Express request object
* @returns {Promise<Object|null>} Available tools for the request
*/
async function getToolsForRequest(req) {
const userId = req.user?.id;

// For now, return global tools if no user
if (!userId) {
return getCachedTools({ includeGlobal: true });
}

// Future: Extract roles and groups from req.user
const roleIds = req.user?.roles || [];
const groupIds = req.user?.groups || [];

return getCachedTools({
userId,
roleIds,
groupIds,
includeGlobal: true,
});
return null;
}

module.exports = {
ToolCacheKeys,
getCachedTools,
setCachedTools,
getToolsForRequest,
getMCPServerTools,
invalidateCachedTools,
computeEffectiveTools,
};
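After this refactor the tool-cache surface reduces to two addressable buckets: the global tool map (`tools:global`) and one map per MCP server (`tools:mcp:<serverName>`); the speculative user/role/group/effective keys are removed. Typical usage under the new signatures (server name invented for the example):

```js
// Cache and read back tools for one MCP server; omit options for the global map.
await setCachedTools(serverTools, { serverName: 'github' }); // key: tools:mcp:github
const githubTools = await getCachedTools({ serverName: 'github' });

await setCachedTools(allTools);          // key: tools:global
const globalTools = await getCachedTools();

// Drop only the per-server entry, leaving the global map intact.
await invalidateCachedTools({ serverName: 'github' });
```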

@@ -1,7 +1,7 @@
const appConfig = require('./app');
const mcpToolsCache = require('./mcp');
const { config } = require('./EndpointService');
const getCachedTools = require('./getCachedTools');
const mcpToolsCache = require('./mcpToolsCache');
const loadCustomConfig = require('./loadCustomConfig');
const loadConfigModels = require('./loadConfigModels');
const loadDefaultModels = require('./loadDefaultModels');

@@ -119,10 +119,6 @@ https://www.librechat.ai/docs/configuration/stt_tts`);
.filter((endpoint) => endpoint.customParams)
.forEach((endpoint) => parseCustomParams(endpoint.name, endpoint.customParams));

if (customConfig.cache) {
const cache = getLogStores(CacheKeys.STATIC_CONFIG);
await cache.set(CacheKeys.LIBRECHAT_YAML_CONFIG, customConfig);
}

if (result.data.modelSpecs) {
customConfig.modelSpecs = result.data.modelSpecs;

@@ -48,16 +48,11 @@ const axios = require('axios');
const { loadYaml } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const loadCustomConfig = require('./loadCustomConfig');
const getLogStores = require('~/cache/getLogStores');

describe('loadCustomConfig', () => {
const mockSet = jest.fn();
const mockCache = { set: mockSet };

beforeEach(() => {
jest.resetAllMocks();
delete process.env.CONFIG_PATH;
getLogStores.mockReturnValue(mockCache);
});

it('should return null and log error if remote config fetch fails', async () => {
@@ -94,7 +89,6 @@ describe('loadCustomConfig', () => {
const result = await loadCustomConfig();

expect(result).toEqual(mockConfig);
expect(mockSet).toHaveBeenCalledWith(expect.anything(), mockConfig);
});

it('should return null and log if config schema validation fails', async () => {
@@ -134,7 +128,6 @@ describe('loadCustomConfig', () => {
axios.get.mockResolvedValue({ data: mockConfig });
const result = await loadCustomConfig();
expect(result).toEqual(mockConfig);
expect(mockSet).toHaveBeenCalledWith(expect.anything(), mockConfig);
});

it('should return null if the remote config file is not found', async () => {
@@ -168,7 +161,6 @@ describe('loadCustomConfig', () => {
process.env.CONFIG_PATH = 'validConfig.yaml';
loadYaml.mockReturnValueOnce(mockConfig);
await loadCustomConfig();
expect(mockSet).not.toHaveBeenCalled();
});

it('should log the loaded custom config', async () => {

api/server/services/Config/mcp.js (new file, 91 additions)
@@ -0,0 +1,91 @@
const { logger } = require('@librechat/data-schemas');
const { CacheKeys, Constants } = require('librechat-data-provider');
const { getCachedTools, setCachedTools } = require('./getCachedTools');
const { getLogStores } = require('~/cache');

/**
* Updates MCP tools in the cache for a specific server
* @param {Object} params - Parameters for updating MCP tools
* @param {string} params.serverName - MCP server name
* @param {Array} params.tools - Array of tool objects from MCP server
* @returns {Promise<LCAvailableTools>}
*/
async function updateMCPServerTools({ serverName, tools }) {
try {
const serverTools = {};
const mcpDelimiter = Constants.mcp_delimiter;

for (const tool of tools) {
const name = `${tool.name}${mcpDelimiter}${serverName}`;
serverTools[name] = {
type: 'function',
['function']: {
name,
description: tool.description,
parameters: tool.inputSchema,
},
};
}

await setCachedTools(serverTools, { serverName });

const cache = getLogStores(CacheKeys.CONFIG_STORE);
await cache.delete(CacheKeys.TOOLS);
logger.debug(`[MCP Cache] Updated ${tools.length} tools for server ${serverName}`);
return serverTools;
} catch (error) {
logger.error(`[MCP Cache] Failed to update tools for ${serverName}:`, error);
throw error;
}
}

/**
* Merges app-level tools with global tools
* @param {import('@librechat/api').LCAvailableTools} appTools
* @returns {Promise<void>}
*/
async function mergeAppTools(appTools) {
try {
const count = Object.keys(appTools).length;
if (!count) {
return;
}
const cachedTools = await getCachedTools();
const mergedTools = { ...cachedTools, ...appTools };
await setCachedTools(mergedTools);
const cache = getLogStores(CacheKeys.CONFIG_STORE);
await cache.delete(CacheKeys.TOOLS);
logger.debug(`Merged ${count} app-level tools`);
} catch (error) {
logger.error('Failed to merge app-level tools:', error);
throw error;
}
}

/**
* Caches MCP server tools (no longer merges with global)
* @param {object} params
* @param {string} params.serverName
* @param {import('@librechat/api').LCAvailableTools} params.serverTools
* @returns {Promise<void>}
*/
async function cacheMCPServerTools({ serverName, serverTools }) {
try {
const count = Object.keys(serverTools).length;
if (!count) {
return;
}
// Only cache server-specific tools, no merging with global
await setCachedTools(serverTools, { serverName });
logger.debug(`Cached ${count} MCP server tools for ${serverName}`);
} catch (error) {
logger.error(`Failed to cache MCP server tools for ${serverName}:`, error);
throw error;
}
}

module.exports = {
mergeAppTools,
cacheMCPServerTools,
updateMCPServerTools,
};
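Each MCP tool is registered under a composite name, `<toolName><mcp_delimiter><serverName>`, so identically named tools on two servers cannot collide in the cache. A hedged usage sketch of the new module (the tool shape and the printed delimiter are illustrative; the real delimiter comes from `Constants.mcp_delimiter`):

```js
// Hypothetical MCP tool list, shaped like a server's fetchTools() result.
const tools = [
  { name: 'search_issues', description: 'Search issues', inputSchema: { type: 'object' } },
];

const serverTools = await updateMCPServerTools({ serverName: 'github', tools });
// If the delimiter were '_mcp_', the cache would now hold a key such as:
//   'search_issues_mcp_github' -> { type: 'function', function: { name, description, parameters } }
```

Compare with the deleted per-user version below: keying by server instead of by user means one refresh serves every user of that server, at the cost of no longer tracking per-user tool visibility in this cache.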
|
||||
@@ -1,143 +0,0 @@
const { logger } = require('@librechat/data-schemas');
const { CacheKeys, Constants } = require('librechat-data-provider');
const { getCachedTools, setCachedTools } = require('./getCachedTools');
const { getLogStores } = require('~/cache');

/**
 * Updates MCP tools in the cache for a specific server and user
 * @param {Object} params - Parameters for updating MCP tools
 * @param {string} params.userId - User ID
 * @param {string} params.serverName - MCP server name
 * @param {Array} params.tools - Array of tool objects from MCP server
 * @returns {Promise<LCAvailableTools>}
 */
async function updateMCPUserTools({ userId, serverName, tools }) {
  try {
    const userTools = await getCachedTools({ userId });

    const mcpDelimiter = Constants.mcp_delimiter;
    for (const key of Object.keys(userTools)) {
      if (key.endsWith(`${mcpDelimiter}${serverName}`)) {
        delete userTools[key];
      }
    }

    for (const tool of tools) {
      const name = `${tool.name}${Constants.mcp_delimiter}${serverName}`;
      userTools[name] = {
        type: 'function',
        ['function']: {
          name,
          description: tool.description,
          parameters: tool.inputSchema,
        },
      };
    }

    await setCachedTools(userTools, { userId });

    const cache = getLogStores(CacheKeys.CONFIG_STORE);
    await cache.delete(CacheKeys.TOOLS);
    logger.debug(`[MCP Cache] Updated ${tools.length} tools for ${serverName} user ${userId}`);
    return userTools;
  } catch (error) {
    logger.error(`[MCP Cache] Failed to update tools for ${serverName}:`, error);
    throw error;
  }
}

/**
 * Merges app-level tools with global tools
 * @param {import('@librechat/api').LCAvailableTools} appTools
 * @returns {Promise<void>}
 */
async function mergeAppTools(appTools) {
  try {
    const count = Object.keys(appTools).length;
    if (!count) {
      return;
    }
    const cachedTools = await getCachedTools({ includeGlobal: true });
    const mergedTools = { ...cachedTools, ...appTools };
    await setCachedTools(mergedTools, { isGlobal: true });
    const cache = getLogStores(CacheKeys.CONFIG_STORE);
    await cache.delete(CacheKeys.TOOLS);
    logger.debug(`Merged ${count} app-level tools`);
  } catch (error) {
    logger.error('Failed to merge app-level tools:', error);
    throw error;
  }
}

/**
 * Merges user-level tools with global tools
 * @param {object} params
 * @param {string} params.userId
 * @param {Record<string, FunctionTool>} params.cachedUserTools
 * @param {import('@librechat/api').LCAvailableTools} params.userTools
 * @returns {Promise<void>}
 */
async function mergeUserTools({ userId, cachedUserTools, userTools }) {
  try {
    if (!userId) {
      return;
    }
    const count = Object.keys(userTools).length;
    if (!count) {
      return;
    }
    const cachedTools = cachedUserTools ?? (await getCachedTools({ userId }));
    const mergedTools = { ...cachedTools, ...userTools };
    await setCachedTools(mergedTools, { userId });
    const cache = getLogStores(CacheKeys.CONFIG_STORE);
    await cache.delete(CacheKeys.TOOLS);
    logger.debug(`Merged ${count} user-level tools`);
  } catch (error) {
    logger.error('Failed to merge user-level tools:', error);
    throw error;
  }
}

/**
 * Clears all MCP tools for a specific server
 * @param {Object} params - Parameters for clearing MCP tools
 * @param {string} [params.userId] - User ID (if clearing user-specific tools)
 * @param {string} params.serverName - MCP server name
 * @returns {Promise<void>}
 */
async function clearMCPServerTools({ userId, serverName }) {
  try {
    const tools = await getCachedTools({ userId, includeGlobal: !userId });

    // Remove all tools for this server
    const mcpDelimiter = Constants.mcp_delimiter;
    let removedCount = 0;
    for (const key of Object.keys(tools)) {
      if (key.endsWith(`${mcpDelimiter}${serverName}`)) {
        delete tools[key];
        removedCount++;
      }
    }

    if (removedCount > 0) {
      await setCachedTools(tools, userId ? { userId } : { isGlobal: true });

      const cache = getLogStores(CacheKeys.CONFIG_STORE);
      await cache.delete(CacheKeys.TOOLS);

      logger.debug(
        `[MCP Cache] Removed ${removedCount} tools for ${serverName}${userId ? ` user ${userId}` : ' (global)'}`,
      );
    }
  } catch (error) {
    logger.error(`[MCP Cache] Failed to clear tools for ${serverName}:`, error);
    throw error;
  }
}

module.exports = {
  mergeAppTools,
  mergeUserTools,
  updateMCPUserTools,
  clearMCPServerTools,
};
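The removed module keyed the cache per user and pruned stale entries by suffix matching before re-adding; the replacement above keys per server. The key scheme itself is unchanged, so a compact sketch of the suffix match (delimiter value `'::'` per the MCP.spec.js mocks later in this diff; the helper name is made up):

const { Constants } = require('librechat-data-provider');

// A cached tool key is `${toolName}${Constants.mcp_delimiter}${serverName}`,
// e.g. 'search::github' when the delimiter is '::'.
function belongsToServer(toolKey, serverName) {
  return toolKey.endsWith(`${Constants.mcp_delimiter}${serverName}`);
}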
@@ -1,6 +1,7 @@
const { Providers } = require('@librechat/agents');
const {
  primeResources,
  getModelMaxTokens,
  extractLibreChatParams,
  optionalChainWithEmptyCheck,
} = require('@librechat/api');
@@ -17,7 +18,6 @@ const { getProviderConfig } = require('~/server/services/Endpoints');
const { processFiles } = require('~/server/services/Files/process');
const { getFiles, getToolFilesByIds } = require('~/models/File');
const { getConvoFiles } = require('~/models/Conversation');
const { getModelMaxTokens } = require('~/utils');

/**
 * @param {object} params
@@ -54,6 +54,11 @@ const addTitle = async (req, { text, response, client }) => {
    clearTimeout(timeoutId);
  }

  if (!title) {
    logger.debug(`[${key}] No title generated`);
    return;
  }

  await titleCache.set(key, title, 120000);
  await saveConvo(
    req,
@@ -1,6 +1,6 @@
const { getLLMConfig } = require('@librechat/api');
const { EModelEndpoint } = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const { getLLMConfig } = require('~/server/services/Endpoints/anthropic/llm');
const AnthropicClient = require('~/app/clients/AnthropicClient');

const initializeClient = async ({ req, res, endpointOption, overrideModel, optionsOnly }) => {
@@ -40,7 +40,6 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
    clientOptions = Object.assign(
      {
        proxy: PROXY ?? null,
        userId: req.user.id,
        reverseProxyUrl: ANTHROPIC_REVERSE_PROXY ?? null,
        modelOptions: endpointOption?.model_parameters ?? {},
      },
@@ -49,6 +48,7 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
    if (overrideModel) {
      clientOptions.modelOptions.model = overrideModel;
    }
    clientOptions.modelOptions.user = req.user.id;
    return getLLMConfig(anthropicApiKey, clientOptions);
  }
@@ -1,103 +0,0 @@
const { ProxyAgent } = require('undici');
const { anthropicSettings, removeNullishValues } = require('librechat-data-provider');
const { checkPromptCacheSupport, getClaudeHeaders, configureReasoning } = require('./helpers');

/**
 * Generates configuration options for creating an Anthropic language model (LLM) instance.
 *
 * @param {string} apiKey - The API key for authentication with Anthropic.
 * @param {Object} [options={}] - Additional options for configuring the LLM.
 * @param {Object} [options.modelOptions] - Model-specific options.
 * @param {string} [options.modelOptions.model] - The name of the model to use.
 * @param {number} [options.modelOptions.maxOutputTokens] - The maximum number of tokens to generate.
 * @param {number} [options.modelOptions.temperature] - Controls randomness in output generation.
 * @param {number} [options.modelOptions.topP] - Controls diversity of output generation.
 * @param {number} [options.modelOptions.topK] - Controls the number of top tokens to consider.
 * @param {string[]} [options.modelOptions.stop] - Sequences where the API will stop generating further tokens.
 * @param {boolean} [options.modelOptions.stream] - Whether to stream the response.
 * @param {string} options.userId - The user ID for tracking and personalization.
 * @param {string} [options.proxy] - Proxy server URL.
 * @param {string} [options.reverseProxyUrl] - URL for a reverse proxy, if used.
 *
 * @returns {Object} Configuration options for creating an Anthropic LLM instance, with null and undefined values removed.
 */
function getLLMConfig(apiKey, options = {}) {
  const systemOptions = {
    thinking: options.modelOptions.thinking ?? anthropicSettings.thinking.default,
    promptCache: options.modelOptions.promptCache ?? anthropicSettings.promptCache.default,
    thinkingBudget: options.modelOptions.thinkingBudget ?? anthropicSettings.thinkingBudget.default,
  };
  for (let key in systemOptions) {
    delete options.modelOptions[key];
  }
  const defaultOptions = {
    model: anthropicSettings.model.default,
    maxOutputTokens: anthropicSettings.maxOutputTokens.default,
    stream: true,
  };

  const mergedOptions = Object.assign(defaultOptions, options.modelOptions);

  /** @type {AnthropicClientOptions} */
  let requestOptions = {
    apiKey,
    model: mergedOptions.model,
    stream: mergedOptions.stream,
    temperature: mergedOptions.temperature,
    stopSequences: mergedOptions.stop,
    maxTokens:
      mergedOptions.maxOutputTokens || anthropicSettings.maxOutputTokens.reset(mergedOptions.model),
    clientOptions: {},
    invocationKwargs: {
      metadata: {
        user_id: options.userId,
      },
    },
  };

  requestOptions = configureReasoning(requestOptions, systemOptions);

  if (!/claude-3[-.]7/.test(mergedOptions.model)) {
    requestOptions.topP = mergedOptions.topP;
    requestOptions.topK = mergedOptions.topK;
  } else if (requestOptions.thinking == null) {
    requestOptions.topP = mergedOptions.topP;
    requestOptions.topK = mergedOptions.topK;
  }

  const supportsCacheControl =
    systemOptions.promptCache === true && checkPromptCacheSupport(requestOptions.model);
  const headers = getClaudeHeaders(requestOptions.model, supportsCacheControl);
  if (headers) {
    requestOptions.clientOptions.defaultHeaders = headers;
  }

  if (options.proxy) {
    const proxyAgent = new ProxyAgent(options.proxy);
    requestOptions.clientOptions.fetchOptions = {
      dispatcher: proxyAgent,
    };
  }

  if (options.reverseProxyUrl) {
    requestOptions.clientOptions.baseURL = options.reverseProxyUrl;
    requestOptions.anthropicApiUrl = options.reverseProxyUrl;
  }

  const tools = [];

  if (mergedOptions.web_search) {
    tools.push({
      type: 'web_search_20250305',
      name: 'web_search',
    });
  }

  return {
    tools,
    /** @type {AnthropicClientOptions} */
    llmConfig: removeNullishValues(requestOptions),
  };
}

module.exports = { getLLMConfig };
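The function now lives in `@librechat/api` (see the import change above). A hedged call sketch with illustrative values, matching the behavior pinned down by the spec file below:

const { getLLMConfig } = require('@librechat/api');

// Illustrative values only: yields a streaming config with a ProxyAgent
// dispatcher, topK/topP applied (not a claude-3-7 model), and
// prompt-caching beta headers for the supported model.
const { tools, llmConfig } = getLLMConfig('test-api-key', {
  userId: 'user-123',
  proxy: 'http://proxy:8080',
  modelOptions: { model: 'claude-3-5-sonnet', topK: 10, topP: 0.9, promptCache: true },
});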
@@ -1,341 +0,0 @@
const { getLLMConfig } = require('~/server/services/Endpoints/anthropic/llm');

jest.mock('https-proxy-agent', () => ({
  HttpsProxyAgent: jest.fn().mockImplementation((proxy) => ({ proxy })),
}));

describe('getLLMConfig', () => {
  beforeEach(() => {
    jest.clearAllMocks();
  });

  it('should create a basic configuration with default values', () => {
    const result = getLLMConfig('test-api-key', { modelOptions: {} });

    expect(result.llmConfig).toHaveProperty('apiKey', 'test-api-key');
    expect(result.llmConfig).toHaveProperty('model', 'claude-3-5-sonnet-latest');
    expect(result.llmConfig).toHaveProperty('stream', true);
    expect(result.llmConfig).toHaveProperty('maxTokens');
  });

  it('should include proxy settings when provided', () => {
    const result = getLLMConfig('test-api-key', {
      modelOptions: {},
      proxy: 'http://proxy:8080',
    });

    expect(result.llmConfig.clientOptions).toHaveProperty('fetchOptions');
    expect(result.llmConfig.clientOptions.fetchOptions).toHaveProperty('dispatcher');
    expect(result.llmConfig.clientOptions.fetchOptions.dispatcher).toBeDefined();
    expect(result.llmConfig.clientOptions.fetchOptions.dispatcher.constructor.name).toBe(
      'ProxyAgent',
    );
  });

  it('should include reverse proxy URL when provided', () => {
    const result = getLLMConfig('test-api-key', {
      modelOptions: {},
      reverseProxyUrl: 'http://reverse-proxy',
    });

    expect(result.llmConfig.clientOptions).toHaveProperty('baseURL', 'http://reverse-proxy');
    expect(result.llmConfig).toHaveProperty('anthropicApiUrl', 'http://reverse-proxy');
  });

  it('should include topK and topP for non-Claude-3.7 models', () => {
    const result = getLLMConfig('test-api-key', {
      modelOptions: {
        model: 'claude-3-opus',
        topK: 10,
        topP: 0.9,
      },
    });

    expect(result.llmConfig).toHaveProperty('topK', 10);
    expect(result.llmConfig).toHaveProperty('topP', 0.9);
  });

  it('should include topK and topP for Claude-3.5 models', () => {
    const result = getLLMConfig('test-api-key', {
      modelOptions: {
        model: 'claude-3-5-sonnet',
        topK: 10,
        topP: 0.9,
      },
    });

    expect(result.llmConfig).toHaveProperty('topK', 10);
    expect(result.llmConfig).toHaveProperty('topP', 0.9);
  });

  it('should NOT include topK and topP for Claude-3-7 models with thinking enabled (hyphen notation)', () => {
    const result = getLLMConfig('test-api-key', {
      modelOptions: {
        model: 'claude-3-7-sonnet',
        topK: 10,
        topP: 0.9,
        thinking: true,
      },
    });

    expect(result.llmConfig).not.toHaveProperty('topK');
    expect(result.llmConfig).not.toHaveProperty('topP');
    expect(result.llmConfig).toHaveProperty('thinking');
    expect(result.llmConfig.thinking).toHaveProperty('type', 'enabled');
    // When thinking is enabled, it uses the default thinkingBudget of 2000
    expect(result.llmConfig.thinking).toHaveProperty('budget_tokens', 2000);
  });

  it('should add "prompt-caching" and "context-1m" beta headers for claude-sonnet-4 model', () => {
    const modelOptions = {
      model: 'claude-sonnet-4-20250514',
      promptCache: true,
    };
    const result = getLLMConfig('test-key', { modelOptions });
    const clientOptions = result.llmConfig.clientOptions;
    expect(clientOptions.defaultHeaders).toBeDefined();
    expect(clientOptions.defaultHeaders).toHaveProperty('anthropic-beta');
    expect(clientOptions.defaultHeaders['anthropic-beta']).toBe(
      'prompt-caching-2024-07-31,context-1m-2025-08-07',
    );
  });

  it('should add "prompt-caching" and "context-1m" beta headers for claude-sonnet-4 model formats', () => {
    const modelVariations = [
      'claude-sonnet-4-20250514',
      'claude-sonnet-4-latest',
      'anthropic/claude-sonnet-4-20250514',
    ];

    modelVariations.forEach((model) => {
      const modelOptions = { model, promptCache: true };
      const result = getLLMConfig('test-key', { modelOptions });
      const clientOptions = result.llmConfig.clientOptions;
      expect(clientOptions.defaultHeaders).toBeDefined();
      expect(clientOptions.defaultHeaders).toHaveProperty('anthropic-beta');
      expect(clientOptions.defaultHeaders['anthropic-beta']).toBe(
        'prompt-caching-2024-07-31,context-1m-2025-08-07',
      );
    });
  });

  it('should NOT include topK and topP for Claude-3.7 models with thinking enabled (decimal notation)', () => {
    const result = getLLMConfig('test-api-key', {
      modelOptions: {
        model: 'claude-3.7-sonnet',
        topK: 10,
        topP: 0.9,
        thinking: true,
      },
    });

    expect(result.llmConfig).not.toHaveProperty('topK');
    expect(result.llmConfig).not.toHaveProperty('topP');
    expect(result.llmConfig).toHaveProperty('thinking');
    expect(result.llmConfig.thinking).toHaveProperty('type', 'enabled');
    // When thinking is enabled, it uses the default thinkingBudget of 2000
    expect(result.llmConfig.thinking).toHaveProperty('budget_tokens', 2000);
  });

  it('should handle custom maxOutputTokens', () => {
    const result = getLLMConfig('test-api-key', {
      modelOptions: {
        model: 'claude-3-opus',
        maxOutputTokens: 2048,
      },
    });

    expect(result.llmConfig).toHaveProperty('maxTokens', 2048);
  });

  it('should handle promptCache setting', () => {
    const result = getLLMConfig('test-api-key', {
      modelOptions: {
        model: 'claude-3-5-sonnet',
        promptCache: true,
      },
    });

    // We're not checking specific header values since that depends on the actual helper function
    // Just verifying that the promptCache setting is processed
    expect(result.llmConfig).toBeDefined();
  });

  it('should include topK and topP for Claude-3.7 models when thinking is not enabled', () => {
    // Test with thinking explicitly set to null/undefined
    const result = getLLMConfig('test-api-key', {
      modelOptions: {
        model: 'claude-3-7-sonnet',
        topK: 10,
        topP: 0.9,
        thinking: false,
      },
    });

    expect(result.llmConfig).toHaveProperty('topK', 10);
    expect(result.llmConfig).toHaveProperty('topP', 0.9);

    // Test with thinking explicitly set to false
    const result2 = getLLMConfig('test-api-key', {
      modelOptions: {
        model: 'claude-3-7-sonnet',
        topK: 10,
        topP: 0.9,
        thinking: false,
      },
    });

    expect(result2.llmConfig).toHaveProperty('topK', 10);
    expect(result2.llmConfig).toHaveProperty('topP', 0.9);

    // Test with decimal notation as well
    const result3 = getLLMConfig('test-api-key', {
      modelOptions: {
        model: 'claude-3.7-sonnet',
        topK: 10,
        topP: 0.9,
        thinking: false,
      },
    });

    expect(result3.llmConfig).toHaveProperty('topK', 10);
    expect(result3.llmConfig).toHaveProperty('topP', 0.9);
  });

  describe('Edge cases', () => {
    it('should handle missing apiKey', () => {
      const result = getLLMConfig(undefined, { modelOptions: {} });
      expect(result.llmConfig).not.toHaveProperty('apiKey');
    });

    it('should handle empty modelOptions', () => {
      expect(() => {
        getLLMConfig('test-api-key', {});
      }).toThrow("Cannot read properties of undefined (reading 'thinking')");
    });

    it('should handle no options parameter', () => {
      expect(() => {
        getLLMConfig('test-api-key');
      }).toThrow("Cannot read properties of undefined (reading 'thinking')");
    });

    it('should handle temperature, stop sequences, and stream settings', () => {
      const result = getLLMConfig('test-api-key', {
        modelOptions: {
          temperature: 0.7,
          stop: ['\n\n', 'END'],
          stream: false,
        },
      });

      expect(result.llmConfig).toHaveProperty('temperature', 0.7);
      expect(result.llmConfig).toHaveProperty('stopSequences', ['\n\n', 'END']);
      expect(result.llmConfig).toHaveProperty('stream', false);
    });

    it('should handle maxOutputTokens when explicitly set to falsy value', () => {
      const result = getLLMConfig('test-api-key', {
        modelOptions: {
          model: 'claude-3-opus',
          maxOutputTokens: null,
        },
      });

      // The actual anthropicSettings.maxOutputTokens.reset('claude-3-opus') returns 4096
      expect(result.llmConfig).toHaveProperty('maxTokens', 4096);
    });

    it('should handle both proxy and reverseProxyUrl', () => {
      const result = getLLMConfig('test-api-key', {
        modelOptions: {},
        proxy: 'http://proxy:8080',
        reverseProxyUrl: 'https://reverse-proxy.com',
      });

      expect(result.llmConfig.clientOptions).toHaveProperty('fetchOptions');
      expect(result.llmConfig.clientOptions.fetchOptions).toHaveProperty('dispatcher');
      expect(result.llmConfig.clientOptions.fetchOptions.dispatcher).toBeDefined();
      expect(result.llmConfig.clientOptions.fetchOptions.dispatcher.constructor.name).toBe(
        'ProxyAgent',
      );
      expect(result.llmConfig.clientOptions).toHaveProperty('baseURL', 'https://reverse-proxy.com');
      expect(result.llmConfig).toHaveProperty('anthropicApiUrl', 'https://reverse-proxy.com');
    });

    it('should handle prompt cache with supported model', () => {
      const result = getLLMConfig('test-api-key', {
        modelOptions: {
          model: 'claude-3-5-sonnet',
          promptCache: true,
        },
      });

      // claude-3-5-sonnet supports prompt caching and should get the appropriate headers
      expect(result.llmConfig.clientOptions.defaultHeaders).toEqual({
        'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15,prompt-caching-2024-07-31',
      });
    });

    it('should handle thinking and thinkingBudget options', () => {
      const result = getLLMConfig('test-api-key', {
        modelOptions: {
          model: 'claude-3-7-sonnet',
          thinking: true,
          thinkingBudget: 10000, // This exceeds the default max_tokens of 8192
        },
      });

      // The function should add thinking configuration for claude-3-7 models
      expect(result.llmConfig).toHaveProperty('thinking');
      expect(result.llmConfig.thinking).toHaveProperty('type', 'enabled');
      // With claude-3-7-sonnet, the max_tokens default is 8192
      // Budget tokens gets adjusted to 90% of max_tokens (8192 * 0.9 = 7372) when it exceeds max_tokens
      expect(result.llmConfig.thinking).toHaveProperty('budget_tokens', 7372);

      // Test with budget_tokens within max_tokens limit
      const result2 = getLLMConfig('test-api-key', {
        modelOptions: {
          model: 'claude-3-7-sonnet',
          thinking: true,
          thinkingBudget: 2000,
        },
      });

      expect(result2.llmConfig.thinking).toHaveProperty('budget_tokens', 2000);
    });

    it('should remove system options from modelOptions', () => {
      const modelOptions = {
        model: 'claude-3-opus',
        thinking: true,
        promptCache: true,
        thinkingBudget: 1000,
        temperature: 0.5,
      };

      getLLMConfig('test-api-key', { modelOptions });

      expect(modelOptions).not.toHaveProperty('thinking');
      expect(modelOptions).not.toHaveProperty('promptCache');
      expect(modelOptions).not.toHaveProperty('thinkingBudget');
      expect(modelOptions).toHaveProperty('temperature', 0.5);
    });

    it('should handle all nullish values removal', () => {
      const result = getLLMConfig('test-api-key', {
        modelOptions: {
          temperature: null,
          topP: undefined,
          topK: 0,
          stop: [],
        },
      });

      expect(result.llmConfig).not.toHaveProperty('temperature');
      expect(result.llmConfig).not.toHaveProperty('topP');
      expect(result.llmConfig).toHaveProperty('topK', 0);
      expect(result.llmConfig).toHaveProperty('stopSequences', []);
    });
  });
});
@@ -1,3 +1,4 @@
const { getModelMaxTokens } = require('@librechat/api');
const { createContentAggregator } = require('@librechat/agents');
const {
  EModelEndpoint,
@@ -7,7 +8,6 @@ const {
const { getDefaultHandlers } = require('~/server/controllers/agents/callbacks');
const getOptions = require('~/server/services/Endpoints/bedrock/options');
const AgentClient = require('~/server/controllers/agents/client');
const { getModelMaxTokens } = require('~/utils');

const initializeClient = async ({ req, res, endpointOption }) => {
  if (!endpointOption) {
@@ -552,7 +552,7 @@ const processAgentFileUpload = async ({ req, res, metadata }) => {
      throw new Error('File search is not enabled for Agents');
    }
    // Note: File search processing continues to dual storage logic below
  } else if (tool_resource === EToolResources.ocr) {
  } else if (tool_resource === EToolResources.context) {
    const { file_id, temp_file_id = null } = metadata;

/**
@@ -594,10 +594,9 @@ const processAgentFileUpload = async ({ req, res, metadata }) => {

  const fileConfig = mergeFileConfig(appConfig.fileConfig);

  const shouldUseOCR = fileConfig.checkType(
    file.mimetype,
    fileConfig.ocr?.supportedMimeTypes || [],
  );
  const shouldUseOCR =
    appConfig?.ocr != null &&
    fileConfig.checkType(file.mimetype, fileConfig.ocr?.supportedMimeTypes || []);

  if (shouldUseOCR && !(await checkCapability(req, AgentCapabilities.ocr))) {
    throw new Error('OCR capability is not enabled for Agents');
@@ -626,7 +625,7 @@ const processAgentFileUpload = async ({ req, res, metadata }) => {
  );

  if (!shouldUseText) {
    throw new Error(`File type ${file.mimetype} is not supported for OCR or text parsing`);
    throw new Error(`File type ${file.mimetype} is not supported for text parsing.`);
  }

  const { text, bytes } = await parseText({ req, file, file_id });
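The reworked gate above only routes a file to OCR when OCR is actually configured, instead of deciding on mimetype alone. Condensed into one predicate (the inputs are stand-ins for the values in `processAgentFileUpload`):

// Stand-in inputs; mirrors the replacement logic in the hunk above.
function shouldUseOCR(appConfig, fileConfig, file) {
  return (
    appConfig?.ocr != null &&
    fileConfig.checkType(file.mimetype, fileConfig.ocr?.supportedMimeTypes || [])
  );
}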
@@ -20,10 +20,10 @@ const {
  ContentTypes,
  isAssistantsEndpoint,
} = require('librechat-data-provider');
const { getMCPManager, getFlowStateManager, getOAuthReconnectionManager } = require('~/config');
const { findToken, createToken, updateToken } = require('~/models');
const { getMCPManager, getFlowStateManager } = require('~/config');
const { getCachedTools, getAppConfig } = require('./Config');
const { reinitMCPServer } = require('./Tools/mcp');
const { getAppConfig } = require('./Config');
const { getLogStores } = require('~/cache');

/**
@@ -152,8 +152,8 @@ function createOAuthCallback({ runStepEmitter, runStepDeltaEmitter }) {

/**
 * @param {Object} params
 * @param {ServerRequest} params.req - The Express request object, containing user/request info.
 * @param {ServerResponse} params.res - The Express response object for sending events.
 * @param {IUser} params.user - The user from the request object.
 * @param {string} params.serverName
 * @param {AbortSignal} params.signal
 * @param {string} params.model
@@ -161,9 +161,9 @@ function createOAuthCallback({ runStepEmitter, runStepDeltaEmitter }) {
 * @param {Record<string, Record<string, string>>} [params.userMCPAuthMap]
 * @returns { Promise<Array<typeof tool | { _call: (toolInput: Object | string) => unknown}>> } An object with `_call` method to execute the tool input.
 */
async function reconnectServer({ req, res, index, signal, serverName, userMCPAuthMap }) {
async function reconnectServer({ res, user, index, signal, serverName, userMCPAuthMap }) {
  const runId = Constants.USE_PRELIM_RESPONSE_MESSAGE_ID;
  const flowId = `${req.user?.id}:${serverName}:${Date.now()}`;
  const flowId = `${user.id}:${serverName}:${Date.now()}`;
  const flowManager = getFlowStateManager(getLogStores(CacheKeys.FLOWS));
  const stepId = 'step_oauth_login_' + serverName;
  const toolCall = {
@@ -192,7 +192,7 @@ async function reconnectServer({ req, res, index, signal, serverName, userMCPAut
    flowManager,
  });
  return await reinitMCPServer({
    req,
    user,
    signal,
    serverName,
    oauthStart,
@@ -211,8 +211,8 @@ async function reconnectServer({ req, res, index, signal, serverName, userMCPAut
 * i.e. `availableTools`, and will reinitialize the MCP server to ensure all tools are generated.
 *
 * @param {Object} params
 * @param {ServerRequest} params.req - The Express request object, containing user/request info.
 * @param {ServerResponse} params.res - The Express response object for sending events.
 * @param {IUser} params.user - The user from the request object.
 * @param {string} params.serverName
 * @param {string} params.model
 * @param {Providers | EModelEndpoint} params.provider - The provider for the tool.
@@ -221,8 +221,8 @@ async function reconnectServer({ req, res, index, signal, serverName, userMCPAut
 * @param {Record<string, Record<string, string>>} [params.userMCPAuthMap]
 * @returns { Promise<Array<typeof tool | { _call: (toolInput: Object | string) => unknown}>> } An object with `_call` method to execute the tool input.
 */
async function createMCPTools({ req, res, index, signal, serverName, provider, userMCPAuthMap }) {
  const result = await reconnectServer({ req, res, index, signal, serverName, userMCPAuthMap });
async function createMCPTools({ res, user, index, signal, serverName, provider, userMCPAuthMap }) {
  const result = await reconnectServer({ res, user, index, signal, serverName, userMCPAuthMap });
  if (!result || !result.tools) {
    logger.warn(`[MCP][${serverName}] Failed to reinitialize MCP server.`);
    return;
@@ -231,8 +231,8 @@ async function createMCPTools({ req, res, index, signal, serverName, provider, u
  const serverTools = [];
  for (const tool of result.tools) {
    const toolInstance = await createMCPTool({
      req,
      res,
      user,
      provider,
      userMCPAuthMap,
      availableTools: result.availableTools,
@@ -249,8 +249,8 @@ async function createMCPTools({ req, res, index, signal, serverName, provider, u
/**
 * Creates a single tool from the specified MCP Server via `toolKey`.
 * @param {Object} params
 * @param {ServerRequest} params.req - The Express request object, containing user/request info.
 * @param {ServerResponse} params.res - The Express response object for sending events.
 * @param {IUser} params.user - The user from the request object.
 * @param {string} params.toolKey - The toolKey for the tool.
 * @param {string} params.model - The model for the tool.
 * @param {number} [params.index]
@@ -261,26 +261,31 @@ async function createMCPTools({ req, res, index, signal, serverName, provider, u
 * @returns { Promise<typeof tool | { _call: (toolInput: Object | string) => unknown}> } An object with `_call` method to execute the tool input.
 */
async function createMCPTool({
  req,
  res,
  user,
  index,
  signal,
  toolKey,
  provider,
  userMCPAuthMap,
  availableTools: tools,
  availableTools,
}) {
  const [toolName, serverName] = toolKey.split(Constants.mcp_delimiter);

  const availableTools =
    tools ?? (await getCachedTools({ userId: req.user?.id, includeGlobal: true }));
  /** @type {LCTool | undefined} */
  let toolDefinition = availableTools?.[toolKey]?.function;
  if (!toolDefinition) {
    logger.warn(
      `[MCP][${serverName}][${toolName}] Requested tool not found in available tools, re-initializing MCP server.`,
    );
    const result = await reconnectServer({ req, res, index, signal, serverName, userMCPAuthMap });
    const result = await reconnectServer({
      res,
      user,
      index,
      signal,
      serverName,
      userMCPAuthMap,
    });
    toolDefinition = result?.availableTools?.[toolKey]?.function;
  }

@@ -437,10 +442,10 @@ async function getMCPSetupData(userId) {
  }

  const mcpManager = getMCPManager(userId);
  /** @type {ReturnType<MCPManager['getAllConnections']>} */
  /** @type {Map<string, import('@librechat/api').MCPConnection>} */
  let appConnections = new Map();
  try {
    appConnections = (await mcpManager.getAllConnections()) || new Map();
    appConnections = (await mcpManager.appConnections?.getAll()) || new Map();
  } catch (error) {
    logger.error(`[MCP][User: ${userId}] Error getting app connections:`, error);
  }
@@ -538,13 +543,20 @@ async function getServerConnectionStatus(
  const baseConnectionState = getConnectionState();
  let finalConnectionState = baseConnectionState;

  // connection state overrides specific to OAuth servers
  if (baseConnectionState === 'disconnected' && oauthServers.has(serverName)) {
    const { hasActiveFlow, hasFailedFlow } = await checkOAuthFlowStatus(userId, serverName);

    if (hasFailedFlow) {
      finalConnectionState = 'error';
    } else if (hasActiveFlow) {
    // check if server is actively being reconnected
    const oauthReconnectionManager = getOAuthReconnectionManager();
    if (oauthReconnectionManager.isReconnecting(userId, serverName)) {
      finalConnectionState = 'connecting';
    } else {
      const { hasActiveFlow, hasFailedFlow } = await checkOAuthFlowStatus(userId, serverName);

      if (hasFailedFlow) {
        finalConnectionState = 'error';
      } else if (hasActiveFlow) {
        finalConnectionState = 'connecting';
      }
    }
  }
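The spec file below pins down the new contract, where a `user` object replaces the old `req` parameter throughout the chain. For quick reference, the call shape after this change (values mirror the test fixtures and are illustrative; `res` is an Express response already in scope):

// Illustrative call, mirroring the MCP.spec.js fixtures below.
const serverTools = await createMCPTools({
  res,                      // Express response, used to stream OAuth events
  user: { id: 'user-123' }, // previously derived from req.user
  serverName: 'test-server',
  provider: 'openai',
  signal: new AbortController().signal,
  userMCPAuthMap: {},
});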
@@ -1,13 +1,45 @@
const { logger } = require('@librechat/data-schemas');
const { MCPOAuthHandler } = require('@librechat/api');
const { CacheKeys } = require('librechat-data-provider');
const { getMCPSetupData, checkOAuthFlowStatus, getServerConnectionStatus } = require('./MCP');
const {
  createMCPTool,
  createMCPTools,
  getMCPSetupData,
  checkOAuthFlowStatus,
  getServerConnectionStatus,
} = require('./MCP');

// Mock all dependencies
jest.mock('@librechat/data-schemas', () => ({
  logger: {
    debug: jest.fn(),
    error: jest.fn(),
    info: jest.fn(),
    warn: jest.fn(),
  },
}));

jest.mock('@langchain/core/tools', () => ({
  tool: jest.fn((fn, config) => {
    const toolInstance = { _call: fn, ...config };
    return toolInstance;
  }),
}));

jest.mock('@librechat/agents', () => ({
  Providers: {
    VERTEXAI: 'vertexai',
    GOOGLE: 'google',
  },
  StepTypes: {
    TOOL_CALLS: 'tool_calls',
  },
  GraphEvents: {
    ON_RUN_STEP_DELTA: 'on_run_step_delta',
    ON_RUN_STEP: 'on_run_step',
  },
  Constants: {
    CONTENT_AND_ARTIFACT: 'content_and_artifact',
  },
}));

@@ -15,12 +47,27 @@ jest.mock('@librechat/api', () => ({
  MCPOAuthHandler: {
    generateFlowId: jest.fn(),
  },
  sendEvent: jest.fn(),
  normalizeServerName: jest.fn((name) => name),
  convertWithResolvedRefs: jest.fn((params) => params),
}));

jest.mock('librechat-data-provider', () => ({
  CacheKeys: {
    FLOWS: 'flows',
  },
  Constants: {
    USE_PRELIM_RESPONSE_MESSAGE_ID: 'prelim_response_id',
    mcp_delimiter: '::',
    mcp_prefix: 'mcp_',
  },
  ContentTypes: {
    TEXT: 'text',
  },
  isAssistantsEndpoint: jest.fn(() => false),
  Time: {
    TWO_MINUTES: 120000,
  },
}));

jest.mock('./Config', () => ({
@@ -31,6 +78,7 @@ jest.mock('./Config', () => ({
jest.mock('~/config', () => ({
  getMCPManager: jest.fn(),
  getFlowStateManager: jest.fn(),
  getOAuthReconnectionManager: jest.fn(),
}));

jest.mock('~/cache', () => ({
@@ -43,19 +91,23 @@ jest.mock('~/models', () => ({
  updateToken: jest.fn(),
}));

jest.mock('./Tools/mcp', () => ({
  reinitMCPServer: jest.fn(),
}));

describe('tests for the new helper functions used by the MCP connection status endpoints', () => {
  let mockLoadCustomConfig;
  let mockGetMCPManager;
  let mockGetFlowStateManager;
  let mockGetLogStores;
  let mockGetOAuthReconnectionManager;

  beforeEach(() => {
    jest.clearAllMocks();

    mockLoadCustomConfig = require('./Config').loadCustomConfig;
    mockGetMCPManager = require('~/config').getMCPManager;
    mockGetFlowStateManager = require('~/config').getFlowStateManager;
    mockGetLogStores = require('~/cache').getLogStores;
    mockGetOAuthReconnectionManager = require('~/config').getOAuthReconnectionManager;
  });

  describe('getMCPSetupData', () => {
@@ -71,7 +123,7 @@ describe('tests for the new helper functions used by the MCP connection status e
    beforeEach(() => {
      mockGetAppConfig = require('./Config').getAppConfig;
      mockGetMCPManager.mockReturnValue({
        getAllConnections: jest.fn(() => new Map()),
        appConnections: { getAll: jest.fn(() => new Map()) },
        getUserConnections: jest.fn(() => new Map()),
        getOAuthServers: jest.fn(() => new Set()),
      });
@@ -85,7 +137,7 @@ describe('tests for the new helper functions used by the MCP connection status e
      const mockOAuthServers = new Set(['server2']);

      const mockMCPManager = {
        getAllConnections: jest.fn(() => mockAppConnections),
        appConnections: { getAll: jest.fn(() => mockAppConnections) },
        getUserConnections: jest.fn(() => mockUserConnections),
        getOAuthServers: jest.fn(() => mockOAuthServers),
      };
@@ -95,7 +147,7 @@ describe('tests for the new helper functions used by the MCP connection status e

      expect(mockGetAppConfig).toHaveBeenCalled();
      expect(mockGetMCPManager).toHaveBeenCalledWith(mockUserId);
      expect(mockMCPManager.getAllConnections).toHaveBeenCalled();
      expect(mockMCPManager.appConnections.getAll).toHaveBeenCalled();
      expect(mockMCPManager.getUserConnections).toHaveBeenCalledWith(mockUserId);
      expect(mockMCPManager.getOAuthServers).toHaveBeenCalled();

@@ -116,7 +168,7 @@ describe('tests for the new helper functions used by the MCP connection status e
      mockGetAppConfig.mockResolvedValue({ mcpConfig: mockConfig.mcpServers });

      const mockMCPManager = {
        getAllConnections: jest.fn(() => null),
        appConnections: { getAll: jest.fn(() => null) },
        getUserConnections: jest.fn(() => null),
        getOAuthServers: jest.fn(() => null),
      };
@@ -354,6 +406,12 @@ describe('tests for the new helper functions used by the MCP connection status e
      const userConnections = new Map();
      const oauthServers = new Set([mockServerName]);

      // Mock OAuthReconnectionManager
      const mockOAuthReconnectionManager = {
        isReconnecting: jest.fn(() => false),
      };
      mockGetOAuthReconnectionManager.mockReturnValue(mockOAuthReconnectionManager);

      const result = await getServerConnectionStatus(
        mockUserId,
        mockServerName,
@@ -370,6 +428,12 @@ describe('tests for the new helper functions used by the MCP connection status e
      const userConnections = new Map();
      const oauthServers = new Set([mockServerName]);

      // Mock OAuthReconnectionManager
      const mockOAuthReconnectionManager = {
        isReconnecting: jest.fn(() => false),
      };
      mockGetOAuthReconnectionManager.mockReturnValue(mockOAuthReconnectionManager);

      // Mock flow state to return failed flow
      const mockFlowManager = {
        getFlowState: jest.fn(() => ({
@@ -401,6 +465,12 @@ describe('tests for the new helper functions used by the MCP connection status e
      const userConnections = new Map();
      const oauthServers = new Set([mockServerName]);

      // Mock OAuthReconnectionManager
      const mockOAuthReconnectionManager = {
        isReconnecting: jest.fn(() => false),
      };
      mockGetOAuthReconnectionManager.mockReturnValue(mockOAuthReconnectionManager);

      // Mock flow state to return active flow
      const mockFlowManager = {
        getFlowState: jest.fn(() => ({
@@ -432,6 +502,12 @@ describe('tests for the new helper functions used by the MCP connection status e
      const userConnections = new Map();
      const oauthServers = new Set([mockServerName]);

      // Mock OAuthReconnectionManager
      const mockOAuthReconnectionManager = {
        isReconnecting: jest.fn(() => false),
      };
      mockGetOAuthReconnectionManager.mockReturnValue(mockOAuthReconnectionManager);

      // Mock flow state to return no flow
      const mockFlowManager = {
        getFlowState: jest.fn(() => null),
@@ -454,6 +530,35 @@ describe('tests for the new helper functions used by the MCP connection status e
      });
    });

    it('should return connecting state when OAuth server is reconnecting', async () => {
      const appConnections = new Map();
      const userConnections = new Map();
      const oauthServers = new Set([mockServerName]);

      // Mock OAuthReconnectionManager to return true for isReconnecting
      const mockOAuthReconnectionManager = {
        isReconnecting: jest.fn(() => true),
      };
      mockGetOAuthReconnectionManager.mockReturnValue(mockOAuthReconnectionManager);

      const result = await getServerConnectionStatus(
        mockUserId,
        mockServerName,
        appConnections,
        userConnections,
        oauthServers,
      );

      expect(result).toEqual({
        requiresOAuth: true,
        connectionState: 'connecting',
      });
      expect(mockOAuthReconnectionManager.isReconnecting).toHaveBeenCalledWith(
        mockUserId,
        mockServerName,
      );
    });

    it('should not check OAuth flow status when server is connected', async () => {
      const mockFlowManager = {
        getFlowState: jest.fn(),
@@ -511,3 +616,275 @@ describe('tests for the new helper functions used by the MCP connection status e
    });
  });
});

describe('User parameter passing tests', () => {
  let mockReinitMCPServer;
  let mockGetFlowStateManager;
  let mockGetLogStores;

  beforeEach(() => {
    jest.clearAllMocks();
    mockReinitMCPServer = require('./Tools/mcp').reinitMCPServer;
    mockGetFlowStateManager = require('~/config').getFlowStateManager;
    mockGetLogStores = require('~/cache').getLogStores;

    // Setup default mocks
    mockGetLogStores.mockReturnValue({});
    mockGetFlowStateManager.mockReturnValue({
      createFlowWithHandler: jest.fn(),
      failFlow: jest.fn(),
    });
  });

  describe('createMCPTools', () => {
    it('should pass user parameter to reinitMCPServer when calling reconnectServer internally', async () => {
      const mockUser = { id: 'test-user-123', name: 'Test User' };
      const mockRes = { write: jest.fn(), flush: jest.fn() };
      const mockSignal = new AbortController().signal;

      mockReinitMCPServer.mockResolvedValue({
        tools: [{ name: 'test-tool' }],
        availableTools: {
          'test-tool::test-server': {
            function: {
              description: 'Test tool',
              parameters: { type: 'object', properties: {} },
            },
          },
        },
      });

      await createMCPTools({
        res: mockRes,
        user: mockUser,
        serverName: 'test-server',
        provider: 'openai',
        signal: mockSignal,
        userMCPAuthMap: {},
      });

      // Verify reinitMCPServer was called with the user
      expect(mockReinitMCPServer).toHaveBeenCalledWith(
        expect.objectContaining({
          user: mockUser,
          serverName: 'test-server',
        }),
      );
      expect(mockReinitMCPServer.mock.calls[0][0].user).toBe(mockUser);
    });

    it('should throw error if user is not provided', async () => {
      const mockRes = { write: jest.fn(), flush: jest.fn() };

      mockReinitMCPServer.mockResolvedValue({
        tools: [],
        availableTools: {},
      });

      // Call without user should throw error
      await expect(
        createMCPTools({
          res: mockRes,
          user: undefined,
          serverName: 'test-server',
          provider: 'openai',
          userMCPAuthMap: {},
        }),
      ).rejects.toThrow("Cannot read properties of undefined (reading 'id')");

      // Verify reinitMCPServer was not called due to early error
      expect(mockReinitMCPServer).not.toHaveBeenCalled();
    });
  });

  describe('createMCPTool', () => {
    it('should pass user parameter to reinitMCPServer when tool not in cache', async () => {
      const mockUser = { id: 'test-user-456', email: 'test@example.com' };
      const mockRes = { write: jest.fn(), flush: jest.fn() };
      const mockSignal = new AbortController().signal;

      mockReinitMCPServer.mockResolvedValue({
        availableTools: {
          'test-tool::test-server': {
            function: {
              description: 'Test tool',
              parameters: { type: 'object', properties: {} },
            },
          },
        },
      });

      // Call without availableTools to trigger reinit
      await createMCPTool({
        res: mockRes,
        user: mockUser,
        toolKey: 'test-tool::test-server',
        provider: 'openai',
        signal: mockSignal,
        userMCPAuthMap: {},
        availableTools: undefined, // Force reinit
      });

      // Verify reinitMCPServer was called with the user
      expect(mockReinitMCPServer).toHaveBeenCalledWith(
        expect.objectContaining({
          user: mockUser,
          serverName: 'test-server',
        }),
      );
      expect(mockReinitMCPServer.mock.calls[0][0].user).toBe(mockUser);
    });

    it('should not call reinitMCPServer when tool is in cache', async () => {
      const mockUser = { id: 'test-user-789' };
      const mockRes = { write: jest.fn(), flush: jest.fn() };

      const availableTools = {
        'test-tool::test-server': {
          function: {
            description: 'Cached tool',
            parameters: { type: 'object', properties: {} },
          },
        },
      };

      await createMCPTool({
        res: mockRes,
        user: mockUser,
        toolKey: 'test-tool::test-server',
        provider: 'openai',
        userMCPAuthMap: {},
        availableTools: availableTools,
      });

      // Verify reinitMCPServer was NOT called since tool was in cache
      expect(mockReinitMCPServer).not.toHaveBeenCalled();
    });
  });

  describe('reinitMCPServer (via reconnectServer)', () => {
    it('should always receive user parameter when called from createMCPTools', async () => {
      const mockUser = { id: 'user-001', role: 'admin' };
      const mockRes = { write: jest.fn(), flush: jest.fn() };

      // Track all calls to reinitMCPServer
      const reinitCalls = [];
      mockReinitMCPServer.mockImplementation((params) => {
        reinitCalls.push(params);
        return Promise.resolve({
          tools: [{ name: 'tool1' }, { name: 'tool2' }],
          availableTools: {
            'tool1::server1': { function: { description: 'Tool 1', parameters: {} } },
            'tool2::server1': { function: { description: 'Tool 2', parameters: {} } },
          },
        });
      });

      await createMCPTools({
        res: mockRes,
        user: mockUser,
        serverName: 'server1',
        provider: 'anthropic',
        userMCPAuthMap: {},
      });

      // Verify all calls to reinitMCPServer had the user
      expect(reinitCalls.length).toBeGreaterThan(0);
      reinitCalls.forEach((call) => {
        expect(call.user).toBe(mockUser);
        expect(call.user.id).toBe('user-001');
      });
    });

    it('should always receive user parameter when called from createMCPTool', async () => {
      const mockUser = { id: 'user-002', permissions: ['read', 'write'] };
      const mockRes = { write: jest.fn(), flush: jest.fn() };

      // Track all calls to reinitMCPServer
      const reinitCalls = [];
      mockReinitMCPServer.mockImplementation((params) => {
        reinitCalls.push(params);
        return Promise.resolve({
          availableTools: {
            'my-tool::my-server': {
              function: { description: 'My Tool', parameters: {} },
            },
          },
        });
      });

      await createMCPTool({
        res: mockRes,
        user: mockUser,
        toolKey: 'my-tool::my-server',
        provider: 'google',
        userMCPAuthMap: {},
        availableTools: undefined, // Force reinit
      });

      // Verify the call to reinitMCPServer had the user
      expect(reinitCalls.length).toBe(1);
      expect(reinitCalls[0].user).toBe(mockUser);
      expect(reinitCalls[0].user.id).toBe('user-002');
    });
  });

  describe('User parameter integrity', () => {
    it('should preserve user object properties through the call chain', async () => {
      const complexUser = {
        id: 'complex-user',
        name: 'John Doe',
        email: 'john@example.com',
        metadata: { subscription: 'premium', settings: { theme: 'dark' } },
      };
      const mockRes = { write: jest.fn(), flush: jest.fn() };

      let capturedUser = null;
      mockReinitMCPServer.mockImplementation((params) => {
        capturedUser = params.user;
        return Promise.resolve({
          tools: [{ name: 'test' }],
          availableTools: {
            'test::server': { function: { description: 'Test', parameters: {} } },
          },
        });
      });

      await createMCPTools({
        res: mockRes,
        user: complexUser,
        serverName: 'server',
        provider: 'openai',
        userMCPAuthMap: {},
      });

      // Verify the complete user object was passed
      expect(capturedUser).toEqual(complexUser);
      expect(capturedUser.id).toBe('complex-user');
      expect(capturedUser.metadata.subscription).toBe('premium');
      expect(capturedUser.metadata.settings.theme).toBe('dark');
    });

    it('should throw error when user is null', async () => {
      const mockRes = { write: jest.fn(), flush: jest.fn() };

      mockReinitMCPServer.mockResolvedValue({
        tools: [],
        availableTools: {},
      });

      await expect(
        createMCPTools({
          res: mockRes,
          user: null,
          serverName: 'test-server',
          provider: 'openai',
          userMCPAuthMap: {},
        }),
      ).rejects.toThrow("Cannot read properties of null (reading 'id')");

      // Verify reinitMCPServer was not called due to early error
      expect(mockReinitMCPServer).not.toHaveBeenCalled();
    });
  });
});
@@ -1,13 +1,13 @@
const axios = require('axios');
const { Providers } = require('@librechat/agents');
const { logAxiosError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { logAxiosError, inputSchema, processModelData } = require('@librechat/api');
const { EModelEndpoint, defaultModels, CacheKeys } = require('librechat-data-provider');
const { inputSchema, extractBaseURL, processModelData } = require('~/utils');
const { OllamaClient } = require('~/app/clients/OllamaClient');
const { isUserProvided } = require('~/server/utils');
const getLogStores = require('~/cache/getLogStores');
const { extractBaseURL } = require('~/utils');

/**
 * Splits a string by commas and trims each resulting value.
@@ -11,8 +11,8 @@ const {
  getAnthropicModels,
} = require('./ModelService');

jest.mock('~/utils', () => {
  const originalUtils = jest.requireActual('~/utils');
jest.mock('@librechat/api', () => {
  const originalUtils = jest.requireActual('@librechat/api');
  return {
    ...originalUtils,
    processModelData: jest.fn((...args) => {
@@ -108,7 +108,7 @@ describe('fetchModels with createTokenConfig true', () => {

  beforeEach(() => {
    // Clears the mock's history before each test
    const _utils = require('~/utils');
    const _utils = require('@librechat/api');
    axios.get.mockResolvedValue({ data });
  });

@@ -120,7 +120,7 @@ describe('fetchModels with createTokenConfig true', () => {
      createTokenConfig: true,
    });

    const { processModelData } = require('~/utils');
    const { processModelData } = require('@librechat/api');
    expect(processModelData).toHaveBeenCalled();
    expect(processModelData).toHaveBeenCalledWith(data);
  });
@@ -313,7 +313,7 @@ const ensurePrincipalExists = async function (principal) {
    idOnTheSource: principal.idOnTheSource,
  };

  const userId = await createUser(userData, true, false);
  const userId = await createUser(userData, true, true);
  return userId.toString();
}
@@ -1,7 +1,12 @@
 const { sleep } = require('@librechat/agents');
 const { logger } = require('@librechat/data-schemas');
 const { tool: toolFn, DynamicStructuredTool } = require('@langchain/core/tools');
-const { getToolkitKey, hasCustomUserVars, getUserMCPAuthMap } = require('@librechat/api');
+const {
+  getToolkitKey,
+  hasCustomUserVars,
+  getUserMCPAuthMap,
+  isActionDomainAllowed,
+} = require('@librechat/api');
 const {
   Tools,
   Constants,

@@ -26,7 +31,6 @@ const { processFileURL, uploadImageBuffer } = require('~/server/services/Files/p
 const { getEndpointsConfig, getCachedTools } = require('~/server/services/Config');
 const { manifestToolMap, toolkits } = require('~/app/clients/tools/manifest');
 const { createOnSearchResults } = require('~/server/services/Tools/search');
-const { isActionDomainAllowed } = require('~/server/services/domains');
 const { recordUsage } = require('~/server/services/Threads');
 const { loadTools } = require('~/app/clients/tools/util');
 const { redactMessage } = require('~/config/parsers');

@@ -74,7 +78,7 @@ async function processRequiredActions(client, requiredActions) {
     requiredActions,
   );
   const appConfig = client.req.config;
-  const toolDefinitions = await getCachedTools({ userId: client.req.user.id, includeGlobal: true });
+  const toolDefinitions = await getCachedTools();
   const seenToolkits = new Set();
   const tools = requiredActions
     .map((action) => {

@@ -353,7 +357,12 @@ async function processRequiredActions(client, requiredActions) {
 async function loadAgentTools({ req, res, agent, signal, tool_resources, openAIApiKey }) {
   if (!agent.tools || agent.tools.length === 0) {
     return {};
-  } else if (agent.tools && agent.tools.length === 1 && agent.tools[0] === AgentCapabilities.ocr) {
+  } else if (
+    agent.tools &&
+    agent.tools.length === 1 &&
+    /** Legacy handling for `ocr` as may still exist in existing Agents */
+    (agent.tools[0] === AgentCapabilities.context || agent.tools[0] === AgentCapabilities.ocr)
+  ) {
     return {};
   }

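Note: the widened early-return above treats the legacy `ocr` capability the same as the new `context` capability. A standalone sketch of that check (the `AgentCapabilities` values are the ones named in the hunk; the sample agents are illustrative):

const AgentCapabilities = { ocr: 'ocr', context: 'context' }; // values named in the hunk above
function hasOnlyContextTool(agent) {
  return (
    Array.isArray(agent.tools) &&
    agent.tools.length === 1 &&
    // legacy `ocr` may still exist in older Agents; treat it like `context`
    (agent.tools[0] === AgentCapabilities.context || agent.tools[0] === AgentCapabilities.ocr)
  );
}
console.log(hasOnlyContextTool({ tools: ['ocr'] })); // true -> loadAgentTools returns {}
console.log(hasOnlyContextTool({ tools: ['context', 'web_search'] })); // false
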
@@ -2,12 +2,12 @@ const { logger } = require('@librechat/data-schemas');
|
||||
const { CacheKeys, Constants } = require('librechat-data-provider');
|
||||
const { findToken, createToken, updateToken, deleteTokens } = require('~/models');
|
||||
const { getMCPManager, getFlowStateManager } = require('~/config');
|
||||
const { updateMCPUserTools } = require('~/server/services/Config');
|
||||
const { updateMCPServerTools } = require('~/server/services/Config');
|
||||
const { getLogStores } = require('~/cache');
|
||||
|
||||
/**
|
||||
* @param {Object} params
|
||||
* @param {ServerRequest} params.req
|
||||
* @param {IUser} params.user - The user from the request object.
|
||||
* @param {string} params.serverName - The name of the MCP server
|
||||
* @param {boolean} params.returnOnOAuth - Whether to initiate OAuth and return, or wait for OAuth flow to finish
|
||||
* @param {AbortSignal} [params.signal] - The abort signal to handle cancellation.
|
||||
@@ -18,7 +18,7 @@ const { getLogStores } = require('~/cache');
|
||||
* @param {Record<string, Record<string, string>>} [params.userMCPAuthMap]
|
||||
*/
|
||||
async function reinitMCPServer({
|
||||
req,
|
||||
user,
|
||||
signal,
|
||||
forceNew,
|
||||
serverName,
|
||||
@@ -29,7 +29,7 @@ async function reinitMCPServer({
|
||||
flowManager: _flowManager,
|
||||
}) {
|
||||
/** @type {MCPConnection | null} */
|
||||
let userConnection = null;
|
||||
let connection = null;
|
||||
/** @type {LCAvailableTools | null} */
|
||||
let availableTools = null;
|
||||
/** @type {ReturnType<MCPConnection['fetchTools']> | null} */
|
||||
@@ -44,14 +44,14 @@ async function reinitMCPServer({
|
||||
const oauthStart =
|
||||
_oauthStart ??
|
||||
(async (authURL) => {
|
||||
logger.info(`[MCP Reinitialize] OAuth URL received: ${authURL}`);
|
||||
logger.info(`[MCP Reinitialize] OAuth URL received for ${serverName}`);
|
||||
oauthUrl = authURL;
|
||||
oauthRequired = true;
|
||||
});
|
||||
|
||||
try {
|
||||
userConnection = await mcpManager.getUserConnection({
|
||||
user: req.user,
|
||||
connection = await mcpManager.getConnection({
|
||||
user,
|
||||
signal,
|
||||
forceNew,
|
||||
oauthStart,
|
||||
@@ -70,7 +70,7 @@ async function reinitMCPServer({
|
||||
|
||||
logger.info(`[MCP Reinitialize] Successfully established connection for ${serverName}`);
|
||||
} catch (err) {
|
||||
logger.info(`[MCP Reinitialize] getUserConnection threw error: ${err.message}`);
|
||||
logger.info(`[MCP Reinitialize] getConnection threw error: ${err.message}`);
|
||||
logger.info(
|
||||
`[MCP Reinitialize] OAuth state - oauthRequired: ${oauthRequired}, oauthUrl: ${oauthUrl ? 'present' : 'null'}`,
|
||||
);
|
||||
@@ -95,10 +95,9 @@ async function reinitMCPServer({
|
||||
}
|
||||
}
|
||||
|
||||
if (userConnection && !oauthRequired) {
|
||||
tools = await userConnection.fetchTools();
|
||||
availableTools = await updateMCPUserTools({
|
||||
userId: req.user.id,
|
||||
if (connection && !oauthRequired) {
|
||||
tools = await connection.fetchTools();
|
||||
availableTools = await updateMCPServerTools({
|
||||
serverName,
|
||||
tools,
|
||||
});
|
||||
@@ -112,7 +111,7 @@ async function reinitMCPServer({
|
||||
if (oauthRequired) {
|
||||
return `MCP server '${serverName}' ready for OAuth authentication`;
|
||||
}
|
||||
if (userConnection) {
|
||||
if (connection) {
|
||||
return `MCP server '${serverName}' reinitialized successfully`;
|
||||
}
|
||||
return `Failed to reinitialize MCP server '${serverName}'`;
|
||||
@@ -120,7 +119,7 @@ async function reinitMCPServer({
|
||||
|
||||
const result = {
|
||||
availableTools,
|
||||
success: Boolean((userConnection && !oauthRequired) || (oauthRequired && oauthUrl)),
|
||||
success: Boolean((connection && !oauthRequired) || (oauthRequired && oauthUrl)),
|
||||
message: getResponseMessage(),
|
||||
oauthRequired,
|
||||
serverName,
|
||||
|
||||
26
api/server/services/initializeOAuthReconnectManager.js
Normal file
26
api/server/services/initializeOAuthReconnectManager.js
Normal file
@@ -0,0 +1,26 @@
|
||||
const { logger } = require('@librechat/data-schemas');
|
||||
const { CacheKeys } = require('librechat-data-provider');
|
||||
const { createOAuthReconnectionManager, getFlowStateManager } = require('~/config');
|
||||
const { findToken, updateToken, createToken, deleteTokens } = require('~/models');
|
||||
const { getLogStores } = require('~/cache');
|
||||
|
||||
/**
|
||||
* Initialize OAuth reconnect manager
|
||||
*/
|
||||
async function initializeOAuthReconnectManager() {
|
||||
try {
|
||||
const flowManager = getFlowStateManager(getLogStores(CacheKeys.FLOWS));
|
||||
const tokenMethods = {
|
||||
findToken,
|
||||
updateToken,
|
||||
createToken,
|
||||
deleteTokens,
|
||||
};
|
||||
await createOAuthReconnectionManager(flowManager, tokenMethods);
|
||||
logger.info(`OAuth reconnect manager initialized successfully.`);
|
||||
} catch (error) {
|
||||
logger.error('Failed to initialize OAuth reconnect manager:', error);
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = initializeOAuthReconnectManager;
|
||||
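Note: the call site for this new module is not shown in this compare view. A hypothetical wiring sketch, assuming it is invoked once during server startup:

// Hypothetical startup wiring (the actual call site is outside this diff):
const initializeOAuthReconnectManager = require('~/server/services/initializeOAuthReconnectManager');

async function startServer() {
  // ... other initialization ...
  await initializeOAuthReconnectManager();
}

Because the function catches and logs its own errors, awaiting it can never reject and a failed reconnect manager will not block startup.
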
@@ -229,7 +229,7 @@
 >
 <!--[if mso]><style>.v-button {background: transparent !important;}</style><![endif]-->
 <div align='left'>
-  <!--[if mso]><v:roundrect xmlns:v="urn:schemas-microsoft-com:vml" xmlns:w="urn:schemas-microsoft-com:office:word" href="href="{{verificationLink}}"" style="height:37px; v-text-anchor:middle; width:114px;" arcsize="11%" stroke="f" fillcolor="#10a37f"><w:anchorlock/><center style="color:#FFFFFF;"><![endif]-->
+  <!--[if mso]><v:roundrect xmlns:v="urn:schemas-microsoft-com:vml" xmlns:w="urn:schemas-microsoft-com:office:word" href="{{verificationLink}}" style="height:37px; v-text-anchor:middle; width:114px;" arcsize="11%" stroke="f" fillcolor="#10a37f"><w:anchorlock/><center style="color:#FFFFFF;"><![endif]-->
 <a
   href='{{verificationLink}}'
   target='_blank'

@@ -10,6 +10,10 @@ jest.mock('~/models/Message', () => ({
   bulkSaveMessages: jest.fn(),
 }));

+jest.mock('~/models/ConversationTag', () => ({
+  bulkIncrementTagCounts: jest.fn(),
+}));
+
 let mockIdCounter = 0;
 jest.mock('uuid', () => {
   return {

@@ -22,11 +26,13 @@ jest.mock('uuid', () => {

 const {
   forkConversation,
+  duplicateConversation,
   splitAtTargetLevel,
   getAllMessagesUpToParent,
   getMessagesUpToTargetLevel,
   cloneMessagesWithTimestamps,
 } = require('./fork');
+const { bulkIncrementTagCounts } = require('~/models/ConversationTag');
 const { getConvo, bulkSaveConvos } = require('~/models/Conversation');
 const { getMessages, bulkSaveMessages } = require('~/models/Message');
 const { createImportBatchBuilder } = require('./importBatchBuilder');

@@ -181,6 +187,120 @@ describe('forkConversation', () => {
       }),
     ).rejects.toThrow('Failed to fetch messages');
   });
+
+  test('should increment tag counts when forking conversation with tags', async () => {
+    const mockConvoWithTags = {
+      ...mockConversation,
+      tags: ['bookmark1', 'bookmark2'],
+    };
+    getConvo.mockResolvedValue(mockConvoWithTags);
+
+    await forkConversation({
+      originalConvoId: 'abc123',
+      targetMessageId: '3',
+      requestUserId: 'user1',
+      option: ForkOptions.DIRECT_PATH,
+    });
+
+    // Verify that bulkIncrementTagCounts was called with correct tags
+    expect(bulkIncrementTagCounts).toHaveBeenCalledWith('user1', ['bookmark1', 'bookmark2']);
+  });
+
+  test('should handle conversation without tags when forking', async () => {
+    const mockConvoWithoutTags = {
+      ...mockConversation,
+      // No tags field
+    };
+    getConvo.mockResolvedValue(mockConvoWithoutTags);
+
+    await forkConversation({
+      originalConvoId: 'abc123',
+      targetMessageId: '3',
+      requestUserId: 'user1',
+      option: ForkOptions.DIRECT_PATH,
+    });
+
+    // bulkIncrementTagCounts will be called with array containing undefined
+    expect(bulkIncrementTagCounts).toHaveBeenCalled();
+  });
+
+  test('should handle empty tags array when forking', async () => {
+    const mockConvoWithEmptyTags = {
+      ...mockConversation,
+      tags: [],
+    };
+    getConvo.mockResolvedValue(mockConvoWithEmptyTags);
+
+    await forkConversation({
+      originalConvoId: 'abc123',
+      targetMessageId: '3',
+      requestUserId: 'user1',
+      option: ForkOptions.DIRECT_PATH,
+    });
+
+    // bulkIncrementTagCounts will be called with empty array
+    expect(bulkIncrementTagCounts).toHaveBeenCalledWith('user1', []);
+  });
 });
+
+describe('duplicateConversation', () => {
+  beforeEach(() => {
+    jest.clearAllMocks();
+    mockIdCounter = 0;
+    getConvo.mockResolvedValue(mockConversation);
+    getMessages.mockResolvedValue(mockMessages);
+    bulkSaveConvos.mockResolvedValue(null);
+    bulkSaveMessages.mockResolvedValue(null);
+    bulkIncrementTagCounts.mockResolvedValue(null);
+  });
+
+  test('should duplicate conversation and increment tag counts', async () => {
+    const mockConvoWithTags = {
+      ...mockConversation,
+      tags: ['important', 'work', 'project'],
+    };
+    getConvo.mockResolvedValue(mockConvoWithTags);
+
+    await duplicateConversation({
+      userId: 'user1',
+      conversationId: 'abc123',
+    });
+
+    // Verify that bulkIncrementTagCounts was called with correct tags
+    expect(bulkIncrementTagCounts).toHaveBeenCalledWith('user1', ['important', 'work', 'project']);
+  });
+
+  test('should duplicate conversation without tags', async () => {
+    const mockConvoWithoutTags = {
+      ...mockConversation,
+      // No tags field
+    };
+    getConvo.mockResolvedValue(mockConvoWithoutTags);
+
+    await duplicateConversation({
+      userId: 'user1',
+      conversationId: 'abc123',
+    });
+
+    // bulkIncrementTagCounts will be called with array containing undefined
+    expect(bulkIncrementTagCounts).toHaveBeenCalled();
+  });
+
+  test('should handle empty tags array when duplicating', async () => {
+    const mockConvoWithEmptyTags = {
+      ...mockConversation,
+      tags: [],
+    };
+    getConvo.mockResolvedValue(mockConvoWithEmptyTags);
+
+    await duplicateConversation({
+      userId: 'user1',
+      conversationId: 'abc123',
+    });
+
+    // bulkIncrementTagCounts will be called with empty array
+    expect(bulkIncrementTagCounts).toHaveBeenCalledWith('user1', []);
+  });
+});

 const mockMessagesComplex = [

@@ -1,5 +1,6 @@
 const { v4: uuidv4 } = require('uuid');
 const { EModelEndpoint, Constants, openAISettings } = require('librechat-data-provider');
+const { bulkIncrementTagCounts } = require('~/models/ConversationTag');
 const { bulkSaveConvos } = require('~/models/Conversation');
 const { bulkSaveMessages } = require('~/models/Message');
 const { logger } = require('~/config');

@@ -93,13 +94,22 @@ class ImportBatchBuilder {

   /**
    * Saves the batch of conversations and messages to the DB.
+   * Also increments tag counts for any existing tags.
    * @returns {Promise<void>} A promise that resolves when the batch is saved.
    * @throws {Error} If there is an error saving the batch.
    */
   async saveBatch() {
     try {
-      await bulkSaveConvos(this.conversations);
-      await bulkSaveMessages(this.messages, true);
+      const promises = [];
+      promises.push(bulkSaveConvos(this.conversations));
+      promises.push(bulkSaveMessages(this.messages, true));
+      promises.push(
+        bulkIncrementTagCounts(
+          this.requestUserId,
+          this.conversations.flatMap((convo) => convo.tags),
+        ),
+      );
+      await Promise.all(promises);
       logger.debug(
         `user: ${this.requestUserId} | Added ${this.conversations.length} conversations and ${this.messages.length} messages to the DB.`,
       );

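Note: the rewritten saveBatch issues its three writes concurrently. A small standalone reminder of the Promise.all semantics this relies on (illustrative values):

// Promise.all rejects as soon as any write rejects, so saveBatch's existing
// catch block still sees the first failure; sibling writes that already
// succeeded are not rolled back.
const writes = [
  Promise.resolve('convos saved'),
  Promise.reject(new Error('messages failed')),
  Promise.resolve('tags incremented'),
];
Promise.all(writes).catch((err) => console.error(err.message)); // 'messages failed'
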
@@ -1,9 +1,9 @@
 const { v4: uuidv4 } = require('uuid');
+const { logger } = require('@librechat/data-schemas');
 const { EModelEndpoint, Constants, openAISettings, CacheKeys } = require('librechat-data-provider');
 const { createImportBatchBuilder } = require('./importBatchBuilder');
 const { cloneMessagesWithTimestamps } = require('./fork');
 const getLogStores = require('~/cache/getLogStores');
-const logger = require('~/config/winston');

 /**
  * Returns the appropriate importer function based on the provided JSON data.

@@ -212,6 +212,29 @@ function processConversation(conv, importBatchBuilder, requestUserId) {
     }
   }

+  /**
+   * Helper function to find the nearest non-system parent
+   * @param {string} parentId - The ID of the parent message.
+   * @returns {string} The ID of the nearest non-system parent message.
+   */
+  const findNonSystemParent = (parentId) => {
+    if (!parentId || !messageMap.has(parentId)) {
+      return Constants.NO_PARENT;
+    }
+
+    const parentMapping = conv.mapping[parentId];
+    if (!parentMapping?.message) {
+      return Constants.NO_PARENT;
+    }
+
+    /* If parent is a system message, traverse up to find the nearest non-system parent */
+    if (parentMapping.message.author?.role === 'system') {
+      return findNonSystemParent(parentMapping.parent);
+    }
+
+    return messageMap.get(parentId);
+  };
+
   // Create and save messages using the mapped IDs
   const messages = [];
   for (const [id, mapping] of Object.entries(conv.mapping)) {

@@ -220,23 +243,27 @@ function processConversation(conv, importBatchBuilder, requestUserId) {
       messageMap.delete(id);
       continue;
     } else if (role === 'system') {
-      messageMap.delete(id);
+      // Skip system messages but keep their ID in messageMap for parent references
      continue;
    }

     const newMessageId = messageMap.get(id);
-    const parentMessageId =
-      mapping.parent && messageMap.has(mapping.parent)
-        ? messageMap.get(mapping.parent)
-        : Constants.NO_PARENT;
+    const parentMessageId = findNonSystemParent(mapping.parent);

     const messageText = formatMessageText(mapping.message);

     const isCreatedByUser = role === 'user';
-    let sender = isCreatedByUser ? 'user' : 'GPT-3.5';
+    let sender = isCreatedByUser ? 'user' : 'assistant';
     const model = mapping.message.metadata.model_slug || openAISettings.model.default;
-    if (model.includes('gpt-4')) {
-      sender = 'GPT-4';
+
+    if (!isCreatedByUser) {
+      /** Extracted model name from model slug */
+      const gptMatch = model.match(/gpt-(.+)/i);
+      if (gptMatch) {
+        sender = `GPT-${gptMatch[1]}`;
+      } else {
+        sender = model || 'assistant';
+      }
     }

     messages.push({

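Note: for reference, a standalone sketch of how the new sender derivation behaves, using the same regex as the hunk above (the sample slugs are illustrative):

const slugs = ['gpt-4', 'gpt-4o-mini', 'claude-3-opus', ''];
for (const model of slugs) {
  let sender = 'assistant';
  const gptMatch = model.match(/gpt-(.+)/i);
  if (gptMatch) {
    sender = `GPT-${gptMatch[1]}`; // 'gpt-4o-mini' -> 'GPT-4o-mini'
  } else {
    sender = model || 'assistant'; // non-GPT slugs pass through; empty -> 'assistant'
  }
  console.log(model, '->', sender);
}
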
@@ -99,6 +99,404 @@ describe('importChatGptConvo', () => {

     expect(importBatchBuilder.saveBatch).toHaveBeenCalled();
   });
+
+  it('should handle system messages without breaking parent-child relationships', async () => {
+    /**
+     * Test data that reproduces message graph "breaking" when it encounters a system message
+     */
+    const testData = [
+      {
+        title: 'System Message Parent Test',
+        create_time: 1714585031.148505,
+        update_time: 1714585060.879308,
+        mapping: {
+          'root-node': {
+            id: 'root-node',
+            message: null,
+            parent: null,
+            children: ['user-msg-1'],
+          },
+          'user-msg-1': {
+            id: 'user-msg-1',
+            message: {
+              id: 'user-msg-1',
+              author: { role: 'user' },
+              create_time: 1714585031.150442,
+              content: { content_type: 'text', parts: ['First user message'] },
+              metadata: { model_slug: 'gpt-4' },
+            },
+            parent: 'root-node',
+            children: ['assistant-msg-1'],
+          },
+          'assistant-msg-1': {
+            id: 'assistant-msg-1',
+            message: {
+              id: 'assistant-msg-1',
+              author: { role: 'assistant' },
+              create_time: 1714585032.150442,
+              content: { content_type: 'text', parts: ['First assistant response'] },
+              metadata: { model_slug: 'gpt-4' },
+            },
+            parent: 'user-msg-1',
+            children: ['system-msg'],
+          },
+          'system-msg': {
+            id: 'system-msg',
+            message: {
+              id: 'system-msg',
+              author: { role: 'system' },
+              create_time: 1714585033.150442,
+              content: { content_type: 'text', parts: ['System message in middle'] },
+              metadata: { model_slug: 'gpt-4' },
+            },
+            parent: 'assistant-msg-1',
+            children: ['user-msg-2'],
+          },
+          'user-msg-2': {
+            id: 'user-msg-2',
+            message: {
+              id: 'user-msg-2',
+              author: { role: 'user' },
+              create_time: 1714585034.150442,
+              content: { content_type: 'text', parts: ['Second user message'] },
+              metadata: { model_slug: 'gpt-4' },
+            },
+            parent: 'system-msg',
+            children: ['assistant-msg-2'],
+          },
+          'assistant-msg-2': {
+            id: 'assistant-msg-2',
+            message: {
+              id: 'assistant-msg-2',
+              author: { role: 'assistant' },
+              create_time: 1714585035.150442,
+              content: { content_type: 'text', parts: ['Second assistant response'] },
+              metadata: { model_slug: 'gpt-4' },
+            },
+            parent: 'user-msg-2',
+            children: [],
+          },
+        },
+      },
+    ];
+
+    const requestUserId = 'user-123';
+    const importBatchBuilder = new ImportBatchBuilder(requestUserId);
+    jest.spyOn(importBatchBuilder, 'saveMessage');
+
+    const importer = getImporter(testData);
+    await importer(testData, requestUserId, () => importBatchBuilder);
+
+    /** 2 user messages + 2 assistant messages (system message should be skipped) */
+    const expectedMessages = 4;
+    expect(importBatchBuilder.saveMessage).toHaveBeenCalledTimes(expectedMessages);
+
+    const savedMessages = importBatchBuilder.saveMessage.mock.calls.map((call) => call[0]);
+
+    const messageMap = new Map();
+    savedMessages.forEach((msg) => {
+      messageMap.set(msg.text, msg);
+    });
+
+    const firstUser = messageMap.get('First user message');
+    const firstAssistant = messageMap.get('First assistant response');
+    const secondUser = messageMap.get('Second user message');
+    const secondAssistant = messageMap.get('Second assistant response');
+
+    expect(firstUser).toBeDefined();
+    expect(firstAssistant).toBeDefined();
+    expect(secondUser).toBeDefined();
+    expect(secondAssistant).toBeDefined();
+    expect(firstUser.parentMessageId).toBe(Constants.NO_PARENT);
+    expect(firstAssistant.parentMessageId).toBe(firstUser.messageId);
+
+    // This is the key test: second user message should have first assistant as parent
+    // (not NO_PARENT which would indicate the system message broke the chain)
+    expect(secondUser.parentMessageId).toBe(firstAssistant.messageId);
+    expect(secondAssistant.parentMessageId).toBe(secondUser.messageId);
+  });
+
+  it('should maintain correct sender for user messages regardless of GPT-4 model', async () => {
+    /**
+     * Test data with GPT-4 model to ensure user messages keep 'user' sender
+     */
+    const testData = [
+      {
+        title: 'GPT-4 Sender Test',
+        create_time: 1714585031.148505,
+        update_time: 1714585060.879308,
+        mapping: {
+          'root-node': {
+            id: 'root-node',
+            message: null,
+            parent: null,
+            children: ['user-msg-1'],
+          },
+          'user-msg-1': {
+            id: 'user-msg-1',
+            message: {
+              id: 'user-msg-1',
+              author: { role: 'user' },
+              create_time: 1714585031.150442,
+              content: { content_type: 'text', parts: ['User message with GPT-4'] },
+              metadata: { model_slug: 'gpt-4' },
+            },
+            parent: 'root-node',
+            children: ['assistant-msg-1'],
+          },
+          'assistant-msg-1': {
+            id: 'assistant-msg-1',
+            message: {
+              id: 'assistant-msg-1',
+              author: { role: 'assistant' },
+              create_time: 1714585032.150442,
+              content: { content_type: 'text', parts: ['Assistant response with GPT-4'] },
+              metadata: { model_slug: 'gpt-4' },
+            },
+            parent: 'user-msg-1',
+            children: ['user-msg-2'],
+          },
+          'user-msg-2': {
+            id: 'user-msg-2',
+            message: {
+              id: 'user-msg-2',
+              author: { role: 'user' },
+              create_time: 1714585033.150442,
+              content: { content_type: 'text', parts: ['Another user message with GPT-4o-mini'] },
+              metadata: { model_slug: 'gpt-4o-mini' },
+            },
+            parent: 'assistant-msg-1',
+            children: ['assistant-msg-2'],
+          },
+          'assistant-msg-2': {
+            id: 'assistant-msg-2',
+            message: {
+              id: 'assistant-msg-2',
+              author: { role: 'assistant' },
+              create_time: 1714585034.150442,
+              content: { content_type: 'text', parts: ['Assistant response with GPT-3.5'] },
+              metadata: { model_slug: 'gpt-3.5-turbo' },
+            },
+            parent: 'user-msg-2',
+            children: [],
+          },
+        },
+      },
+    ];
+
+    const requestUserId = 'user-123';
+    const importBatchBuilder = new ImportBatchBuilder(requestUserId);
+    jest.spyOn(importBatchBuilder, 'saveMessage');
+
+    const importer = getImporter(testData);
+    await importer(testData, requestUserId, () => importBatchBuilder);
+
+    const savedMessages = importBatchBuilder.saveMessage.mock.calls.map((call) => call[0]);
+
+    const userMsg1 = savedMessages.find((msg) => msg.text === 'User message with GPT-4');
+    const assistantMsg1 = savedMessages.find((msg) => msg.text === 'Assistant response with GPT-4');
+    const userMsg2 = savedMessages.find(
+      (msg) => msg.text === 'Another user message with GPT-4o-mini',
+    );
+    const assistantMsg2 = savedMessages.find(
+      (msg) => msg.text === 'Assistant response with GPT-3.5',
+    );
+
+    expect(userMsg1.sender).toBe('user');
+    expect(userMsg1.isCreatedByUser).toBe(true);
+    expect(userMsg1.model).toBe('gpt-4');
+
+    expect(userMsg2.sender).toBe('user');
+    expect(userMsg2.isCreatedByUser).toBe(true);
+    expect(userMsg2.model).toBe('gpt-4o-mini');
+
+    expect(assistantMsg1.sender).toBe('GPT-4');
+    expect(assistantMsg1.isCreatedByUser).toBe(false);
+    expect(assistantMsg1.model).toBe('gpt-4');
+
+    expect(assistantMsg2.sender).toBe('GPT-3.5-turbo');
+    expect(assistantMsg2.isCreatedByUser).toBe(false);
+    expect(assistantMsg2.model).toBe('gpt-3.5-turbo');
+  });
+
+  it('should correctly extract and format model names from various model slugs', async () => {
+    /**
+     * Test data with various model slugs to test dynamic model identifier extraction
+     */
+    const testData = [
+      {
+        title: 'Dynamic Model Identifier Test',
+        create_time: 1714585031.148505,
+        update_time: 1714585060.879308,
+        mapping: {
+          'root-node': {
+            id: 'root-node',
+            message: null,
+            parent: null,
+            children: ['msg-1'],
+          },
+          'msg-1': {
+            id: 'msg-1',
+            message: {
+              id: 'msg-1',
+              author: { role: 'user' },
+              create_time: 1714585031.150442,
+              content: { content_type: 'text', parts: ['Test message'] },
+              metadata: {},
+            },
+            parent: 'root-node',
+            children: ['msg-2', 'msg-3', 'msg-4', 'msg-5', 'msg-6', 'msg-7', 'msg-8', 'msg-9'],
+          },
+          'msg-2': {
+            id: 'msg-2',
+            message: {
+              id: 'msg-2',
+              author: { role: 'assistant' },
+              create_time: 1714585032.150442,
+              content: { content_type: 'text', parts: ['GPT-4 response'] },
+              metadata: { model_slug: 'gpt-4' },
+            },
+            parent: 'msg-1',
+            children: [],
+          },
+          'msg-3': {
+            id: 'msg-3',
+            message: {
+              id: 'msg-3',
+              author: { role: 'assistant' },
+              create_time: 1714585033.150442,
+              content: { content_type: 'text', parts: ['GPT-4o response'] },
+              metadata: { model_slug: 'gpt-4o' },
+            },
+            parent: 'msg-1',
+            children: [],
+          },
+          'msg-4': {
+            id: 'msg-4',
+            message: {
+              id: 'msg-4',
+              author: { role: 'assistant' },
+              create_time: 1714585034.150442,
+              content: { content_type: 'text', parts: ['GPT-4o-mini response'] },
+              metadata: { model_slug: 'gpt-4o-mini' },
+            },
+            parent: 'msg-1',
+            children: [],
+          },
+          'msg-5': {
+            id: 'msg-5',
+            message: {
+              id: 'msg-5',
+              author: { role: 'assistant' },
+              create_time: 1714585035.150442,
+              content: { content_type: 'text', parts: ['GPT-3.5-turbo response'] },
+              metadata: { model_slug: 'gpt-3.5-turbo' },
+            },
+            parent: 'msg-1',
+            children: [],
+          },
+          'msg-6': {
+            id: 'msg-6',
+            message: {
+              id: 'msg-6',
+              author: { role: 'assistant' },
+              create_time: 1714585036.150442,
+              content: { content_type: 'text', parts: ['GPT-4-turbo response'] },
+              metadata: { model_slug: 'gpt-4-turbo' },
+            },
+            parent: 'msg-1',
+            children: [],
+          },
+          'msg-7': {
+            id: 'msg-7',
+            message: {
+              id: 'msg-7',
+              author: { role: 'assistant' },
+              create_time: 1714585037.150442,
+              content: { content_type: 'text', parts: ['GPT-4-1106-preview response'] },
+              metadata: { model_slug: 'gpt-4-1106-preview' },
+            },
+            parent: 'msg-1',
+            children: [],
+          },
+          'msg-8': {
+            id: 'msg-8',
+            message: {
+              id: 'msg-8',
+              author: { role: 'assistant' },
+              create_time: 1714585038.150442,
+              content: { content_type: 'text', parts: ['Claude response'] },
+              metadata: { model_slug: 'claude-3-opus' },
+            },
+            parent: 'msg-1',
+            children: [],
+          },
+          'msg-9': {
+            id: 'msg-9',
+            message: {
+              id: 'msg-9',
+              author: { role: 'assistant' },
+              create_time: 1714585039.150442,
+              content: { content_type: 'text', parts: ['No model slug response'] },
+              metadata: {},
+            },
+            parent: 'msg-1',
+            children: [],
+          },
+        },
+      },
+    ];
+
+    const requestUserId = 'user-123';
+    const importBatchBuilder = new ImportBatchBuilder(requestUserId);
+    jest.spyOn(importBatchBuilder, 'saveMessage');
+
+    const importer = getImporter(testData);
+    await importer(testData, requestUserId, () => importBatchBuilder);
+
+    const savedMessages = importBatchBuilder.saveMessage.mock.calls.map((call) => call[0]);
+
+    // Test various GPT model slug formats
+    const gpt4 = savedMessages.find((msg) => msg.text === 'GPT-4 response');
+    expect(gpt4.sender).toBe('GPT-4');
+    expect(gpt4.model).toBe('gpt-4');
+
+    const gpt4o = savedMessages.find((msg) => msg.text === 'GPT-4o response');
+    expect(gpt4o.sender).toBe('GPT-4o');
+    expect(gpt4o.model).toBe('gpt-4o');
+
+    const gpt4oMini = savedMessages.find((msg) => msg.text === 'GPT-4o-mini response');
+    expect(gpt4oMini.sender).toBe('GPT-4o-mini');
+    expect(gpt4oMini.model).toBe('gpt-4o-mini');
+
+    const gpt35Turbo = savedMessages.find((msg) => msg.text === 'GPT-3.5-turbo response');
+    expect(gpt35Turbo.sender).toBe('GPT-3.5-turbo');
+    expect(gpt35Turbo.model).toBe('gpt-3.5-turbo');
+
+    const gpt4Turbo = savedMessages.find((msg) => msg.text === 'GPT-4-turbo response');
+    expect(gpt4Turbo.sender).toBe('GPT-4-turbo');
+    expect(gpt4Turbo.model).toBe('gpt-4-turbo');
+
+    const gpt4Preview = savedMessages.find((msg) => msg.text === 'GPT-4-1106-preview response');
+    expect(gpt4Preview.sender).toBe('GPT-4-1106-preview');
+    expect(gpt4Preview.model).toBe('gpt-4-1106-preview');
+
+    // Test non-GPT model (should use the model slug as sender)
+    const claude = savedMessages.find((msg) => msg.text === 'Claude response');
+    expect(claude.sender).toBe('claude-3-opus');
+    expect(claude.model).toBe('claude-3-opus');
+
+    // Test missing model slug (should default to openAISettings.model.default)
+    const noModel = savedMessages.find((msg) => msg.text === 'No model slug response');
+    // When no model slug is provided, it defaults to gpt-4o-mini which gets formatted to GPT-4o-mini
+    expect(noModel.sender).toBe('GPT-4o-mini');
+    expect(noModel.model).toBe(openAISettings.model.default);
+
+    // Verify user message is unaffected
+    const userMsg = savedMessages.find((msg) => msg.text === 'Test message');
+    expect(userMsg.sender).toBe('user');
+    expect(userMsg.isCreatedByUser).toBe(true);
+  });
 });

 describe('importLibreChatConvo', () => {

@@ -1,11 +1,10 @@
-const fs = require('fs');
 const path = require('path');
 const axios = require('axios');
 const FormData = require('form-data');
 const nodemailer = require('nodemailer');
 const handlebars = require('handlebars');
 const { logger } = require('@librechat/data-schemas');
-const { logAxiosError, isEnabled } = require('@librechat/api');
+const { logAxiosError, isEnabled, readFileAsString } = require('@librechat/api');

 /**
  * Sends an email using Mailgun API.

@@ -93,8 +92,7 @@ const sendEmailViaSMTP = async ({ transporterOptions, mailOptions }) => {
  */
 const sendEmail = async ({ email, subject, payload, template, throwError = true }) => {
   try {
-    // Read and compile the email template
-    const source = fs.readFileSync(path.join(__dirname, 'emails', template), 'utf8');
+    const { content: source } = await readFileAsString(path.join(__dirname, 'emails', template));
     const compiledTemplate = handlebars.compile(source);
     const html = compiledTemplate(payload);

@@ -1,8 +1,8 @@
 const fs = require('fs');
 const LdapStrategy = require('passport-ldapauth');
 const { logger } = require('@librechat/data-schemas');
-const { isEnabled, getBalanceConfig } = require('@librechat/api');
 const { SystemRoles, ErrorTypes } = require('librechat-data-provider');
+const { isEnabled, getBalanceConfig, isEmailDomainAllowed } = require('@librechat/api');
 const { createUser, findUser, updateUser, countUsers } = require('~/models');
 const { getAppConfig } = require('~/server/services/Config');

@@ -108,7 +108,8 @@ const ldapLogin = new LdapStrategy(ldapOptions, async (userinfo, done) => {
   const username =
     (LDAP_USERNAME && userinfo[LDAP_USERNAME]) || userinfo.givenName || userinfo.mail;

-  const mail = (LDAP_EMAIL && userinfo[LDAP_EMAIL]) || userinfo.mail || username + '@ldap.local';
+  let mail = (LDAP_EMAIL && userinfo[LDAP_EMAIL]) || userinfo.mail || username + '@ldap.local';
+  mail = Array.isArray(mail) ? mail[0] : mail;

   if (!userinfo.mail && !(LDAP_EMAIL && userinfo[LDAP_EMAIL])) {
     logger.warn(

@@ -121,9 +122,18 @@ const ldapLogin = new LdapStrategy(ldapOptions, async (userinfo, done) => {
     );
   }

+  const appConfig = await getAppConfig();
+  if (!isEmailDomainAllowed(mail, appConfig?.registration?.allowedDomains)) {
+    logger.error(
+      `[LDAP Strategy] Authentication blocked - email domain not allowed [Email: ${mail}]`,
+    );
+    return done(null, false, { message: 'Email domain not allowed' });
+  }
+
   if (!user) {
     const isFirstRegisteredUser = (await countUsers()) === 0;
     const role = isFirstRegisteredUser ? SystemRoles.ADMIN : SystemRoles.USER;

     user = {
       provider: 'ldap',
       ldapId,

@@ -133,7 +143,6 @@ const ldapLogin = new LdapStrategy(ldapOptions, async (userinfo, done) => {
       name: fullName,
       role,
     };
-    const appConfig = await getAppConfig({ role: user?.role });
     const balanceConfig = getBalanceConfig(appConfig);
     const userId = await createUser(user, balanceConfig);
     user._id = userId;

api/strategies/ldapStrategy.spec.js (new file, 183 additions)
@@ -0,0 +1,183 @@
+// --- Mocks ---
+jest.mock('@librechat/data-schemas', () => ({
+  logger: {
+    info: jest.fn(),
+    warn: jest.fn(),
+    debug: jest.fn(),
+    error: jest.fn(),
+  },
+}));
+
+jest.mock('@librechat/api', () => ({
+  // isEnabled used for TLS flags
+  isEnabled: jest.fn(() => false),
+  isEmailDomainAllowed: jest.fn(() => true),
+  getBalanceConfig: jest.fn(() => ({ enabled: false })),
+}));
+
+jest.mock('~/models', () => ({
+  findUser: jest.fn(),
+  createUser: jest.fn(),
+  updateUser: jest.fn(),
+  countUsers: jest.fn(),
+}));
+
+jest.mock('~/server/services/Config', () => ({
+  getAppConfig: jest.fn().mockResolvedValue({}),
+}));
+
+// Mock passport-ldapauth to capture verify callback
+let verifyCallback;
+jest.mock('passport-ldapauth', () => {
+  return jest.fn().mockImplementation((options, verify) => {
+    verifyCallback = verify; // capture the strategy verify function
+    return { name: 'ldap', options, verify };
+  });
+});
+
+const { ErrorTypes } = require('librechat-data-provider');
+const { isEmailDomainAllowed } = require('@librechat/api');
+const { findUser, createUser, updateUser, countUsers } = require('~/models');
+
+// Helper to call the verify callback and wrap in a Promise for convenience
+const callVerify = (userinfo) =>
+  new Promise((resolve, reject) => {
+    verifyCallback(userinfo, (err, user, info) => {
+      if (err) return reject(err);
+      resolve({ user, info });
+    });
+  });
+
+describe('ldapStrategy', () => {
+  beforeEach(() => {
+    jest.clearAllMocks();
+
+    // minimal required env for ldapStrategy module to export
+    process.env.LDAP_URL = 'ldap://example.com';
+    process.env.LDAP_USER_SEARCH_BASE = 'ou=users,dc=example,dc=com';
+
+    // Unset optional envs to exercise defaults
+    delete process.env.LDAP_CA_CERT_PATH;
+    delete process.env.LDAP_FULL_NAME;
+    delete process.env.LDAP_ID;
+    delete process.env.LDAP_USERNAME;
+    delete process.env.LDAP_EMAIL;
+    delete process.env.LDAP_TLS_REJECT_UNAUTHORIZED;
+    delete process.env.LDAP_STARTTLS;
+
+    // Default model/domain mocks
+    findUser.mockReset().mockResolvedValue(null);
+    createUser.mockReset().mockResolvedValue('newUserId');
+    updateUser.mockReset().mockImplementation(async (id, user) => ({ _id: id, ...user }));
+    countUsers.mockReset().mockResolvedValue(0);
+    isEmailDomainAllowed.mockReset().mockReturnValue(true);
+
+    // Ensure requiring the strategy sets up the verify callback
+    jest.isolateModules(() => {
+      require('./ldapStrategy');
+    });
+  });
+
+  it('uses the first email when LDAP returns multiple emails (array)', async () => {
+    const userinfo = {
+      uid: 'uid123',
+      givenName: 'Alice',
+      cn: 'Alice Doe',
+      mail: ['first@example.com', 'second@example.com'],
+    };
+
+    const { user } = await callVerify(userinfo);
+
+    expect(user.email).toBe('first@example.com');
+    expect(createUser).toHaveBeenCalledWith(
+      expect.objectContaining({
+        provider: 'ldap',
+        ldapId: 'uid123',
+        username: 'Alice',
+        email: 'first@example.com',
+        emailVerified: true,
+        name: 'Alice Doe',
+      }),
+      expect.any(Object),
+    );
+  });
+
+  it('blocks login if an existing user has a different provider', async () => {
+    findUser.mockResolvedValue({ _id: 'u1', email: 'first@example.com', provider: 'google' });
+
+    const userinfo = {
+      uid: 'uid123',
+      mail: 'first@example.com',
+      givenName: 'Alice',
+      cn: 'Alice Doe',
+    };
+
+    const { user, info } = await callVerify(userinfo);
+
+    expect(user).toBe(false);
+    expect(info).toEqual({ message: ErrorTypes.AUTH_FAILED });
+    expect(createUser).not.toHaveBeenCalled();
+  });
+
+  it('updates an existing ldap user with current LDAP info', async () => {
+    const existing = {
+      _id: 'u2',
+      provider: 'ldap',
+      email: 'old@example.com',
+      ldapId: 'uid123',
+      username: 'olduser',
+      name: 'Old Name',
+    };
+    findUser.mockResolvedValue(existing);
+
+    const userinfo = {
+      uid: 'uid123',
+      mail: 'new@example.com',
+      givenName: 'NewFirst',
+      cn: 'NewFirst NewLast',
+    };
+
+    const { user } = await callVerify(userinfo);
+
+    expect(createUser).not.toHaveBeenCalled();
+    expect(updateUser).toHaveBeenCalledWith(
+      'u2',
+      expect.objectContaining({
+        provider: 'ldap',
+        ldapId: 'uid123',
+        email: 'new@example.com',
+        username: 'NewFirst',
+        name: 'NewFirst NewLast',
+      }),
+    );
+    expect(user.email).toBe('new@example.com');
+  });
+
+  it('falls back to username@ldap.local when no email attributes are present', async () => {
+    const userinfo = {
+      uid: 'uid999',
+      givenName: 'John',
+      cn: 'John Doe',
+      // no mail and no custom LDAP_EMAIL
+    };
+
+    const { user } = await callVerify(userinfo);
+
+    expect(user.email).toBe('John@ldap.local');
+  });
+
+  it('denies login if email domain is not allowed', async () => {
+    isEmailDomainAllowed.mockReturnValue(false);
+
+    const userinfo = {
+      uid: 'uid123',
+      mail: 'notallowed@blocked.com',
+      givenName: 'Alice',
+      cn: 'Alice Doe',
+    };
+
+    const { user, info } = await callVerify(userinfo);
+    expect(user).toBe(false);
+    expect(info).toEqual({ message: 'Email domain not allowed' });
+  });
+});

@@ -41,13 +41,18 @@ const openIdJwtLogin = (openIdConfig) => {
       jwtFromRequest: ExtractJwt.fromAuthHeaderAsBearerToken(),
       secretOrKeyProvider: jwksRsa.passportJwtSecret(jwksRsaOptions),
     },
+    /**
+     * @param {import('openid-client').IDToken} payload
+     * @param {import('passport-jwt').VerifyCallback} done
+     */
     async (payload, done) => {
       try {
         const { user, error, migration } = await findOpenIDUser({
-          openidId: payload?.sub,
-          email: payload?.email,
-          strategyName: 'openIdJwtLogin',
           findUser,
+          email: payload?.email,
+          openidId: payload?.sub,
+          idOnTheSource: payload?.oid,
+          strategyName: 'openIdJwtLogin',
         });

         if (error) {

@@ -13,6 +13,7 @@ const {
   safeStringify,
   findOpenIDUser,
   getBalanceConfig,
+  isEmailDomainAllowed,
 } = require('@librechat/api');
 const { getStrategyFunctions } = require('~/server/services/Files/strategies');
 const { findUser, createUser, updateUser } = require('~/models');

@@ -336,14 +337,32 @@ async function setupOpenId() {
         clockTolerance: process.env.OPENID_CLOCK_TOLERANCE || 300,
         usePKCE,
       },
+      /**
+       * @param {import('openid-client').TokenEndpointResponseHelpers} tokenset
+       * @param {import('passport-jwt').VerifyCallback} done
+       */
       async (tokenset, done) => {
         try {
           const claims = tokenset.claims();
+          const userinfo = {
+            ...claims,
+            ...(await getUserInfo(openidConfig, tokenset.access_token, claims.sub)),
+          };
+
+          const appConfig = await getAppConfig();
+          if (!isEmailDomainAllowed(userinfo.email, appConfig?.registration?.allowedDomains)) {
+            logger.error(
+              `[OpenID Strategy] Authentication blocked - email domain not allowed [Email: ${userinfo.email}]`,
+            );
+            return done(null, false, { message: 'Email domain not allowed' });
+          }
+
           const result = await findOpenIDUser({
-            openidId: claims.sub,
-            email: claims.email,
-            strategyName: 'openidStrategy',
             findUser,
+            email: claims.email,
+            openidId: claims.sub,
+            idOnTheSource: claims.oid,
+            strategyName: 'openidStrategy',
           });
           let user = result.user;
           const error = result.error;

@@ -353,13 +372,14 @@ async function setupOpenId() {
               message: ErrorTypes.AUTH_FAILED,
             });
           }
-          const userinfo = {
-            ...claims,
-            ...(await getUserInfo(openidConfig, tokenset.access_token, claims.sub)),
-          };
-
           const fullName = getFullName(userinfo);

           if (requiredRole) {
+            const requiredRoles = requiredRole
+              .split(',')
+              .map((role) => role.trim())
+              .filter(Boolean);
             let decodedToken = '';
             if (requiredRoleTokenKind === 'access') {
               decodedToken = jwtDecode(tokenset.access_token);

@@ -382,9 +402,13 @@ async function setupOpenId() {
              );
            }

-            if (!roles.includes(requiredRole)) {
+            if (!requiredRoles.some((role) => roles.includes(role))) {
+              const rolesList =
+                requiredRoles.length === 1
+                  ? `"${requiredRoles[0]}"`
+                  : `one of: ${requiredRoles.map((r) => `"${r}"`).join(', ')}`;
               return done(null, false, {
-                message: `You must have the "${requiredRole}" role to log in.`,
+                message: `You must have ${rolesList} role to log in.`,
               });
             }
           }

@@ -398,7 +422,6 @@ async function setupOpenId() {
             );
           }

-          const appConfig = await getAppConfig();
           if (!user) {
             user = {
               provider: 'openid',

@@ -418,6 +441,10 @@ async function setupOpenId() {
             user.username = username;
             user.name = fullName;
             user.idOnTheSource = userinfo.oid;
+            if (userinfo.email && userinfo.email !== user.email) {
+              user.email = userinfo.email;
+              user.emailVerified = userinfo.email_verified || false;
+            }
           }

           if (!!userinfo && userinfo.picture && !user.avatar?.includes('manual=true')) {

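Note: a standalone sketch of the new comma-separated OPENID_REQUIRED_ROLE handling, using the same split/trim/filter/some chain as the hunk above (env value and roles are illustrative):

const requiredRole = ' someRole , anotherRole , admin '; // e.g. OPENID_REQUIRED_ROLE
const requiredRoles = requiredRole
  .split(',')
  .map((role) => role.trim())
  .filter(Boolean); // ['someRole', 'anotherRole', 'admin']
const roles = ['anotherRole']; // roles decoded from the token
const allowed = requiredRoles.some((role) => roles.includes(role));
console.log(allowed); // true — any one matching role grants access
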
@@ -274,10 +274,7 @@ describe('setupOpenId', () => {
       name: '',
     };
     findUser.mockImplementation(async (query) => {
-      if (
-        query.openidId === tokenset.claims().sub ||
-        (query.email === tokenset.claims().email && query.provider === 'openid')
-      ) {
+      if (query.openidId === tokenset.claims().sub || query.email === tokenset.claims().email) {
         return existingUser;
       }
       return null;

@@ -338,7 +335,25 @@ describe('setupOpenId', () => {

      // Assert – verify that the strategy rejects login
      expect(user).toBe(false);
-      expect(details.message).toBe('You must have the "requiredRole" role to log in.');
+      expect(details.message).toBe('You must have "requiredRole" role to log in.');
    });

+    it('should allow login when single required role is present (backward compatibility)', async () => {
+      // Arrange – ensure single role configuration (as set in beforeEach)
+      // OPENID_REQUIRED_ROLE = 'requiredRole'
+      // Default jwtDecode mock in beforeEach already returns this role
+      jwtDecode.mockReturnValue({
+        roles: ['requiredRole', 'anotherRole'],
+      });
+
+      // Act
+      const { user } = await validate(tokenset);
+
+      // Assert – verify that login succeeds with single role configuration
+      expect(user).toBeTruthy();
+      expect(user.email).toBe(tokenset.claims().email);
+      expect(user.username).toBe(tokenset.claims().preferred_username);
+      expect(createUser).toHaveBeenCalled();
+    });
+
    it('should attempt to download and save the avatar if picture is provided', async () => {

@@ -364,6 +379,58 @@ describe('setupOpenId', () => {
      // Depending on your implementation, user.avatar may be undefined or an empty string.
    });

+    it('should support comma-separated multiple roles', async () => {
+      // Arrange
+      process.env.OPENID_REQUIRED_ROLE = 'someRole,anotherRole,admin';
+      await setupOpenId(); // Re-initialize the strategy
+      verifyCallback = require('openid-client/passport').__getVerifyCallback();
+      jwtDecode.mockReturnValue({
+        roles: ['anotherRole', 'aThirdRole'],
+      });
+
+      // Act
+      const { user } = await validate(tokenset);
+
+      // Assert
+      expect(user).toBeTruthy();
+      expect(user.email).toBe(tokenset.claims().email);
+    });
+
+    it('should reject login when user has none of the required multiple roles', async () => {
+      // Arrange
+      process.env.OPENID_REQUIRED_ROLE = 'someRole,anotherRole,admin';
+      await setupOpenId(); // Re-initialize the strategy
+      verifyCallback = require('openid-client/passport').__getVerifyCallback();
+      jwtDecode.mockReturnValue({
+        roles: ['aThirdRole', 'aFourthRole'],
+      });
+
+      // Act
+      const { user, details } = await validate(tokenset);
+
+      // Assert
+      expect(user).toBe(false);
+      expect(details.message).toBe(
+        'You must have one of: "someRole", "anotherRole", "admin" role to log in.',
+      );
+    });
+
+    it('should handle spaces in comma-separated roles', async () => {
+      // Arrange
+      process.env.OPENID_REQUIRED_ROLE = ' someRole , anotherRole , admin ';
+      await setupOpenId(); // Re-initialize the strategy
+      verifyCallback = require('openid-client/passport').__getVerifyCallback();
+      jwtDecode.mockReturnValue({
+        roles: ['someRole'],
+      });
+
+      // Act
+      const { user } = await validate(tokenset);
+
+      // Assert
+      expect(user).toBeTruthy();
+    });
+
    it('should default to usePKCE false when OPENID_USE_PKCE is not defined', async () => {
      const OpenIDStrategy = require('openid-client/passport').Strategy;

@@ -2,10 +2,10 @@ const fs = require('fs');
 const path = require('path');
 const fetch = require('node-fetch');
 const passport = require('passport');
-const { getBalanceConfig } = require('@librechat/api');
 const { ErrorTypes } = require('librechat-data-provider');
 const { hashToken, logger } = require('@librechat/data-schemas');
 const { Strategy: SamlStrategy } = require('@node-saml/passport-saml');
+const { getBalanceConfig, isEmailDomainAllowed } = require('@librechat/api');
 const { getStrategyFunctions } = require('~/server/services/Files/strategies');
 const { findUser, createUser, updateUser } = require('~/models');
 const { getAppConfig } = require('~/server/services/Config');

@@ -192,16 +192,25 @@ async function setupSaml() {
       logger.info(`[samlStrategy] SAML authentication received for NameID: ${profile.nameID}`);
       logger.debug('[samlStrategy] SAML profile:', profile);

+      const userEmail = getEmail(profile) || '';
+      const appConfig = await getAppConfig();
+
+      if (!isEmailDomainAllowed(userEmail, appConfig?.registration?.allowedDomains)) {
+        logger.error(
+          `[SAML Strategy] Authentication blocked - email domain not allowed [Email: ${userEmail}]`,
+        );
+        return done(null, false, { message: 'Email domain not allowed' });
+      }
+
       let user = await findUser({ samlId: profile.nameID });
       logger.info(
         `[samlStrategy] User ${user ? 'found' : 'not found'} with SAML ID: ${profile.nameID}`,
       );

       if (!user) {
-        const email = getEmail(profile) || '';
-        user = await findUser({ email });
+        user = await findUser({ email: userEmail });
         logger.info(
-          `[samlStrategy] User ${user ? 'found' : 'not found'} with email: ${profile.email}`,
+          `[samlStrategy] User ${user ? 'found' : 'not found'} with email: ${userEmail}`,
         );
       }

@@ -220,13 +229,12 @@ async function setupSaml() {
         getUserName(profile) || getGivenName(profile) || getEmail(profile),
       );

-      const appConfig = await getAppConfig();
       if (!user) {
         user = {
           provider: 'saml',
           samlId: profile.nameID,
           username,
-          email: getEmail(profile) || '',
+          email: userEmail,
           emailVerified: true,
           name: fullName,
         };

@@ -26,6 +26,7 @@ jest.mock('~/server/services/Config', () => ({
   getAppConfig: jest.fn().mockResolvedValue({}),
 }));
 jest.mock('@librechat/api', () => ({
+  isEmailDomainAllowed: jest.fn(() => true),
   getBalanceConfig: jest.fn(() => ({
     tokenCredits: 1000,
     startBalance: 1000,

@@ -1,6 +1,6 @@
-const { isEnabled } = require('@librechat/api');
 const { logger } = require('@librechat/data-schemas');
 const { ErrorTypes } = require('librechat-data-provider');
+const { isEnabled, isEmailDomainAllowed } = require('@librechat/api');
 const { createSocialUser, handleExistingUser } = require('./process');
 const { getAppConfig } = require('~/server/services/Config');
 const { findUser } = require('~/models');

@@ -14,8 +14,18 @@ const socialLogin =
       });

       const appConfig = await getAppConfig();
+
+      if (!isEmailDomainAllowed(email, appConfig?.registration?.allowedDomains)) {
+        logger.error(
+          `[${provider}Login] Authentication blocked - email domain not allowed [Email: ${email}]`,
+        );
+        const error = new Error(ErrorTypes.AUTH_FAILED);
+        error.code = ErrorTypes.AUTH_FAILED;
+        error.message = 'Email domain not allowed';
+        return cb(error);
+      }
+
       const existingUser = await findUser({ email: email.trim() });
-      const ALLOW_SOCIAL_REGISTRATION = isEnabled(process.env.ALLOW_SOCIAL_REGISTRATION);

       if (existingUser?.provider === provider) {
         await handleExistingUser(existingUser, avatarUrl, appConfig);

@@ -30,20 +40,29 @@ const socialLogin =
         return cb(error);
       }

-      if (ALLOW_SOCIAL_REGISTRATION) {
-        const newUser = await createSocialUser({
-          email,
-          avatarUrl,
-          provider,
-          providerKey: `${provider}Id`,
-          providerId: id,
-          username,
-          name,
-          emailVerified,
-          appConfig,
-        });
-        return cb(null, newUser);
+      const ALLOW_SOCIAL_REGISTRATION = isEnabled(process.env.ALLOW_SOCIAL_REGISTRATION);
+      if (!ALLOW_SOCIAL_REGISTRATION) {
+        logger.error(
+          `[${provider}Login] Registration blocked - social registration is disabled [Email: ${email}]`,
+        );
+        const error = new Error(ErrorTypes.AUTH_FAILED);
+        error.code = ErrorTypes.AUTH_FAILED;
+        error.message = 'Social registration is disabled';
+        return cb(error);
       }
+
+      const newUser = await createSocialUser({
+        email,
+        avatarUrl,
+        provider,
+        providerKey: `${provider}Id`,
+        providerId: id,
+        username,
+        name,
+        emailVerified,
+        appConfig,
+      });
+      return cb(null, newUser);
     } catch (err) {
       logger.error(`[${provider}Login]`, err);
       return cb(err);

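Note: LDAP, OpenID, SAML, and social login now share the same email-domain gate. A condensed sketch of the pattern — the checker here is a stub standing in for @librechat/api's isEmailDomainAllowed, which is called with the same (email, allowedDomains) shape; its real matching rules may differ:

// Stub with the same call shape as @librechat/api's isEmailDomainAllowed (assumption).
const isEmailDomainAllowed = (email, allowedDomains) =>
  !allowedDomains || allowedDomains.some((d) => email.endsWith(`@${d}`));

function gate(email, appConfig, done) {
  if (!isEmailDomainAllowed(email, appConfig?.registration?.allowedDomains)) {
    return done(null, false, { message: 'Email domain not allowed' });
  }
  return done(null, { email });
}

gate('user@blocked.com', { registration: { allowedDomains: ['example.com'] } }, console.log);
// -> null false { message: 'Email domain not allowed' }
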
@@ -46,7 +46,7 @@ describe('fileSearch.js - test only new file_id and page additions', () => {
     queryVectors.mockResolvedValue(mockResults);

     const fileSearchTool = await createFileSearchTool({
-      req: { user: { id: 'user1' } },
+      userId: 'user1',
       files: mockFiles,
       entity_id: 'agent-123',
     });

@@ -873,6 +873,13 @@
  * @typedef {import('@librechat/data-schemas').IMongoFile} MongoFile
  * @memberof typedefs
  */

+/**
+ * @exports ISession
+ * @typedef {import('@librechat/data-schemas').ISession} ISession
+ * @memberof typedefs
+ */
+
 /**
  * @exports IBalance
  * @typedef {import('@librechat/data-schemas').IBalance} IBalance

@@ -1,7 +1,7 @@
 const axios = require('axios');
 const deriveBaseURL = require('./deriveBaseURL');
-jest.mock('~/utils', () => {
-  const originalUtils = jest.requireActual('~/utils');
+jest.mock('@librechat/api', () => {
+  const originalUtils = jest.requireActual('@librechat/api');
   return {
     ...originalUtils,
     processModelData: jest.fn((...args) => {

@@ -1,4 +1,3 @@
-const tokenHelpers = require('./tokens');
 const deriveBaseURL = require('./deriveBaseURL');
 const extractBaseURL = require('./extractBaseURL');
 const findMessageContent = require('./findMessageContent');

@@ -6,6 +5,5 @@ const findMessageContent = require('./findMessageContent');
 module.exports = {
   deriveBaseURL,
   extractBaseURL,
-  ...tokenHelpers,
   findMessageContent,
 };

@@ -1,12 +1,12 @@
 const { EModelEndpoint } = require('librechat-data-provider');
 const {
-  maxTokensMap,
-  matchModelName,
-  processModelData,
-  getModelMaxTokens,
   maxOutputTokensMap,
   findMatchingPattern,
-} = require('./tokens');
+  getModelMaxTokens,
+  processModelData,
+  matchModelName,
+  maxTokensMap,
+} = require('@librechat/api');

 describe('getModelMaxTokens', () => {
   test('should return correct tokens for exact match', () => {

@@ -262,6 +262,15 @@ describe('getModelMaxTokens', () => {
     expect(getModelMaxTokens('gemini-1.5-pro-preview-0409', EModelEndpoint.google)).toBe(
       maxTokensMap[EModelEndpoint.google]['gemini-1.5'],
     );
+    expect(getModelMaxTokens('gemini-2.5-pro', EModelEndpoint.google)).toBe(
+      maxTokensMap[EModelEndpoint.google]['gemini-2.5-pro'],
+    );
+    expect(getModelMaxTokens('gemini-2.5-flash', EModelEndpoint.google)).toBe(
+      maxTokensMap[EModelEndpoint.google]['gemini-2.5-flash'],
+    );
+    expect(getModelMaxTokens('gemini-2.5-flash-lite', EModelEndpoint.google)).toBe(
+      maxTokensMap[EModelEndpoint.google]['gemini-2.5-flash-lite'],
+    );
     expect(getModelMaxTokens('gemini-pro-vision', EModelEndpoint.google)).toBe(
       maxTokensMap[EModelEndpoint.google]['gemini-pro-vision'],
     );

@@ -394,7 +403,7 @@ describe('getModelMaxTokens', () => {
   });

   test('should return correct max output tokens for GPT-5 models', () => {
-    const { getModelMaxOutputTokens } = require('./tokens');
+    const { getModelMaxOutputTokens } = require('@librechat/api');
     ['gpt-5', 'gpt-5-mini', 'gpt-5-nano'].forEach((model) => {
       expect(getModelMaxOutputTokens(model)).toBe(maxOutputTokensMap[EModelEndpoint.openAI][model]);
       expect(getModelMaxOutputTokens(model, EModelEndpoint.openAI)).toBe(

@@ -407,7 +416,7 @@ describe('getModelMaxTokens', () => {
   });

   test('should return correct max output tokens for GPT-OSS models', () => {
-    const { getModelMaxOutputTokens } = require('./tokens');
+    const { getModelMaxOutputTokens } = require('@librechat/api');
     ['gpt-oss-20b', 'gpt-oss-120b'].forEach((model) => {
       expect(getModelMaxOutputTokens(model)).toBe(maxOutputTokensMap[EModelEndpoint.openAI][model]);
       expect(getModelMaxOutputTokens(model, EModelEndpoint.openAI)).toBe(

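Note: since the token helpers now come from @librechat/api, consumers resolve limits roughly like this (a minimal sketch; the exact-key/family-pattern fallback is what the gemini assertions above exercise):

const { getModelMaxTokens, maxTokensMap } = require('@librechat/api');
const { EModelEndpoint } = require('librechat-data-provider');

// Exact keys win; otherwise a matching family pattern in maxTokensMap is used,
// e.g. 'gemini-1.5-pro-preview-0409' resolves via the 'gemini-1.5' entry.
console.log(getModelMaxTokens('gemini-2.5-flash-lite', EModelEndpoint.google));
console.log(maxTokensMap[EModelEndpoint.google]['gemini-2.5-flash-lite']); // same value
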
@@ -1,3 +1,4 @@
+/** v0.8.0 */
 module.exports = {
   roots: ['<rootDir>/src'],
   testEnvironment: 'jsdom',

@@ -28,7 +29,8 @@ module.exports = {
     'jest-file-loader',
     '^test/(.*)$': '<rootDir>/test/$1',
     '^~/(.*)$': '<rootDir>/src/$1',
-    '^librechat-data-provider/react-query$': '<rootDir>/../node_modules/librechat-data-provider/src/react-query',
+    '^librechat-data-provider/react-query$':
+      '<rootDir>/../node_modules/librechat-data-provider/src/react-query',
   },
   restoreMocks: true,
   testResultsProcessor: 'jest-junit',

@@ -1,6 +1,6 @@
 {
   "name": "@librechat/frontend",
-  "version": "v0.8.0-rc3",
+  "version": "v0.8.0",
   "description": "",
   "type": "module",
   "scripts": {
@@ -148,8 +148,8 @@
     "tailwindcss": "^3.4.1",
     "ts-jest": "^29.2.5",
     "typescript": "^5.3.3",
-    "vite": "^6.3.4",
-    "vite-plugin-compression2": "^1.3.3",
+    "vite": "^6.3.6",
+    "vite-plugin-compression2": "^2.2.1",
     "vite-plugin-node-polyfills": "^0.23.0",
     "vite-plugin-pwa": "^0.21.2"
   }
@@ -1,13 +1,15 @@
 import React, { createContext, useContext, useState, useMemo } from 'react';
-import { Constants, EModelEndpoint } from 'librechat-data-provider';
-import type { MCP, Action, TPlugin, AgentToolType } from 'librechat-data-provider';
+import { EModelEndpoint } from 'librechat-data-provider';
+import type { MCP, Action, TPlugin } from 'librechat-data-provider';
 import type { AgentPanelContextType, MCPServerInfo } from '~/common';
-import { useAvailableToolsQuery, useGetActionsQuery, useGetStartupConfig } from '~/data-provider';
+import {
+  useAvailableToolsQuery,
+  useGetActionsQuery,
+  useGetStartupConfig,
+  useMCPToolsQuery,
+} from '~/data-provider';
 import { useLocalize, useGetAgentsConfig, useMCPConnectionStatus } from '~/hooks';
-import { Panel } from '~/common';
-
-type GroupedToolType = AgentToolType & { tools?: AgentToolType[] };
-type GroupedToolsRecord = Record<string, GroupedToolType>;
+import { Panel, isEphemeralAgent } from '~/common';
 
 const AgentPanelContext = createContext<AgentPanelContextType | undefined>(undefined);
 
@@ -28,79 +30,67 @@ export function AgentPanelProvider({ children }: { children: React.ReactNode })
   const [activePanel, setActivePanel] = useState<Panel>(Panel.builder);
   const [agent_id, setCurrentAgentId] = useState<string | undefined>(undefined);
 
-  const { data: actions } = useGetActionsQuery(EModelEndpoint.agents, {
-    enabled: !!agent_id,
-  });
-
-  const { data: pluginTools } = useAvailableToolsQuery(EModelEndpoint.agents, {
-    enabled: !!agent_id,
-  });
-
   const { data: startupConfig } = useGetStartupConfig();
+  const { data: actions } = useGetActionsQuery(EModelEndpoint.agents, {
+    enabled: !isEphemeralAgent(agent_id),
+  });
+
+  const { data: regularTools } = useAvailableToolsQuery(EModelEndpoint.agents, {
+    enabled: !isEphemeralAgent(agent_id),
+  });
+
+  const { data: mcpData } = useMCPToolsQuery({
+    enabled: !isEphemeralAgent(agent_id) && startupConfig?.mcpServers != null,
+  });
+
+  const { agentsConfig, endpointsConfig } = useGetAgentsConfig();
   const mcpServerNames = useMemo(
     () => Object.keys(startupConfig?.mcpServers ?? {}),
     [startupConfig],
   );
 
   const { connectionStatus } = useMCPConnectionStatus({
-    enabled: !!agent_id && mcpServerNames.length > 0,
+    enabled: !isEphemeralAgent(agent_id) && mcpServerNames.length > 0,
   });
 
-  const processedData = useMemo(() => {
-    if (!pluginTools) {
-      return {
-        tools: [],
-        groupedTools: {},
-        mcpServersMap: new Map<string, MCPServerInfo>(),
-      };
-    }
-
-    const tools: AgentToolType[] = [];
-    const groupedTools: GroupedToolsRecord = {};
+  const mcpServersMap = useMemo(() => {
     const configuredServers = new Set(mcpServerNames);
-    const mcpServersMap = new Map<string, MCPServerInfo>();
+    const serversMap = new Map<string, MCPServerInfo>();
 
-    for (const pluginTool of pluginTools) {
-      const tool: AgentToolType = {
-        tool_id: pluginTool.pluginKey,
-        metadata: pluginTool as TPlugin,
-      };
+    if (mcpData?.servers) {
+      for (const [serverName, serverData] of Object.entries(mcpData.servers)) {
+        const metadata = {
+          name: serverName,
+          pluginKey: serverName,
+          description: `${localize('com_ui_tool_collection_prefix')} ${serverName}`,
+          icon: serverData.icon || '',
+          authConfig: serverData.authConfig,
+          authenticated: serverData.authenticated,
+        } as TPlugin;
 
-      tools.push(tool);
+        const tools = serverData.tools.map((tool) => ({
+          tool_id: tool.pluginKey,
+          metadata: {
+            ...tool,
+            icon: serverData.icon,
+            authConfig: serverData.authConfig,
+            authenticated: serverData.authenticated,
+          } as TPlugin,
+        }));
 
-      if (tool.tool_id.includes(Constants.mcp_delimiter)) {
-        const [_toolName, serverName] = tool.tool_id.split(Constants.mcp_delimiter);
-
-        if (!mcpServersMap.has(serverName)) {
-          const metadata = {
-            name: serverName,
-            pluginKey: serverName,
-            description: `${localize('com_ui_tool_collection_prefix')} ${serverName}`,
-            icon: pluginTool.icon || '',
-          } as TPlugin;
-
-          mcpServersMap.set(serverName, {
-            serverName,
-            tools: [],
-            isConfigured: configuredServers.has(serverName),
-            isConnected: connectionStatus?.[serverName]?.connectionState === 'connected',
-            metadata,
-          });
-        }
-
-        mcpServersMap.get(serverName)!.tools.push(tool);
-      } else {
-        // Non-MCP tool
-        groupedTools[tool.tool_id] = {
-          tool_id: tool.tool_id,
-          metadata: tool.metadata,
-        };
+        serversMap.set(serverName, {
+          serverName,
+          tools,
+          isConfigured: configuredServers.has(serverName),
+          isConnected: connectionStatus?.[serverName]?.connectionState === 'connected',
+          metadata,
+        });
       }
     }
 
     // Add configured servers that don't have tools yet
     for (const mcpServerName of mcpServerNames) {
-      if (mcpServersMap.has(mcpServerName)) {
+      if (serversMap.has(mcpServerName)) {
         continue;
       }
       const metadata = {
@@ -110,7 +100,7 @@ export function AgentPanelProvider({ children }: { children: React.ReactNode })
        description: `${localize('com_ui_tool_collection_prefix')} ${mcpServerName}`,
      } as TPlugin;
 
-      mcpServersMap.set(mcpServerName, {
+      serversMap.set(mcpServerName, {
        tools: [],
        metadata,
        isConfigured: true,
@@ -119,14 +109,8 @@ export function AgentPanelProvider({ children }: { children: React.ReactNode })
      });
    }
 
-    return {
-      tools,
-      groupedTools,
-      mcpServersMap,
-    };
-  }, [pluginTools, localize, mcpServerNames, connectionStatus]);
-
-  const { agentsConfig, endpointsConfig } = useGetAgentsConfig();
+    return serversMap;
+  }, [mcpData, localize, mcpServerNames, connectionStatus]);
 
   const value: AgentPanelContextType = {
     mcp,
@@ -137,16 +121,14 @@
     setMcps,
     agent_id,
     setAction,
-    pluginTools,
     activePanel,
+    regularTools,
     agentsConfig,
     startupConfig,
+    mcpServersMap,
     setActivePanel,
     endpointsConfig,
     setCurrentAgentId,
-    tools: processedData.tools,
-    groupedTools: processedData.groupedTools,
-    mcpServersMap: processedData.mcpServersMap,
   };
 
   return <AgentPanelContext.Provider value={value}>{children}</AgentPanelContext.Provider>;
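Net effect of this hunk: the provider no longer derives MCP servers by splitting plugin keys client-side; it consumes the server-grouped payload from useMCPToolsQuery and exposes a Map keyed by server name. A consumer sketch in TSX (the useAgentPanelContext accessor and its export path are assumed, since this hunk does not show them):

import { useAgentPanelContext } from '~/Providers'; // assumed export path

function MCPServerBadges() {
  const { mcpServersMap } = useAgentPanelContext();
  return (
    <ul>
      {[...mcpServersMap.values()].map(({ serverName, tools, isConnected }) => (
        // Each MCPServerInfo entry carries its tools plus connection metadata.
        <li key={serverName}>
          {serverName}: {isConnected ? 'connected' : 'disconnected'}, {tools.length} tools
        </li>
      ))}
    </ul>
  );
}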
@@ -1,19 +1,19 @@
-import React, { createContext, useContext, useEffect, useMemo, useRef } from 'react';
+import React, { createContext, useContext, useEffect, useRef } from 'react';
 import { useSetRecoilState } from 'recoil';
 import { Tools, Constants, LocalStorageKeys, AgentCapabilities } from 'librechat-data-provider';
 import type { TAgentsEndpoint } from 'librechat-data-provider';
 import {
+  useMCPServerManager,
   useSearchApiKeyForm,
   useGetAgentsConfig,
   useCodeApiKeyForm,
-  useGetMCPTools,
   useToolToggle,
 } from '~/hooks';
+import { getTimestampedValue, setTimestamp } from '~/utils/timestamps';
 import { ephemeralAgentByConvoId } from '~/store';
 
 interface BadgeRowContextType {
   conversationId?: string | null;
-  mcpServerNames?: string[] | null;
   agentsConfig?: TAgentsEndpoint | null;
   webSearch: ReturnType<typeof useToolToggle>;
   artifacts: ReturnType<typeof useToolToggle>;
@@ -21,6 +21,7 @@ interface BadgeRowContextType {
   codeInterpreter: ReturnType<typeof useToolToggle>;
   codeApiKeyForm: ReturnType<typeof useCodeApiKeyForm>;
   searchApiKeyForm: ReturnType<typeof useSearchApiKeyForm>;
+  mcpServerManager: ReturnType<typeof useMCPServerManager>;
 }
 
 const BadgeRowContext = createContext<BadgeRowContextType | undefined>(undefined);
@@ -46,7 +47,6 @@ export default function BadgeRowProvider({
 }: BadgeRowProviderProps) {
   const lastKeyRef = useRef<string>('');
   const hasInitializedRef = useRef(false);
-  const { mcpToolDetails } = useGetMCPTools();
   const { agentsConfig } = useGetAgentsConfig();
   const key = conversationId ?? Constants.NEW_CONVO;
 
@@ -62,16 +62,15 @@
       hasInitializedRef.current = true;
       lastKeyRef.current = key;
 
-      // Load all localStorage values
       const codeToggleKey = `${LocalStorageKeys.LAST_CODE_TOGGLE_}${key}`;
       const webSearchToggleKey = `${LocalStorageKeys.LAST_WEB_SEARCH_TOGGLE_}${key}`;
       const fileSearchToggleKey = `${LocalStorageKeys.LAST_FILE_SEARCH_TOGGLE_}${key}`;
       const artifactsToggleKey = `${LocalStorageKeys.LAST_ARTIFACTS_TOGGLE_}${key}`;
 
-      const codeToggleValue = localStorage.getItem(codeToggleKey);
-      const webSearchToggleValue = localStorage.getItem(webSearchToggleKey);
-      const fileSearchToggleValue = localStorage.getItem(fileSearchToggleKey);
-      const artifactsToggleValue = localStorage.getItem(artifactsToggleKey);
+      const codeToggleValue = getTimestampedValue(codeToggleKey);
+      const webSearchToggleValue = getTimestampedValue(webSearchToggleKey);
+      const fileSearchToggleValue = getTimestampedValue(fileSearchToggleKey);
+      const artifactsToggleValue = getTimestampedValue(artifactsToggleKey);
 
       const initialValues: Record<string, any> = {};
 
@@ -107,15 +106,37 @@
         }
       }
 
-      // Always set values for all tools (use defaults if not in localStorage)
-      // If ephemeralAgent is null, create a new object with just our tool values
-      setEphemeralAgent((prev) => ({
-        ...(prev || {}),
+      /**
+       * Always set values for all tools (use defaults if not in `localStorage`)
+       * If `ephemeralAgent` is `null`, create a new object with just our tool values
+       */
+      const finalValues = {
         [Tools.execute_code]: initialValues[Tools.execute_code] ?? false,
         [Tools.web_search]: initialValues[Tools.web_search] ?? false,
         [Tools.file_search]: initialValues[Tools.file_search] ?? false,
         [AgentCapabilities.artifacts]: initialValues[AgentCapabilities.artifacts] ?? false,
-      }));
+      };
+
+      setEphemeralAgent((prev) => ({
+        ...(prev || {}),
+        ...finalValues,
+      }));
+
+      Object.entries(finalValues).forEach(([toolKey, value]) => {
+        if (value !== false) {
+          let storageKey = artifactsToggleKey;
+          if (toolKey === Tools.execute_code) {
+            storageKey = codeToggleKey;
+          } else if (toolKey === Tools.web_search) {
+            storageKey = webSearchToggleKey;
+          } else if (toolKey === Tools.file_search) {
+            storageKey = fileSearchToggleKey;
+          }
+          // Store the value and set timestamp for existing values
+          localStorage.setItem(storageKey, JSON.stringify(value));
+          setTimestamp(storageKey);
+        }
+      });
     }
   }, [key, isSubmitting, setEphemeralAgent]);
 
@@ -165,20 +186,18 @@
     isAuthenticated: true,
   });
 
-  const mcpServerNames = useMemo(() => {
-    return (mcpToolDetails ?? []).map((tool) => tool.name);
-  }, [mcpToolDetails]);
+  const mcpServerManager = useMCPServerManager({ conversationId });
 
   const value: BadgeRowContextType = {
     webSearch,
     artifacts,
     fileSearch,
     agentsConfig,
-    mcpServerNames,
     conversationId,
     codeApiKeyForm,
     codeInterpreter,
     searchApiKeyForm,
+    mcpServerManager,
   };
 
   return <BadgeRowContext.Provider value={value}>{children}</BadgeRowContext.Provider>;
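getTimestampedValue and setTimestamp come from ~/utils/timestamps, whose implementation is outside this diff; the sketch below is only a plausible reading of the pattern they enable (timestamp-gated localStorage reads), with the key suffix and expiry window invented for illustration:

// Hypothetical sketch; not the actual ~/utils/timestamps implementation.
const TIMESTAMP_SUFFIX = '_timestamp'; // assumed key convention
const MAX_AGE_MS = 24 * 60 * 60 * 1000; // assumed expiry window

export function setTimestamp(key: string): void {
  localStorage.setItem(`${key}${TIMESTAMP_SUFFIX}`, String(Date.now()));
}

export function getTimestampedValue(key: string): string | null {
  const stamp = localStorage.getItem(`${key}${TIMESTAMP_SUFFIX}`);
  // A value without a fresh timestamp is treated as absent, so stale toggles fall back to defaults.
  if (stamp == null || Date.now() - Number(stamp) > MAX_AGE_MS) {
    return null;
  }
  return localStorage.getItem(key);
}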
client/src/Providers/DragDropContext.tsx (new file, 32 lines)
@@ -0,0 +1,32 @@
+import React, { createContext, useContext, useMemo } from 'react';
+import { useChatContext } from './ChatContext';
+
+interface DragDropContextValue {
+  conversationId: string | null | undefined;
+  agentId: string | null | undefined;
+}
+
+const DragDropContext = createContext<DragDropContextValue | undefined>(undefined);
+
+export function DragDropProvider({ children }: { children: React.ReactNode }) {
+  const { conversation } = useChatContext();
+
+  /** Context value only created when conversation fields change */
+  const contextValue = useMemo<DragDropContextValue>(
+    () => ({
+      conversationId: conversation?.conversationId,
+      agentId: conversation?.agent_id,
+    }),
+    [conversation?.conversationId, conversation?.agent_id],
+  );
+
+  return <DragDropContext.Provider value={contextValue}>{children}</DragDropContext.Provider>;
+}
+
+export function useDragDropContext() {
+  const context = useContext(DragDropContext);
+  if (!context) {
+    throw new Error('useDragDropContext must be used within DragDropProvider');
+  }
+  return context;
+}
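A short usage sketch of the new provider/hook pair (component names are illustrative; the provider must render inside ChatContext, since it calls useChatContext):

import { DragDropProvider, useDragDropContext } from '~/Providers/DragDropContext';

function DropTarget() {
  // Re-renders only when conversationId or agentId change, thanks to the useMemo above.
  const { conversationId, agentId } = useDragDropContext();
  return <div data-conversation={conversationId ?? ''} data-agent={agentId ?? ''} />;
}

function ChatDropArea() {
  return (
    <DragDropProvider>
      <DropTarget />
    </DragDropProvider>
  );
}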
@@ -1,10 +1,15 @@
 import { createContext, useContext } from 'react';
 
 type MessageContext = {
   messageId: string;
   nextType?: string;
   partIndex?: number;
+  isExpanded: boolean;
   conversationId?: string | null;
+  /** Submission state for cursor display - only true for latest message when submitting */
+  isSubmitting?: boolean;
+  /** Whether this is the latest message in the conversation */
+  isLatestMessage?: boolean;
 };
 
 export const MessageContext = createContext<MessageContext>({} as MessageContext);
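Because the context is created with a bare createContext (no guard hook), consumers read it directly with useContext; a minimal sketch of the two new fields (component name and module path are illustrative):

import { useContext } from 'react';
import { MessageContext } from '~/Providers/MessageContext'; // assumed module path

function TypingCursor() {
  const { isSubmitting, isLatestMessage } = useContext(MessageContext);
  // Per the field docs above: show the cursor only on the latest message while submitting.
  if (!isSubmitting || !isLatestMessage) {
    return null;
  }
  return <span className="cursor" />; // class name illustrative
}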
Some files were not shown because too many files have changed in this diff.