Compare commits
26 Commits
feat/ui-ad ... v0.7.9-rc1

| Author | SHA1 | Date |
|---|---|---|
| | 9c70d1db96 | |
| | 543281da6c | |
| | 24800bfbeb | |
| | 07e08143e4 | |
| | 8ba61a86f4 | |
| | 56ad92fb1c | |
| | 1ceb52d2b5 | |
| | 5d267aa8e2 | |
| | 59d00e99f3 | |
| | 738d04fac4 | |
| | 8a5dbac0f9 | |
| | 434289fe92 | |
| | a648ad3d13 | |
| | 55d63caaf4 | |
| | 313539d1ed | |
| | f869d772f7 | |
| | 20100e120b | |
| | 3f3cfefc52 | |
| | 3e1591d404 | |
| | 1060ae8040 | |
| | dd67e463e4 | |
| | d60ad61325 | |
| | 452151e408 | |
| | 33b4a97b42 | |
| | 9cdc62b655 | |
| | 799f0e5810 | |
@@ -453,8 +453,8 @@ OPENID_REUSE_TOKENS=
OPENID_JWKS_URL_CACHE_ENABLED=
OPENID_JWKS_URL_CACHE_TIME= # 600000 ms eq to 10 minutes leave empty to disable caching
#Set to true to trigger token exchange flow to acquire access token for the userinfo endpoint.
OPENID_ON_BEHALF_FLOW_FOR_USERINFRO_REQUIRED=
OPENID_ON_BEHALF_FLOW_USERINFRO_SCOPE = "user.read" # example for Scope Needed for Microsoft Graph API
OPENID_ON_BEHALF_FLOW_FOR_USERINFO_REQUIRED=
OPENID_ON_BEHALF_FLOW_USERINFO_SCOPE="user.read" # example for Scope Needed for Microsoft Graph API
# Set to true to use the OpenID Connect end session endpoint for logout
OPENID_USE_END_SESSION_ENDPOINT=

@@ -1,4 +1,4 @@
# v0.7.8
# v0.7.9-rc1

# Base node image
FROM node:20-alpine AS node

@@ -1,5 +1,5 @@
# Dockerfile.multi
# v0.7.8
# v0.7.9-rc1

# Base for all builds
FROM node:20-alpine AS base-min
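The hunk above corrects the misspelled `USERINFRO` environment variables to `USERINFO`. As a hedged illustration only (not part of the diff), this is how the renamed settings would be read at runtime with the `isEnabled` helper used elsewhere in this compare; the `user.read` fallback mirrors the example in the .env comment:

```js
const { isEnabled } = require('@librechat/api');

// Illustrative sketch: reading the corrected variable names.
// OPENID_ON_BEHALF_FLOW_FOR_USERINFO_REQUIRED toggles the token exchange flow,
// and OPENID_ON_BEHALF_FLOW_USERINFO_SCOPE falls back to 'user.read'.
const onBehalfFlowRequired = isEnabled(process.env.OPENID_ON_BEHALF_FLOW_FOR_USERINFO_REQUIRED);
const userinfoScope = process.env.OPENID_ON_BEHALF_FLOW_USERINFO_SCOPE || 'user.read';
```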
@@ -13,7 +13,6 @@ const {
const { getMessages, saveMessage, updateMessage, saveConvo, getConvo } = require('~/models');
const { checkBalance } = require('~/models/balanceMethods');
const { truncateToolCallOutputs } = require('./prompts');
const { addSpaceIfNeeded } = require('~/server/utils');
const { getFiles } = require('~/models/File');
const TextStream = require('./TextStream');
const { logger } = require('~/config');

@@ -572,7 +571,7 @@ class BaseClient {
});
}

const { generation = '' } = opts;
const { editedContent } = opts;

// It's not necessary to push to currentMessages
// depending on subclass implementation of handling messages

@@ -587,11 +586,21 @@ class BaseClient {
isCreatedByUser: false,
model: this.modelOptions?.model ?? this.model,
sender: this.sender,
text: generation,
};
this.currentMessages.push(userMessage, latestMessage);
} else {
latestMessage.text = generation;
} else if (editedContent != null) {
// Handle editedContent for content parts
if (editedContent && latestMessage.content && Array.isArray(latestMessage.content)) {
const { index, text, type } = editedContent;
if (index >= 0 && index < latestMessage.content.length) {
const contentPart = latestMessage.content[index];
if (type === ContentTypes.THINK && contentPart.type === ContentTypes.THINK) {
contentPart[ContentTypes.THINK] = text;
} else if (type === ContentTypes.TEXT && contentPart.type === ContentTypes.TEXT) {
contentPart[ContentTypes.TEXT] = text;
}
}
}
}
this.continued = true;
} else {

@@ -672,16 +681,32 @@ class BaseClient {
};

if (typeof completion === 'string') {
responseMessage.text = addSpaceIfNeeded(generation) + completion;
responseMessage.text = completion;
} else if (
Array.isArray(completion) &&
(this.clientName === EModelEndpoint.agents ||
isParamEndpoint(this.options.endpoint, this.options.endpointType))
) {
responseMessage.text = '';
responseMessage.content = completion;

if (!opts.editedContent || this.currentMessages.length === 0) {
responseMessage.content = completion;
} else {
const latestMessage = this.currentMessages[this.currentMessages.length - 1];
if (!latestMessage?.content) {
responseMessage.content = completion;
} else {
const existingContent = [...latestMessage.content];
const { type: editedType } = opts.editedContent;
responseMessage.content = this.mergeEditedContent(
existingContent,
completion,
editedType,
);
}
}
} else if (Array.isArray(completion)) {
responseMessage.text = addSpaceIfNeeded(generation) + completion.join('');
responseMessage.text = completion.join('');
}

if (

@@ -1095,6 +1120,50 @@ class BaseClient {
return numTokens;
}

/**
* Merges completion content with existing content when editing TEXT or THINK types
* @param {Array} existingContent - The existing content array
* @param {Array} newCompletion - The new completion content
* @param {string} editedType - The type of content being edited
* @returns {Array} The merged content array
*/
mergeEditedContent(existingContent, newCompletion, editedType) {
if (!newCompletion.length) {
return existingContent.concat(newCompletion);
}

if (editedType !== ContentTypes.TEXT && editedType !== ContentTypes.THINK) {
return existingContent.concat(newCompletion);
}

const lastIndex = existingContent.length - 1;
const lastExisting = existingContent[lastIndex];
const firstNew = newCompletion[0];

if (lastExisting?.type !== firstNew?.type || firstNew?.type !== editedType) {
return existingContent.concat(newCompletion);
}

const mergedContent = [...existingContent];
if (editedType === ContentTypes.TEXT) {
mergedContent[lastIndex] = {
...mergedContent[lastIndex],
[ContentTypes.TEXT]:
(mergedContent[lastIndex][ContentTypes.TEXT] || '') + (firstNew[ContentTypes.TEXT] || ''),
};
} else {
mergedContent[lastIndex] = {
...mergedContent[lastIndex],
[ContentTypes.THINK]:
(mergedContent[lastIndex][ContentTypes.THINK] || '') +
(firstNew[ContentTypes.THINK] || ''),
};
}

// Add remaining completion items
return mergedContent.concat(newCompletion.slice(1));
}

async sendPayload(payload, opts = {}) {
if (opts && typeof opts === 'object') {
this.setOptions(opts);
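A small usage sketch of the new `mergeEditedContent` helper added above, illustrating how an edited trailing TEXT part is joined with the first part of the new completion. The sample content parts are hypothetical, `client` is assumed to be a `BaseClient` instance, and `ContentTypes` comes from the data provider as in the surrounding code:

```js
const { ContentTypes } = require('librechat-data-provider');

// Existing message content, where the last TEXT part was edited mid-response.
const existingContent = [
  { type: ContentTypes.THINK, [ContentTypes.THINK]: 'outline the answer' },
  { type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'The answer is' },
];

// New completion produced after continuing from the edit.
const newCompletion = [
  { type: ContentTypes.TEXT, [ContentTypes.TEXT]: ' forty-two.' },
  { type: ContentTypes.THINK, [ContentTypes.THINK]: 'double-check units' },
];

// Because the edited type (TEXT) matches both the last existing part and the
// first new part, the two TEXT parts are concatenated into one.
const merged = client.mergeEditedContent(existingContent, newCompletion, ContentTypes.TEXT);
// merged[1][ContentTypes.TEXT] === 'The answer is forty-two.'
// merged[2] is the remaining THINK part, appended unchanged.
```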
@@ -1,6 +1,7 @@
const axios = require('axios');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');
const { isEnabled } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { generateShortLivedToken } = require('~/server/services/AuthService');

const footer = `Use the context as your learned knowledge to better answer the user.

@@ -18,7 +19,7 @@ function createContextHandlers(req, userMessageContent) {
const queryPromises = [];
const processedFiles = [];
const processedIds = new Set();
const jwtToken = req.headers.authorization.split(' ')[1];
const jwtToken = generateShortLivedToken(req.user.id);
const useFullContext = isEnabled(process.env.RAG_USE_FULL_CONTEXT);

const query = async (file) => {

@@ -1,9 +1,10 @@
const { z } = require('zod');
const axios = require('axios');
const { tool } = require('@langchain/core/tools');
const { logger } = require('@librechat/data-schemas');
const { Tools, EToolResources } = require('librechat-data-provider');
const { generateShortLivedToken } = require('~/server/services/AuthService');
const { getFiles } = require('~/models/File');
const { logger } = require('~/config');

/**
*

@@ -59,7 +60,7 @@ const createFileSearchTool = async ({ req, files, entity_id }) => {
if (files.length === 0) {
return 'No files to search. Instruct the user to add files for the search.';
}
const jwtToken = req.headers.authorization.split(' ')[1];
const jwtToken = generateShortLivedToken(req.user.id);
if (!jwtToken) {
return 'There was an error authenticating the file search request.';
}
@@ -1,6 +1,6 @@
{
"name": "@librechat/backend",
"version": "v0.7.8",
"version": "v0.7.9-rc1",
"description": "",
"scripts": {
"start": "echo 'please run this from the root directory'",

@@ -48,7 +48,7 @@
"@langchain/google-genai": "^0.2.13",
"@langchain/google-vertexai": "^0.2.13",
"@langchain/textsplitters": "^0.1.0",
"@librechat/agents": "^2.4.46",
"@librechat/agents": "^2.4.50",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@node-saml/passport-saml": "^5.0.0",
api/server/controllers/agents/__tests__/v1.spec.js (new file, 195 lines)
@@ -0,0 +1,195 @@
const { duplicateAgent } = require('../v1');
const { getAgent, createAgent } = require('~/models/Agent');
const { getActions } = require('~/models/Action');
const { nanoid } = require('nanoid');

jest.mock('~/models/Agent');
jest.mock('~/models/Action');
jest.mock('nanoid');

describe('duplicateAgent', () => {
let req, res;

beforeEach(() => {
req = {
params: { id: 'agent_123' },
user: { id: 'user_456' },
};
res = {
status: jest.fn().mockReturnThis(),
json: jest.fn(),
};
jest.clearAllMocks();
});

it('should duplicate an agent successfully', async () => {
const mockAgent = {
id: 'agent_123',
name: 'Test Agent',
description: 'Test Description',
instructions: 'Test Instructions',
provider: 'openai',
model: 'gpt-4',
tools: ['file_search'],
actions: [],
author: 'user_789',
versions: [{ name: 'Test Agent', version: 1 }],
__v: 0,
};

const mockNewAgent = {
id: 'agent_new_123',
name: 'Test Agent (1/2/23, 12:34)',
description: 'Test Description',
instructions: 'Test Instructions',
provider: 'openai',
model: 'gpt-4',
tools: ['file_search'],
actions: [],
author: 'user_456',
versions: [
{
name: 'Test Agent (1/2/23, 12:34)',
description: 'Test Description',
instructions: 'Test Instructions',
provider: 'openai',
model: 'gpt-4',
tools: ['file_search'],
actions: [],
createdAt: new Date(),
updatedAt: new Date(),
},
],
};

getAgent.mockResolvedValue(mockAgent);
getActions.mockResolvedValue([]);
nanoid.mockReturnValue('new_123');
createAgent.mockResolvedValue(mockNewAgent);

await duplicateAgent(req, res);

expect(getAgent).toHaveBeenCalledWith({ id: 'agent_123' });
expect(getActions).toHaveBeenCalledWith({ agent_id: 'agent_123' }, true);
expect(createAgent).toHaveBeenCalledWith(
expect.objectContaining({
id: 'agent_new_123',
author: 'user_456',
name: expect.stringContaining('Test Agent ('),
description: 'Test Description',
instructions: 'Test Instructions',
provider: 'openai',
model: 'gpt-4',
tools: ['file_search'],
actions: [],
}),
);

expect(createAgent).toHaveBeenCalledWith(
expect.not.objectContaining({
versions: expect.anything(),
__v: expect.anything(),
}),
);

expect(res.status).toHaveBeenCalledWith(201);
expect(res.json).toHaveBeenCalledWith({
agent: mockNewAgent,
actions: [],
});
});

it('should ensure duplicated agent has clean versions array without nested fields', async () => {
const mockAgent = {
id: 'agent_123',
name: 'Test Agent',
description: 'Test Description',
versions: [
{
name: 'Test Agent',
versions: [{ name: 'Nested' }],
__v: 1,
},
],
__v: 2,
};

const mockNewAgent = {
id: 'agent_new_123',
name: 'Test Agent (1/2/23, 12:34)',
description: 'Test Description',
versions: [
{
name: 'Test Agent (1/2/23, 12:34)',
description: 'Test Description',
createdAt: new Date(),
updatedAt: new Date(),
},
],
};

getAgent.mockResolvedValue(mockAgent);
getActions.mockResolvedValue([]);
nanoid.mockReturnValue('new_123');
createAgent.mockResolvedValue(mockNewAgent);

await duplicateAgent(req, res);

expect(mockNewAgent.versions).toHaveLength(1);

const firstVersion = mockNewAgent.versions[0];
expect(firstVersion).not.toHaveProperty('versions');
expect(firstVersion).not.toHaveProperty('__v');

expect(mockNewAgent).not.toHaveProperty('__v');

expect(res.status).toHaveBeenCalledWith(201);
});

it('should return 404 if agent not found', async () => {
getAgent.mockResolvedValue(null);

await duplicateAgent(req, res);

expect(res.status).toHaveBeenCalledWith(404);
expect(res.json).toHaveBeenCalledWith({
error: 'Agent not found',
status: 'error',
});
});

it('should handle tool_resources.ocr correctly', async () => {
const mockAgent = {
id: 'agent_123',
name: 'Test Agent',
tool_resources: {
ocr: { enabled: true, config: 'test' },
other: { should: 'not be copied' },
},
};

getAgent.mockResolvedValue(mockAgent);
getActions.mockResolvedValue([]);
nanoid.mockReturnValue('new_123');
createAgent.mockResolvedValue({ id: 'agent_new_123' });

await duplicateAgent(req, res);

expect(createAgent).toHaveBeenCalledWith(
expect.objectContaining({
tool_resources: {
ocr: { enabled: true, config: 'test' },
},
}),
);
});

it('should handle errors gracefully', async () => {
getAgent.mockRejectedValue(new Error('Database error'));

await duplicateAgent(req, res);

expect(res.status).toHaveBeenCalledWith(500);
expect(res.json).toHaveBeenCalledWith({ error: 'Database error' });
});
});
@@ -4,6 +4,7 @@ const {
sendEvent,
createRun,
Tokenizer,
checkAccess,
memoryInstructions,
createMemoryProcessor,
} = require('@librechat/api');

@@ -39,8 +40,8 @@ const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
const { getFormattedMemories, deleteMemory, setMemory } = require('~/models');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { getProviderConfig } = require('~/server/services/Endpoints');
const { checkAccess } = require('~/server/middleware/roles/access');
const BaseClient = require('~/app/clients/BaseClient');
const { getRoleByName } = require('~/models/Role');
const { loadAgent } = require('~/models/Agent');
const { getMCPManager } = require('~/config');

@@ -401,7 +402,12 @@ class AgentClient extends BaseClient {
if (user.personalization?.memories === false) {
return;
}
const hasAccess = await checkAccess(user, PermissionTypes.MEMORIES, [Permissions.USE]);
const hasAccess = await checkAccess({
user,
permissionType: PermissionTypes.MEMORIES,
permissions: [Permissions.USE],
getRoleByName,
});

if (!hasAccess) {
logger.debug(

@@ -519,7 +525,10 @@ class AgentClient extends BaseClient {
messagesToProcess = [...messages.slice(-messageWindowSize)];
}
}
return await this.processMemory(messagesToProcess);

const bufferString = getBufferString(messagesToProcess);
const bufferMessage = new HumanMessage(`# Current Chat:\n\n${bufferString}`);
return await this.processMemory([bufferMessage]);
} catch (error) {
logger.error('Memory Agent failed to process memory', error);
}
@@ -14,8 +14,11 @@ const AgentController = async (req, res, next, initializeClient, addTitle) => {
text,
endpointOption,
conversationId,
isContinued = false,
editedContent = null,
parentMessageId = null,
overrideParentMessageId = null,
responseMessageId: editedResponseMessageId = null,
} = req.body;

let sender;

@@ -67,7 +70,7 @@ const AgentController = async (req, res, next, initializeClient, addTitle) => {
handler();
}
} catch (e) {
// Ignore cleanup errors
logger.error('[AgentController] Error in cleanup handler', e);
}
}
}

@@ -155,7 +158,7 @@ const AgentController = async (req, res, next, initializeClient, addTitle) => {
try {
res.removeListener('close', closeHandler);
} catch (e) {
// Ignore
logger.error('[AgentController] Error removing close listener', e);
}
});

@@ -163,10 +166,14 @@ const AgentController = async (req, res, next, initializeClient, addTitle) => {
user: userId,
onStart,
getReqData,
isContinued,
editedContent,
conversationId,
parentMessageId,
abortController,
overrideParentMessageId,
isEdited: !!editedContent,
responseMessageId: editedResponseMessageId,
progressOptions: {
res,
},

@@ -242,6 +242,8 @@ const duplicateAgentHandler = async (req, res) => {
createdAt: _createdAt,
updatedAt: _updatedAt,
tool_resources: _tool_resources = {},
versions: _versions,
__v: _v,
...cloneData
} = agent;
cloneData.name = `${agent.name} (${new Date().toLocaleString('en-US', {
@@ -1,5 +1,7 @@
const { nanoid } = require('nanoid');
const { EnvVar } = require('@librechat/agents');
const { checkAccess } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const {
Tools,
AuthType,

@@ -13,9 +15,8 @@ const { processCodeOutput } = require('~/server/services/Files/Code/process');
const { createToolCall, getToolCallsByConvo } = require('~/models/ToolCall');
const { loadAuthValues } = require('~/server/services/Tools/credentials');
const { loadTools } = require('~/app/clients/tools/util');
const { checkAccess } = require('~/server/middleware');
const { getRoleByName } = require('~/models/Role');
const { getMessage } = require('~/models/Message');
const { logger } = require('~/config');

const fieldsMap = {
[Tools.execute_code]: [EnvVar.CODE_API_KEY],

@@ -79,6 +80,7 @@ const verifyToolAuth = async (req, res) => {
throwError: false,
});
} catch (error) {
logger.error('Error loading auth values', error);
res.status(200).json({ authenticated: false, message: AuthType.USER_PROVIDED });
return;
}

@@ -132,7 +134,12 @@ const callTool = async (req, res) => {
logger.debug(`[${toolId}/call] User: ${req.user.id}`);
let hasAccess = true;
if (toolAccessPermType[toolId]) {
hasAccess = await checkAccess(req.user, toolAccessPermType[toolId], [Permissions.USE]);
hasAccess = await checkAccess({
user: req.user,
permissionType: toolAccessPermType[toolId],
permissions: [Permissions.USE],
getRoleByName,
});
}
if (!hasAccess) {
logger.warn(
@@ -1,78 +0,0 @@
const { getRoleByName } = require('~/models/Role');
const { logger } = require('~/config');

/**
* Core function to check if a user has one or more required permissions
*
* @param {object} user - The user object
* @param {PermissionTypes} permissionType - The type of permission to check
* @param {Permissions[]} permissions - The list of specific permissions to check
* @param {Record<Permissions, string[]>} [bodyProps] - An optional object where keys are permissions and values are arrays of properties to check
* @param {object} [checkObject] - The object to check properties against
* @returns {Promise<boolean>} Whether the user has the required permissions
*/
const checkAccess = async (user, permissionType, permissions, bodyProps = {}, checkObject = {}) => {
if (!user) {
return false;
}

const role = await getRoleByName(user.role);
if (role && role.permissions && role.permissions[permissionType]) {
const hasAnyPermission = permissions.some((permission) => {
if (role.permissions[permissionType][permission]) {
return true;
}

if (bodyProps[permission] && checkObject) {
return bodyProps[permission].some((prop) =>
Object.prototype.hasOwnProperty.call(checkObject, prop),
);
}

return false;
});

return hasAnyPermission;
}

return false;
};

/**
* Middleware to check if a user has one or more required permissions, optionally based on `req.body` properties.
*
* @param {PermissionTypes} permissionType - The type of permission to check.
* @param {Permissions[]} permissions - The list of specific permissions to check.
* @param {Record<Permissions, string[]>} [bodyProps] - An optional object where keys are permissions and values are arrays of `req.body` properties to check.
* @returns {(req: ServerRequest, res: ServerResponse, next: NextFunction) => Promise<void>} Express middleware function.
*/
const generateCheckAccess = (permissionType, permissions, bodyProps = {}) => {
  return async (req, res, next) => {
    try {
      const hasAccess = await checkAccess(
        req.user,
        permissionType,
        permissions,
        bodyProps,
        req.body,
      );

      if (hasAccess) {
        return next();
      }

      logger.warn(
        `[${permissionType}] Forbidden: Insufficient permissions for User ${req.user.id}: ${permissions.join(', ')}`,
      );
      return res.status(403).json({ message: 'Forbidden: Insufficient permissions' });
    } catch (error) {
      logger.error(error);
      return res.status(500).json({ message: `Server error: ${error.message}` });
    }
  };
};

module.exports = {
  checkAccess,
  generateCheckAccess,
};
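The local `checkAccess`/`generateCheckAccess` middleware deleted above is replaced across this compare by the versions exported from `@librechat/api`, which take an options object and receive `getRoleByName` as an injected dependency instead of importing the model directly. A hedged before/after sketch of the call shape, based on the usages visible elsewhere in this diff (the wrapper function name is illustrative only):

```js
const { checkAccess, generateCheckAccess } = require('@librechat/api');
const { PermissionTypes, Permissions } = require('librechat-data-provider');
const { getRoleByName } = require('~/models/Role');

// Old (removed) positional form:
//   await checkAccess(req.user, PermissionTypes.AGENTS, [Permissions.USE]);

// New options-object form, with the role lookup passed in.
// Assumes this runs inside an Express handler where `req` is available.
async function canUseAgents(req) {
  return checkAccess({
    user: req.user,
    permissionType: PermissionTypes.AGENTS,
    permissions: [Permissions.USE],
    getRoleByName,
  });
}

// Route middleware follows the same pattern:
const checkAgentAccess = generateCheckAccess({
  permissionType: PermissionTypes.AGENTS,
  permissions: [Permissions.USE],
  getRoleByName,
});
```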
@@ -1,8 +1,5 @@
const checkAdmin = require('./admin');
const { checkAccess, generateCheckAccess } = require('./access');

module.exports = {
checkAdmin,
checkAccess,
generateCheckAccess,
};
@@ -1,14 +1,28 @@
const express = require('express');
const { nanoid } = require('nanoid');
const { actionDelimiter, SystemRoles, removeNullishValues } = require('librechat-data-provider');
const { logger } = require('@librechat/data-schemas');
const { generateCheckAccess } = require('@librechat/api');
const {
SystemRoles,
Permissions,
PermissionTypes,
actionDelimiter,
removeNullishValues,
} = require('librechat-data-provider');
const { encryptMetadata, domainParser } = require('~/server/services/ActionService');
const { updateAction, getActions, deleteAction } = require('~/models/Action');
const { isActionDomainAllowed } = require('~/server/services/domains');
const { getAgent, updateAgent } = require('~/models/Agent');
const { logger } = require('~/config');
const { getRoleByName } = require('~/models/Role');

const router = express.Router();

const checkAgentCreate = generateCheckAccess({
permissionType: PermissionTypes.AGENTS,
permissions: [Permissions.USE, Permissions.CREATE],
getRoleByName,
});

// If the user has ADMIN role
// then action edition is possible even if not owner of the assistant
const isAdmin = (req) => {

@@ -41,7 +55,7 @@ router.get('/', async (req, res) => {
* @param {ActionMetadata} req.body.metadata - Metadata for the action.
* @returns {Object} 200 - success response - application/json
*/
router.post('/:agent_id', async (req, res) => {
router.post('/:agent_id', checkAgentCreate, async (req, res) => {
try {
const { agent_id } = req.params;

@@ -149,7 +163,7 @@ router.post('/:agent_id', async (req, res) => {
* @param {string} req.params.action_id - The ID of the action to delete.
* @returns {Object} 200 - success response - application/json
*/
router.delete('/:agent_id/:action_id', async (req, res) => {
router.delete('/:agent_id/:action_id', checkAgentCreate, async (req, res) => {
try {
const { agent_id, action_id } = req.params;
const admin = isAdmin(req);
@@ -1,22 +1,28 @@
const express = require('express');
const { generateCheckAccess, skipAgentCheck } = require('@librechat/api');
const { PermissionTypes, Permissions } = require('librechat-data-provider');
const {
setHeaders,
moderateText,
// validateModel,
generateCheckAccess,
validateConvoAccess,
buildEndpointOption,
} = require('~/server/middleware');
const { initializeClient } = require('~/server/services/Endpoints/agents');
const AgentController = require('~/server/controllers/agents/request');
const addTitle = require('~/server/services/Endpoints/agents/title');
const { getRoleByName } = require('~/models/Role');

const router = express.Router();

router.use(moderateText);

const checkAgentAccess = generateCheckAccess(PermissionTypes.AGENTS, [Permissions.USE]);
const checkAgentAccess = generateCheckAccess({
permissionType: PermissionTypes.AGENTS,
permissions: [Permissions.USE],
skipCheck: skipAgentCheck,
getRoleByName,
});

router.use(checkAgentAccess);
router.use(validateConvoAccess);
@@ -1,29 +1,36 @@
const express = require('express');
const { generateCheckAccess } = require('@librechat/api');
const { PermissionTypes, Permissions } = require('librechat-data-provider');
const { requireJwtAuth, generateCheckAccess } = require('~/server/middleware');
const { requireJwtAuth } = require('~/server/middleware');
const v1 = require('~/server/controllers/agents/v1');
const { getRoleByName } = require('~/models/Role');
const actions = require('./actions');
const tools = require('./tools');

const router = express.Router();
const avatar = express.Router();

const checkAgentAccess = generateCheckAccess(PermissionTypes.AGENTS, [Permissions.USE]);
const checkAgentCreate = generateCheckAccess(PermissionTypes.AGENTS, [
Permissions.USE,
Permissions.CREATE,
]);
const checkAgentAccess = generateCheckAccess({
permissionType: PermissionTypes.AGENTS,
permissions: [Permissions.USE],
getRoleByName,
});
const checkAgentCreate = generateCheckAccess({
permissionType: PermissionTypes.AGENTS,
permissions: [Permissions.USE, Permissions.CREATE],
getRoleByName,
});

const checkGlobalAgentShare = generateCheckAccess(
PermissionTypes.AGENTS,
[Permissions.USE, Permissions.CREATE],
{
const checkGlobalAgentShare = generateCheckAccess({
permissionType: PermissionTypes.AGENTS,
permissions: [Permissions.USE, Permissions.CREATE],
bodyProps: {
[Permissions.SHARED_GLOBAL]: ['projectIds', 'removeProjectIds'],
},
);
getRoleByName,
});

router.use(requireJwtAuth);
router.use(checkAgentAccess);

/**
* Agent actions route.
@@ -477,7 +477,9 @@ describe('Multer Configuration', () => {
done(new Error('Expected mkdirSync to throw an error but no error was thrown'));
} catch (error) {
// This is the expected behavior - mkdirSync throws synchronously for invalid paths
expect(error.code).toBe('EACCES');
// On Linux, this typically returns EACCES (permission denied)
// On macOS/Darwin, this returns ENOENT (no such file or directory)
expect(['EACCES', 'ENOENT']).toContain(error.code);
done();
}
});
@@ -1,37 +1,43 @@
const express = require('express');
const { Tokenizer } = require('@librechat/api');
const { Tokenizer, generateCheckAccess } = require('@librechat/api');
const { PermissionTypes, Permissions } = require('librechat-data-provider');
const {
getAllUserMemories,
toggleUserMemories,
createMemory,
setMemory,
deleteMemory,
setMemory,
} = require('~/models');
const { requireJwtAuth, generateCheckAccess } = require('~/server/middleware');
const { requireJwtAuth } = require('~/server/middleware');
const { getRoleByName } = require('~/models/Role');

const router = express.Router();

const checkMemoryRead = generateCheckAccess(PermissionTypes.MEMORIES, [
Permissions.USE,
Permissions.READ,
]);
const checkMemoryCreate = generateCheckAccess(PermissionTypes.MEMORIES, [
Permissions.USE,
Permissions.CREATE,
]);
const checkMemoryUpdate = generateCheckAccess(PermissionTypes.MEMORIES, [
Permissions.USE,
Permissions.UPDATE,
]);
const checkMemoryDelete = generateCheckAccess(PermissionTypes.MEMORIES, [
Permissions.USE,
Permissions.UPDATE,
]);
const checkMemoryOptOut = generateCheckAccess(PermissionTypes.MEMORIES, [
Permissions.USE,
Permissions.OPT_OUT,
]);
const checkMemoryRead = generateCheckAccess({
permissionType: PermissionTypes.MEMORIES,
permissions: [Permissions.USE, Permissions.READ],
getRoleByName,
});
const checkMemoryCreate = generateCheckAccess({
permissionType: PermissionTypes.MEMORIES,
permissions: [Permissions.USE, Permissions.CREATE],
getRoleByName,
});
const checkMemoryUpdate = generateCheckAccess({
permissionType: PermissionTypes.MEMORIES,
permissions: [Permissions.USE, Permissions.UPDATE],
getRoleByName,
});
const checkMemoryDelete = generateCheckAccess({
permissionType: PermissionTypes.MEMORIES,
permissions: [Permissions.USE, Permissions.UPDATE],
getRoleByName,
});
const checkMemoryOptOut = generateCheckAccess({
permissionType: PermissionTypes.MEMORIES,
permissions: [Permissions.USE, Permissions.OPT_OUT],
getRoleByName,
});

router.use(requireJwtAuth);
@@ -235,12 +235,13 @@ router.put('/:conversationId/:messageId', validateMessageReq, async (req, res) =
return res.status(400).json({ error: 'Content part not found' });
}

if (updatedContent[index].type !== ContentTypes.TEXT) {
const currentPartType = updatedContent[index].type;
if (currentPartType !== ContentTypes.TEXT && currentPartType !== ContentTypes.THINK) {
return res.status(400).json({ error: 'Cannot update non-text content' });
}

const oldText = updatedContent[index].text;
updatedContent[index] = { type: ContentTypes.TEXT, text };
const oldText = updatedContent[index][currentPartType];
updatedContent[index] = { type: currentPartType, [currentPartType]: text };

let tokenCount = message.tokenCount;
if (tokenCount !== undefined) {
@@ -1,5 +1,7 @@
const express = require('express');
const { PermissionTypes, Permissions, SystemRoles } = require('librechat-data-provider');
const { logger } = require('@librechat/data-schemas');
const { generateCheckAccess } = require('@librechat/api');
const { Permissions, SystemRoles, PermissionTypes } = require('librechat-data-provider');
const {
getPrompt,
getPrompts,

@@ -14,24 +16,30 @@ const {
// updatePromptLabels,
makePromptProduction,
} = require('~/models/Prompt');
const { requireJwtAuth, generateCheckAccess } = require('~/server/middleware');
const { logger } = require('~/config');
const { requireJwtAuth } = require('~/server/middleware');
const { getRoleByName } = require('~/models/Role');

const router = express.Router();

const checkPromptAccess = generateCheckAccess(PermissionTypes.PROMPTS, [Permissions.USE]);
const checkPromptCreate = generateCheckAccess(PermissionTypes.PROMPTS, [
Permissions.USE,
Permissions.CREATE,
]);
const checkPromptAccess = generateCheckAccess({
permissionType: PermissionTypes.PROMPTS,
permissions: [Permissions.USE],
getRoleByName,
});
const checkPromptCreate = generateCheckAccess({
permissionType: PermissionTypes.PROMPTS,
permissions: [Permissions.USE, Permissions.CREATE],
getRoleByName,
});

const checkGlobalPromptShare = generateCheckAccess(
PermissionTypes.PROMPTS,
[Permissions.USE, Permissions.CREATE],
{
const checkGlobalPromptShare = generateCheckAccess({
permissionType: PermissionTypes.PROMPTS,
permissions: [Permissions.USE, Permissions.CREATE],
bodyProps: {
[Permissions.SHARED_GLOBAL]: ['projectIds', 'removeProjectIds'],
},
);
getRoleByName,
});

router.use(requireJwtAuth);
router.use(checkPromptAccess);
@@ -1,18 +1,24 @@
const express = require('express');
const { logger } = require('@librechat/data-schemas');
const { generateCheckAccess } = require('@librechat/api');
const { PermissionTypes, Permissions } = require('librechat-data-provider');
const {
getConversationTags,
updateTagsForConversation,
updateConversationTag,
createConversationTag,
deleteConversationTag,
updateTagsForConversation,
getConversationTags,
} = require('~/models/ConversationTag');
const { requireJwtAuth, generateCheckAccess } = require('~/server/middleware');
const { logger } = require('~/config');
const { requireJwtAuth } = require('~/server/middleware');
const { getRoleByName } = require('~/models/Role');

const router = express.Router();

const checkBookmarkAccess = generateCheckAccess(PermissionTypes.BOOKMARKS, [Permissions.USE]);
const checkBookmarkAccess = generateCheckAccess({
permissionType: PermissionTypes.BOOKMARKS,
permissions: [Permissions.USE],
getRoleByName,
});

router.use(requireJwtAuth);
router.use(checkBookmarkAccess);
@@ -1,4 +1,5 @@
const bcrypt = require('bcryptjs');
const jwt = require('jsonwebtoken');
const { webcrypto } = require('node:crypto');
const { isEnabled } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');

@@ -499,6 +500,18 @@ const resendVerificationEmail = async (req) => {
};
}
};
/**
* Generate a short-lived JWT token
* @param {String} userId - The ID of the user
* @param {String} [expireIn='5m'] - The expiration time for the token (default is 5 minutes)
* @returns {String} - The generated JWT token
*/
const generateShortLivedToken = (userId, expireIn = '5m') => {
return jwt.sign({ id: userId }, process.env.JWT_SECRET, {
expiresIn: expireIn,
algorithm: 'HS256',
});
};

module.exports = {
logoutUser,

@@ -506,7 +519,8 @@ module.exports = {
registerUser,
setAuthTokens,
resetPassword,
setOpenIDAuthTokens,
requestPasswordReset,
resendVerificationEmail,
setOpenIDAuthTokens,
generateShortLivedToken,
};
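The new `generateShortLivedToken` helper above backs the RAG-related changes elsewhere in this compare: instead of forwarding the caller's bearer token from `req.headers.authorization`, services mint a short-lived HS256 token for the user. A minimal usage sketch, assuming an Express request context; the wrapper function name and the request body shape are illustrative, while the `/documents` endpoint and header usage mirror the existing code:

```js
const axios = require('axios');
const { generateShortLivedToken } = require('~/server/services/AuthService');

// Mint a token that expires in 5 minutes (the default) for the current user,
// then use it to authenticate a RAG API request on their behalf.
async function deleteEmbeddedDocs(req, fileIds) {
  const jwtToken = generateShortLivedToken(req.user.id);
  return axios.delete(`${process.env.RAG_API_URL}/documents`, {
    headers: { Authorization: `Bearer ${jwtToken}` },
    data: fileIds, // assumed request body shape for this sketch
  });
}
```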
@@ -1,3 +1,5 @@
const path = require('path');
const { loadServiceKey } = require('@librechat/api');
const { EModelEndpoint } = require('librechat-data-provider');
const { isUserProvided } = require('~/server/utils');
const { config } = require('./EndpointService');

@@ -11,9 +13,13 @@ const { openAIApiKey, azureOpenAIApiKey, useAzurePlugins, userProvidedOpenAI, go
async function loadAsyncEndpoints(req) {
let i = 0;
let serviceKey, googleUserProvides;
const serviceKeyPath =
process.env.GOOGLE_SERVICE_KEY_FILE_PATH ||
path.join(__dirname, '../../..', 'data', 'auth.json');

try {
serviceKey = require('~/data/auth.json');
} catch (e) {
serviceKey = await loadServiceKey(serviceKeyPath);
} catch {
if (i === 0) {
i++;
}

@@ -32,14 +38,14 @@ async function loadAsyncEndpoints(req) {
const gptPlugins =
useAzure || openAIApiKey || azureOpenAIApiKey
? {
availableAgents: ['classic', 'functions'],
userProvide: useAzure ? false : userProvidedOpenAI,
userProvideURL: useAzure
? false
: config[EModelEndpoint.openAI]?.userProvideURL ||
availableAgents: ['classic', 'functions'],
userProvide: useAzure ? false : userProvidedOpenAI,
userProvideURL: useAzure
? false
: config[EModelEndpoint.openAI]?.userProvideURL ||
config[EModelEndpoint.azureOpenAI]?.userProvideURL,
azure: useAzurePlugins || useAzure,
}
azure: useAzurePlugins || useAzure,
}
: false;

return { google, gptPlugins };
@@ -85,7 +85,7 @@ const initializeAgent = async ({
});

const provider = agent.provider;
const { tools, toolContextMap } =
const { tools: structuredTools, toolContextMap } =
(await loadTools?.({
req,
res,

@@ -140,6 +140,22 @@ const initializeAgent = async ({
agent.provider = options.provider;
}

/** @type {import('@librechat/agents').GenericTool[]} */
let tools = options.tools?.length ? options.tools : structuredTools;
if (
(agent.provider === Providers.GOOGLE || agent.provider === Providers.VERTEXAI) &&
options.tools?.length &&
structuredTools?.length
) {
throw new Error(`{ "type": "${ErrorTypes.GOOGLE_TOOL_CONFLICT}"}`);
} else if (
(agent.provider === Providers.OPENAI || agent.provider === Providers.AZURE) &&
options.tools?.length &&
structuredTools?.length
) {
tools = structuredTools.concat(options.tools);
}

/** @type {import('@librechat/agents').ClientOptions} */
agent.model_parameters = { ...options.llmConfig };
if (options.configOptions) {

@@ -162,10 +178,10 @@ const initializeAgent = async ({

return {
...agent,
tools,
attachments,
resendFiles,
toolContextMap,
tools,
maxContextTokens: (agentMaxContextTokens - maxTokens) * 0.9,
};
};
@@ -1,5 +1,6 @@
const { getGoogleConfig, isEnabled } = require('@librechat/api');
const path = require('path');
const { EModelEndpoint, AuthKeys } = require('librechat-data-provider');
const { getGoogleConfig, isEnabled, loadServiceKey } = require('@librechat/api');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const { GoogleClient } = require('~/app');

@@ -15,8 +16,15 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
}

let serviceKey = {};

try {
serviceKey = require('~/data/auth.json');
const serviceKeyPath =
process.env.GOOGLE_SERVICE_KEY_FILE_PATH ||
path.join(__dirname, '../../../..', 'data', 'auth.json');
serviceKey = await loadServiceKey(serviceKeyPath);
if (!serviceKey) {
serviceKey = {};
}
} catch (_e) {
// Do nothing
}
@@ -7,6 +7,16 @@ const initCustom = require('~/server/services/Endpoints/custom/initialize');
const initGoogle = require('~/server/services/Endpoints/google/initialize');
const { getCustomEndpointConfig } = require('~/server/services/Config');

/** Check if the provider is a known custom provider
* @param {string | undefined} [provider] - The provider string
* @returns {boolean} - True if the provider is a known custom provider, false otherwise
*/
function isKnownCustomProvider(provider) {
return [Providers.XAI, Providers.OLLAMA, Providers.DEEPSEEK, Providers.OPENROUTER].includes(
provider || '',
);
}

const providerConfigMap = {
[Providers.XAI]: initCustom,
[Providers.OLLAMA]: initCustom,

@@ -46,6 +56,13 @@ async function getProviderConfig(provider) {
overrideProvider = Providers.OPENAI;
}

if (isKnownCustomProvider(overrideProvider)) {
customEndpointConfig = await getCustomEndpointConfig(provider);
if (!customEndpointConfig) {
throw new Error(`Provider ${provider} not supported`);
}
}

return {
getOptions,
overrideProvider,
@@ -1,10 +1,11 @@
const fs = require('fs');
const path = require('path');
const axios = require('axios');
const { logger } = require('@librechat/data-schemas');
const { EModelEndpoint } = require('librechat-data-provider');
const { generateShortLivedToken } = require('~/server/services/AuthService');
const { getBufferMetadata } = require('~/server/utils');
const paths = require('~/config/paths');
const { logger } = require('~/config');

/**
* Saves a file to a specified output path with a new filename.

@@ -206,7 +207,7 @@ const deleteLocalFile = async (req, file) => {
const cleanFilepath = file.filepath.split('?')[0];

if (file.embedded && process.env.RAG_API_URL) {
const jwtToken = req.headers.authorization.split(' ')[1];
const jwtToken = generateShortLivedToken(req.user.id);
axios.delete(`${process.env.RAG_API_URL}/documents`, {
headers: {
Authorization: `Bearer ${jwtToken}`,
@@ -4,6 +4,7 @@ const FormData = require('form-data');
const { logAxiosError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { FileSources } = require('librechat-data-provider');
const { generateShortLivedToken } = require('~/server/services/AuthService');

/**
* Deletes a file from the vector database. This function takes a file object, constructs the full path, and

@@ -23,7 +24,8 @@ const deleteVectors = async (req, file) => {
return;
}
try {
const jwtToken = req.headers.authorization.split(' ')[1];
const jwtToken = generateShortLivedToken(req.user.id);

return await axios.delete(`${process.env.RAG_API_URL}/documents`, {
headers: {
Authorization: `Bearer ${jwtToken}`,

@@ -70,7 +72,7 @@ async function uploadVectors({ req, file, file_id, entity_id }) {
}

try {
const jwtToken = req.headers.authorization.split(' ')[1];
const jwtToken = generateShortLivedToken(req.user.id);
const formData = new FormData();
formData.append('file_id', file_id);
formData.append('file', fs.createReadStream(file.path));
@@ -55,7 +55,9 @@ const processFiles = async (files, fileIds) => {
}

if (!fileIds) {
return await Promise.all(promises);
const results = await Promise.all(promises);
// Filter out null results from failed updateFileUsage calls
return results.filter((result) => result != null);
}

for (let file_id of fileIds) {

@@ -67,7 +69,9 @@ const processFiles = async (files, fileIds) => {
}

// TODO: calculate token cost when image is first uploaded
return await Promise.all(promises);
const results = await Promise.all(promises);
// Filter out null results from failed updateFileUsage calls
return results.filter((result) => result != null);
};

/**
api/server/services/Files/processFiles.test.js (new file, 208 lines)
@@ -0,0 +1,208 @@
// Mock the updateFileUsage function before importing the actual processFiles
jest.mock('~/models/File', () => ({
updateFileUsage: jest.fn(),
}));

// Mock winston and logger configuration to avoid dependency issues
jest.mock('~/config', () => ({
logger: {
info: jest.fn(),
warn: jest.fn(),
debug: jest.fn(),
error: jest.fn(),
},
}));

// Mock all other dependencies that might cause issues
jest.mock('librechat-data-provider', () => ({
isUUID: { parse: jest.fn() },
megabyte: 1024 * 1024,
FileContext: { message_attachment: 'message_attachment' },
FileSources: { local: 'local' },
EModelEndpoint: { assistants: 'assistants' },
EToolResources: { file_search: 'file_search' },
mergeFileConfig: jest.fn(),
removeNullishValues: jest.fn((obj) => obj),
isAssistantsEndpoint: jest.fn(),
}));

jest.mock('~/server/services/Files/images', () => ({
convertImage: jest.fn(),
resizeAndConvert: jest.fn(),
resizeImageBuffer: jest.fn(),
}));

jest.mock('~/server/controllers/assistants/v2', () => ({
addResourceFileId: jest.fn(),
deleteResourceFileId: jest.fn(),
}));

jest.mock('~/models/Agent', () => ({
addAgentResourceFile: jest.fn(),
removeAgentResourceFiles: jest.fn(),
}));

jest.mock('~/server/controllers/assistants/helpers', () => ({
getOpenAIClient: jest.fn(),
}));

jest.mock('~/server/services/Tools/credentials', () => ({
loadAuthValues: jest.fn(),
}));

jest.mock('~/server/services/Config', () => ({
checkCapability: jest.fn(),
}));

jest.mock('~/server/utils/queue', () => ({
LB_QueueAsyncCall: jest.fn(),
}));

jest.mock('./strategies', () => ({
getStrategyFunctions: jest.fn(),
}));

jest.mock('~/server/utils', () => ({
determineFileType: jest.fn(),
}));

// Import the actual processFiles function after all mocks are set up
const { processFiles } = require('./process');
const { updateFileUsage } = require('~/models/File');

describe('processFiles', () => {
beforeEach(() => {
jest.clearAllMocks();
});

describe('null filtering functionality', () => {
it('should filter out null results from updateFileUsage when files do not exist', async () => {
const mockFiles = [
{ file_id: 'existing-file-1' },
{ file_id: 'non-existent-file' },
{ file_id: 'existing-file-2' },
];

// Mock updateFileUsage to return null for non-existent files
updateFileUsage.mockImplementation(({ file_id }) => {
if (file_id === 'non-existent-file') {
return Promise.resolve(null); // Simulate file not found in the database
}
return Promise.resolve({ file_id, usage: 1 });
});

const result = await processFiles(mockFiles);

expect(updateFileUsage).toHaveBeenCalledTimes(3);
expect(result).toEqual([
{ file_id: 'existing-file-1', usage: 1 },
{ file_id: 'existing-file-2', usage: 1 },
]);

// Critical test - ensure no null values in result
expect(result).not.toContain(null);
expect(result).not.toContain(undefined);
expect(result.length).toBe(2); // Only valid files should be returned
});

it('should return empty array when all updateFileUsage calls return null', async () => {
const mockFiles = [{ file_id: 'non-existent-1' }, { file_id: 'non-existent-2' }];

// All updateFileUsage calls return null
updateFileUsage.mockResolvedValue(null);

const result = await processFiles(mockFiles);

expect(updateFileUsage).toHaveBeenCalledTimes(2);
expect(result).toEqual([]);
expect(result).not.toContain(null);
expect(result.length).toBe(0);
});

it('should work correctly when all files exist', async () => {
const mockFiles = [{ file_id: 'file-1' }, { file_id: 'file-2' }];

updateFileUsage.mockImplementation(({ file_id }) => {
return Promise.resolve({ file_id, usage: 1 });
});

const result = await processFiles(mockFiles);

expect(result).toEqual([
{ file_id: 'file-1', usage: 1 },
{ file_id: 'file-2', usage: 1 },
]);
expect(result).not.toContain(null);
expect(result.length).toBe(2);
});

it('should handle fileIds parameter and filter nulls correctly', async () => {
const mockFiles = [{ file_id: 'file-1' }];
const mockFileIds = ['file-2', 'non-existent-file'];

updateFileUsage.mockImplementation(({ file_id }) => {
if (file_id === 'non-existent-file') {
return Promise.resolve(null);
}
return Promise.resolve({ file_id, usage: 1 });
});

const result = await processFiles(mockFiles, mockFileIds);

expect(result).toEqual([
{ file_id: 'file-1', usage: 1 },
{ file_id: 'file-2', usage: 1 },
]);
expect(result).not.toContain(null);
expect(result).not.toContain(undefined);
expect(result.length).toBe(2);
});

it('should handle duplicate file_ids correctly', async () => {
const mockFiles = [
{ file_id: 'duplicate-file' },
{ file_id: 'duplicate-file' }, // Duplicate should be ignored
{ file_id: 'unique-file' },
];

updateFileUsage.mockImplementation(({ file_id }) => {
return Promise.resolve({ file_id, usage: 1 });
});

const result = await processFiles(mockFiles);

// Should only call updateFileUsage twice (duplicate ignored)
expect(updateFileUsage).toHaveBeenCalledTimes(2);
expect(result).toEqual([
{ file_id: 'duplicate-file', usage: 1 },
{ file_id: 'unique-file', usage: 1 },
]);
expect(result.length).toBe(2);
});
});

describe('edge cases', () => {
it('should handle empty files array', async () => {
const result = await processFiles([]);
expect(result).toEqual([]);
expect(updateFileUsage).not.toHaveBeenCalled();
});

it('should handle mixed null and undefined returns from updateFileUsage', async () => {
const mockFiles = [{ file_id: 'file-1' }, { file_id: 'file-2' }, { file_id: 'file-3' }];

updateFileUsage.mockImplementation(({ file_id }) => {
if (file_id === 'file-1') return Promise.resolve(null);
if (file_id === 'file-2') return Promise.resolve(undefined);
return Promise.resolve({ file_id, usage: 1 });
});

const result = await processFiles(mockFiles);

expect(result).toEqual([{ file_id: 'file-3', usage: 1 }]);
expect(result).not.toContain(null);
expect(result).not.toContain(undefined);
expect(result.length).toBe(1);
});
});
});
@@ -1,5 +1,9 @@
const { FileSources } = require('librechat-data-provider');
const { uploadMistralOCR, uploadAzureMistralOCR } = require('@librechat/api');
const {
uploadMistralOCR,
uploadAzureMistralOCR,
uploadGoogleVertexMistralOCR,
} = require('@librechat/api');
const {
getFirebaseURL,
prepareImageURL,

@@ -222,6 +226,26 @@ const azureMistralOCRStrategy = () => ({
handleFileUpload: uploadAzureMistralOCR,
});

const vertexMistralOCRStrategy = () => ({
/** @type {typeof saveFileFromURL | null} */
saveURL: null,
/** @type {typeof getLocalFileURL | null} */
getFileURL: null,
/** @type {typeof saveLocalBuffer | null} */
saveBuffer: null,
/** @type {typeof processLocalAvatar | null} */
processAvatar: null,
/** @type {typeof uploadLocalImage | null} */
handleImageUpload: null,
/** @type {typeof prepareImagesLocal | null} */
prepareImagePayload: null,
/** @type {typeof deleteLocalFile | null} */
deleteFile: null,
/** @type {typeof getLocalFileStream | null} */
getDownloadStream: null,
handleFileUpload: uploadGoogleVertexMistralOCR,
});

// Strategy Selector
const getStrategyFunctions = (fileSource) => {
if (fileSource === FileSources.firebase) {

@@ -244,6 +268,8 @@ const getStrategyFunctions = (fileSource) => {
return mistralOCRStrategy();
} else if (fileSource === FileSources.azure_mistral_ocr) {
return azureMistralOCRStrategy();
} else if (fileSource === FileSources.vertexai_mistral_ocr) {
return vertexMistralOCRStrategy();
} else {
throw new Error('Invalid file source');
}
@@ -1,4 +1,5 @@
const { SystemRoles } = require('librechat-data-provider');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { Strategy: JwtStrategy, ExtractJwt } = require('passport-jwt');
const { updateUser, findUser } = require('~/models');
const { logger } = require('~/config');

@@ -13,17 +14,23 @@ const { isEnabled } = require('~/server/utils');
* The strategy extracts the JWT from the Authorization header as a Bearer token.
* The JWT is then verified using the signing key, and the user is retrieved from the database.
*/
const openIdJwtLogin = (openIdConfig) =>
new JwtStrategy(
const openIdJwtLogin = (openIdConfig) => {
let jwksRsaOptions = {
cache: isEnabled(process.env.OPENID_JWKS_URL_CACHE_ENABLED) || true,
cacheMaxAge: process.env.OPENID_JWKS_URL_CACHE_TIME
? eval(process.env.OPENID_JWKS_URL_CACHE_TIME)
: 60000,
jwksUri: openIdConfig.serverMetadata().jwks_uri,
};

if (process.env.PROXY) {
jwksRsaOptions.requestAgent = new HttpsProxyAgent(process.env.PROXY);
}

return new JwtStrategy(
{
jwtFromRequest: ExtractJwt.fromAuthHeaderAsBearerToken(),
secretOrKeyProvider: jwksRsa.passportJwtSecret({
cache: isEnabled(process.env.OPENID_JWKS_URL_CACHE_ENABLED) || true,
cacheMaxAge: process.env.OPENID_JWKS_URL_CACHE_TIME
? eval(process.env.OPENID_JWKS_URL_CACHE_TIME)
: 60000,
jwksUri: openIdConfig.serverMetadata().jwks_uri,
}),
secretOrKeyProvider: jwksRsa.passportJwtSecret(jwksRsaOptions),
},
async (payload, done) => {
try {

@@ -48,5 +55,6 @@ const openIdJwtLogin = (openIdConfig) =>
}
},
);
};

module.exports = openIdJwtLogin;
@@ -49,7 +49,7 @@ async function customFetch(url, options) {
|
||||
logger.info(`[openidStrategy] proxy agent configured: ${process.env.PROXY}`);
|
||||
fetchOptions = {
|
||||
...options,
|
||||
dispatcher: new HttpsProxyAgent(process.env.PROXY),
|
||||
dispatcher: new undici.ProxyAgent(process.env.PROXY),
|
||||
};
|
||||
}
|
||||
|
||||
@@ -118,7 +118,7 @@ class CustomOpenIDStrategy extends OpenIDStrategy {
|
||||
*/
|
||||
const exchangeAccessTokenIfNeeded = async (config, accessToken, sub, fromCache = false) => {
|
||||
const tokensCache = getLogStores(CacheKeys.OPENID_EXCHANGED_TOKENS);
|
||||
const onBehalfFlowRequired = isEnabled(process.env.OPENID_ON_BEHALF_FLOW_FOR_USERINFRO_REQUIRED);
|
||||
const onBehalfFlowRequired = isEnabled(process.env.OPENID_ON_BEHALF_FLOW_FOR_USERINFO_REQUIRED);
|
||||
if (onBehalfFlowRequired) {
|
||||
if (fromCache) {
|
||||
const cachedToken = await tokensCache.get(sub);
|
||||
@@ -130,7 +130,7 @@ const exchangeAccessTokenIfNeeded = async (config, accessToken, sub, fromCache =
|
||||
config,
|
||||
'urn:ietf:params:oauth:grant-type:jwt-bearer',
|
||||
{
|
||||
scope: process.env.OPENID_ON_BEHALF_FLOW_USERINFRO_SCOPE || 'user.read',
|
||||
scope: process.env.OPENID_ON_BEHALF_FLOW_USERINFO_SCOPE || 'user.read',
|
||||
assertion: accessToken,
|
||||
requested_token_use: 'on_behalf_of',
|
||||
},
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@librechat/frontend",
|
||||
"version": "v0.7.8",
|
||||
"version": "v0.7.9-rc1",
|
||||
"description": "",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
|
||||
37
client/src/Providers/ActivePanelContext.tsx
Normal file
37
client/src/Providers/ActivePanelContext.tsx
Normal file
@@ -0,0 +1,37 @@
|
||||
import { createContext, useContext, useState, ReactNode } from 'react';
|
||||
|
||||
interface ActivePanelContextType {
|
||||
active: string | undefined;
|
||||
setActive: (id: string) => void;
|
||||
}
|
||||
|
||||
const ActivePanelContext = createContext<ActivePanelContextType | undefined>(undefined);
|
||||
|
||||
export function ActivePanelProvider({
|
||||
children,
|
||||
defaultActive,
|
||||
}: {
|
||||
children: ReactNode;
|
||||
defaultActive?: string;
|
||||
}) {
|
||||
const [active, _setActive] = useState<string | undefined>(defaultActive);
|
||||
|
||||
const setActive = (id: string) => {
|
||||
localStorage.setItem('side:active-panel', id);
|
||||
_setActive(id);
|
||||
};
|
||||
|
||||
return (
|
||||
<ActivePanelContext.Provider value={{ active, setActive }}>
|
||||
{children}
|
||||
</ActivePanelContext.Provider>
|
||||
);
|
||||
}
|
||||
|
||||
export function useActivePanel() {
|
||||
const context = useContext(ActivePanelContext);
|
||||
if (context === undefined) {
|
||||
throw new Error('useActivePanel must be used within an ActivePanelProvider');
|
||||
}
|
||||
return context;
|
||||
}
|
||||
@@ -40,41 +40,40 @@ export function AgentPanelProvider({ children }: { children: React.ReactNode })
|
||||
agent_id: agent_id || '',
|
||||
})) || [];
|
||||
|
||||
const groupedTools =
|
||||
tools?.reduce(
|
||||
(acc, tool) => {
|
||||
if (tool.tool_id.includes(Constants.mcp_delimiter)) {
|
||||
const [_toolName, serverName] = tool.tool_id.split(Constants.mcp_delimiter);
|
||||
const groupKey = `${serverName.toLowerCase()}`;
|
||||
if (!acc[groupKey]) {
|
||||
acc[groupKey] = {
|
||||
tool_id: groupKey,
|
||||
metadata: {
|
||||
name: `${serverName}`,
|
||||
pluginKey: groupKey,
|
||||
description: `${localize('com_ui_tool_collection_prefix')} ${serverName}`,
|
||||
icon: tool.metadata.icon || '',
|
||||
} as TPlugin,
|
||||
agent_id: agent_id || '',
|
||||
tools: [],
|
||||
};
|
||||
}
|
||||
acc[groupKey].tools?.push({
|
||||
tool_id: tool.tool_id,
|
||||
metadata: tool.metadata,
|
||||
agent_id: agent_id || '',
|
||||
});
|
||||
} else {
|
||||
acc[tool.tool_id] = {
|
||||
tool_id: tool.tool_id,
|
||||
metadata: tool.metadata,
|
||||
const groupedTools = tools?.reduce(
|
||||
(acc, tool) => {
|
||||
if (tool.tool_id.includes(Constants.mcp_delimiter)) {
|
||||
const [_toolName, serverName] = tool.tool_id.split(Constants.mcp_delimiter);
|
||||
const groupKey = `${serverName.toLowerCase()}`;
|
||||
if (!acc[groupKey]) {
|
||||
acc[groupKey] = {
|
||||
tool_id: groupKey,
|
||||
metadata: {
|
||||
name: `${serverName}`,
|
||||
pluginKey: groupKey,
|
||||
description: `${localize('com_ui_tool_collection_prefix')} ${serverName}`,
|
||||
icon: tool.metadata.icon || '',
|
||||
} as TPlugin,
|
||||
agent_id: agent_id || '',
|
||||
tools: [],
|
||||
};
|
||||
}
|
||||
return acc;
|
||||
},
|
||||
{} as Record<string, AgentToolType & { tools?: AgentToolType[] }>,
|
||||
) || {};
|
||||
acc[groupKey].tools?.push({
|
||||
tool_id: tool.tool_id,
|
||||
metadata: tool.metadata,
|
||||
agent_id: agent_id || '',
|
||||
});
|
||||
} else {
|
||||
acc[tool.tool_id] = {
|
||||
tool_id: tool.tool_id,
|
||||
metadata: tool.metadata,
|
||||
agent_id: agent_id || '',
|
||||
};
|
||||
}
|
||||
return acc;
|
||||
},
|
||||
{} as Record<string, AgentToolType & { tools?: AgentToolType[] }>,
|
||||
);
|
||||
|
||||
const value = {
|
||||
action,
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
export { default as AssistantsProvider } from './AssistantsContext';
|
||||
export { default as AgentsProvider } from './AgentsContext';
|
||||
export { default as ToastProvider } from './ToastContext';
|
||||
export * from './ActivePanelContext';
|
||||
export * from './AgentPanelContext';
|
||||
export * from './ChatContext';
|
||||
export * from './ShareContext';
|
||||
|
||||
@@ -219,11 +219,11 @@ export type AgentPanelContextType = {
|
||||
mcps?: t.MCP[];
|
||||
setMcp: React.Dispatch<React.SetStateAction<t.MCP | undefined>>;
|
||||
setMcps: React.Dispatch<React.SetStateAction<t.MCP[] | undefined>>;
|
||||
groupedTools: Record<string, t.AgentToolType & { tools?: t.AgentToolType[] }>;
|
||||
tools: t.AgentToolType[];
|
||||
activePanel?: string;
|
||||
setActivePanel: React.Dispatch<React.SetStateAction<Panel>>;
|
||||
setCurrentAgentId: React.Dispatch<React.SetStateAction<string | undefined>>;
|
||||
groupedTools?: Record<string, t.AgentToolType & { tools?: t.AgentToolType[] }>;
|
||||
agent_id?: string;
|
||||
};
|
||||
|
||||
@@ -336,6 +336,11 @@ export type TAskProps = {
|
||||
export type TOptions = {
|
||||
editedMessageId?: string | null;
|
||||
editedText?: string | null;
|
||||
editedContent?: {
|
||||
index: number;
|
||||
text: string;
|
||||
type: 'text' | 'think';
|
||||
};
|
||||
isRegenerate?: boolean;
|
||||
isContinued?: boolean;
|
||||
isEdited?: boolean;
|
||||
|
||||
@@ -4,34 +4,41 @@ import {
|
||||
supportsFiles,
|
||||
mergeFileConfig,
|
||||
isAgentsEndpoint,
|
||||
EndpointFileConfig,
|
||||
isAssistantsEndpoint,
|
||||
fileConfig as defaultFileConfig,
|
||||
} from 'librechat-data-provider';
|
||||
import type { EndpointFileConfig } from 'librechat-data-provider';
|
||||
import { useGetFileConfig } from '~/data-provider';
|
||||
import AttachFileMenu from './AttachFileMenu';
|
||||
import { useChatContext } from '~/Providers';
|
||||
import AttachFile from './AttachFile';
|
||||
|
||||
function AttachFileChat({ disableInputs }: { disableInputs: boolean }) {
|
||||
const { conversation } = useChatContext();
|
||||
const conversationId = conversation?.conversationId ?? Constants.NEW_CONVO;
|
||||
const { endpoint: _endpoint, endpointType } = conversation ?? { endpoint: null };
|
||||
const isAgents = useMemo(() => isAgentsEndpoint(_endpoint), [_endpoint]);
|
||||
const { endpoint, endpointType } = conversation ?? { endpoint: null };
|
||||
const isAgents = useMemo(() => isAgentsEndpoint(endpoint), [endpoint]);
|
||||
const isAssistants = useMemo(() => isAssistantsEndpoint(endpoint), [endpoint]);
|
||||
|
||||
const { data: fileConfig = defaultFileConfig } = useGetFileConfig({
|
||||
select: (data) => mergeFileConfig(data),
|
||||
});
|
||||
|
||||
const endpointFileConfig = fileConfig.endpoints[_endpoint ?? ''] as
|
||||
| EndpointFileConfig
|
||||
| undefined;
|
||||
|
||||
const endpointSupportsFiles: boolean = supportsFiles[endpointType ?? _endpoint ?? ''] ?? false;
|
||||
const endpointFileConfig = fileConfig.endpoints[endpoint ?? ''] as EndpointFileConfig | undefined;
|
||||
const endpointSupportsFiles: boolean = supportsFiles[endpointType ?? endpoint ?? ''] ?? false;
|
||||
const isUploadDisabled = (disableInputs || endpointFileConfig?.disabled) ?? false;
|
||||
|
||||
if (isAgents || (endpointSupportsFiles && !isUploadDisabled)) {
|
||||
return <AttachFileMenu disabled={disableInputs} conversationId={conversationId} />;
|
||||
if (isAssistants && endpointSupportsFiles && !isUploadDisabled) {
|
||||
return <AttachFile disabled={disableInputs} />;
|
||||
} else if (isAgents || (endpointSupportsFiles && !isUploadDisabled)) {
|
||||
return (
|
||||
<AttachFileMenu
|
||||
disabled={disableInputs}
|
||||
conversationId={conversationId}
|
||||
endpointFileConfig={endpointFileConfig}
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ import { useSetRecoilState } from 'recoil';
|
||||
import * as Ariakit from '@ariakit/react';
|
||||
import React, { useRef, useState, useMemo } from 'react';
|
||||
import { FileSearch, ImageUpIcon, TerminalSquareIcon, FileType2Icon } from 'lucide-react';
|
||||
import type { EndpointFileConfig } from 'librechat-data-provider';
|
||||
import { FileUpload, TooltipAnchor, DropdownPopup, AttachmentIcon } from '~/components';
|
||||
import { EToolResources, EModelEndpoint } from 'librechat-data-provider';
|
||||
import { useGetEndpointsQuery } from '~/data-provider';
|
||||
@@ -12,9 +13,10 @@ import { cn } from '~/utils';
|
||||
interface AttachFileMenuProps {
|
||||
conversationId: string;
|
||||
disabled?: boolean | null;
|
||||
endpointFileConfig?: EndpointFileConfig;
|
||||
}
|
||||
|
||||
const AttachFileMenu = ({ disabled, conversationId }: AttachFileMenuProps) => {
|
||||
const AttachFileMenu = ({ disabled, conversationId, endpointFileConfig }: AttachFileMenuProps) => {
|
||||
const localize = useLocalize();
|
||||
const isUploadDisabled = disabled ?? false;
|
||||
const inputRef = useRef<HTMLInputElement>(null);
|
||||
@@ -24,6 +26,7 @@ const AttachFileMenu = ({ disabled, conversationId }: AttachFileMenuProps) => {
|
||||
const { data: endpointsConfig } = useGetEndpointsQuery();
|
||||
const { handleFileChange } = useFileHandling({
|
||||
overrideEndpoint: EModelEndpoint.agents,
|
||||
overrideEndpointFileConfig: endpointFileConfig,
|
||||
});
|
||||
|
||||
/** TODO: Ephemeral Agent Capabilities
|
||||
|
||||
@@ -81,14 +81,23 @@ const ContentParts = memo(
|
||||
return (
|
||||
<>
|
||||
{content.map((part, idx) => {
|
||||
if (part?.type !== ContentTypes.TEXT || typeof part.text !== 'string') {
|
||||
if (!part) {
|
||||
return null;
|
||||
}
|
||||
const isTextPart =
|
||||
part?.type === ContentTypes.TEXT ||
|
||||
typeof (part as unknown as Agents.MessageContentText)?.text !== 'string';
|
||||
const isThinkPart =
|
||||
part?.type === ContentTypes.THINK ||
|
||||
typeof (part as unknown as Agents.ReasoningDeltaUpdate)?.think !== 'string';
|
||||
if (!isTextPart && !isThinkPart) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return (
|
||||
<EditTextPart
|
||||
index={idx}
|
||||
text={part.text}
|
||||
part={part as Agents.MessageContentText | Agents.ReasoningDeltaUpdate}
|
||||
messageId={messageId}
|
||||
isSubmitting={isSubmitting}
|
||||
enterEdit={enterEdit}
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
import { useRef, useEffect, useCallback, useMemo } from 'react';
|
||||
import { useForm } from 'react-hook-form';
|
||||
import { ContentTypes } from 'librechat-data-provider';
|
||||
import { useRecoilState, useRecoilValue } from 'recoil';
|
||||
import { useRef, useEffect, useCallback, useMemo } from 'react';
|
||||
import { useUpdateMessageContentMutation } from 'librechat-data-provider/react-query';
|
||||
import type { Agents } from 'librechat-data-provider';
|
||||
import type { TEditProps } from '~/common';
|
||||
import Container from '~/components/Chat/Messages/Content/Container';
|
||||
import { useChatContext, useAddedChatContext } from '~/Providers';
|
||||
@@ -12,18 +13,19 @@ import { useLocalize } from '~/hooks';
|
||||
import store from '~/store';
|
||||
|
||||
const EditTextPart = ({
|
||||
text,
|
||||
part,
|
||||
index,
|
||||
messageId,
|
||||
isSubmitting,
|
||||
enterEdit,
|
||||
}: Omit<TEditProps, 'message' | 'ask'> & {
|
||||
}: Omit<TEditProps, 'message' | 'ask' | 'text'> & {
|
||||
index: number;
|
||||
messageId: string;
|
||||
part: Agents.MessageContentText | Agents.ReasoningDeltaUpdate;
|
||||
}) => {
|
||||
const localize = useLocalize();
|
||||
const { addedIndex } = useAddedChatContext();
|
||||
const { getMessages, setMessages, conversation } = useChatContext();
|
||||
const { ask, getMessages, setMessages, conversation } = useChatContext();
|
||||
const [latestMultiMessage, setLatestMultiMessage] = useRecoilState(
|
||||
store.latestMessageFamily(addedIndex),
|
||||
);
|
||||
@@ -34,15 +36,16 @@ const EditTextPart = ({
|
||||
[getMessages, messageId],
|
||||
);
|
||||
|
||||
const chatDirection = useRecoilValue(store.chatDirection);
|
||||
|
||||
const textAreaRef = useRef<HTMLTextAreaElement | null>(null);
|
||||
const updateMessageContentMutation = useUpdateMessageContentMutation(conversationId ?? '');
|
||||
|
||||
const chatDirection = useRecoilValue(store.chatDirection).toLowerCase();
|
||||
const isRTL = chatDirection === 'rtl';
|
||||
const isRTL = chatDirection?.toLowerCase() === 'rtl';
|
||||
|
||||
const { register, handleSubmit, setValue } = useForm({
|
||||
defaultValues: {
|
||||
text: text ?? '',
|
||||
text: (ContentTypes.THINK in part ? part.think : part.text) || '',
|
||||
},
|
||||
});
|
||||
|
||||
@@ -55,15 +58,7 @@ const EditTextPart = ({
|
||||
}
|
||||
}, []);
|
||||
|
||||
/*
|
||||
const resubmitMessage = () => {
|
||||
showToast({
|
||||
status: 'warning',
|
||||
message: localize('com_warning_resubmit_unsupported'),
|
||||
});
|
||||
|
||||
// const resubmitMessage = (data: { text: string }) => {
|
||||
// Not supported by AWS Bedrock
|
||||
const resubmitMessage = (data: { text: string }) => {
|
||||
const messages = getMessages();
|
||||
const parentMessage = messages?.find((msg) => msg.messageId === message?.parentMessageId);
|
||||
|
||||
@@ -73,17 +68,19 @@ const EditTextPart = ({
|
||||
ask(
|
||||
{ ...parentMessage },
|
||||
{
|
||||
editedText: data.text,
|
||||
editedContent: {
|
||||
index,
|
||||
text: data.text,
|
||||
type: part.type,
|
||||
},
|
||||
editedMessageId: messageId,
|
||||
isRegenerate: true,
|
||||
isEdited: true,
|
||||
},
|
||||
);
|
||||
|
||||
setSiblingIdx((siblingIdx ?? 0) - 1);
|
||||
enterEdit(true);
|
||||
};
|
||||
*/
|
||||
|
||||
const updateMessage = (data: { text: string }) => {
|
||||
const messages = getMessages();
|
||||
@@ -167,13 +164,13 @@ const EditTextPart = ({
|
||||
/>
|
||||
</div>
|
||||
<div className="mt-2 flex w-full justify-center text-center">
|
||||
{/* <button
|
||||
<button
|
||||
className="btn btn-primary relative mr-2"
|
||||
disabled={isSubmitting}
|
||||
onClick={handleSubmit(resubmitMessage)}
|
||||
>
|
||||
{localize('com_ui_save_submit')}
|
||||
</button> */}
|
||||
</button>
|
||||
<button
|
||||
className="btn btn-secondary relative mr-2"
|
||||
disabled={isSubmitting}
|
||||
|
||||
@@ -62,6 +62,7 @@ const errorMessages = {
|
||||
const { info } = json;
|
||||
return info;
|
||||
},
|
||||
[ErrorTypes.GOOGLE_TOOL_CONFLICT]: 'com_error_google_tool_conflict',
|
||||
[ViolationTypes.BAN]:
|
||||
'Your account has been temporarily banned due to violations of our service.',
|
||||
invalid_api_key:
|
||||
|
||||
@@ -168,7 +168,7 @@ export default function AgentConfig({
|
||||
const visibleToolIds = new Set(selectedToolIds);
|
||||
|
||||
// Check what group parent tools should be shown if any subtool is present
|
||||
Object.entries(allTools).forEach(([toolId, toolObj]) => {
|
||||
Object.entries(allTools ?? {}).forEach(([toolId, toolObj]) => {
|
||||
if (toolObj.tools?.length) {
|
||||
// if any subtool of this group is selected, ensure group parent tool rendered
|
||||
if (toolObj.tools.some((st) => selectedToolIds.includes(st.tool_id))) {
|
||||
@@ -299,6 +299,7 @@ export default function AgentConfig({
|
||||
<div className="mb-1">
|
||||
{/* // Render all visible IDs (including groups with subtools selected) */}
|
||||
{[...visibleToolIds].map((toolId, i) => {
|
||||
if (!allTools) return null;
|
||||
const tool = allTools[toolId];
|
||||
if (!tool) return null;
|
||||
return (
|
||||
|
||||
@@ -19,7 +19,7 @@ export default function AgentTool({
|
||||
allTools,
|
||||
}: {
|
||||
tool: string;
|
||||
allTools: Record<string, AgentToolType & { tools?: AgentToolType[] }>;
|
||||
allTools?: Record<string, AgentToolType & { tools?: AgentToolType[] }>;
|
||||
agent_id?: string;
|
||||
}) {
|
||||
const [isHovering, setIsHovering] = useState(false);
|
||||
@@ -30,8 +30,10 @@ export default function AgentTool({
|
||||
const { showToast } = useToastContext();
|
||||
const updateUserPlugins = useUpdateUserPluginsMutation();
|
||||
const { getValues, setValue } = useFormContext<AgentForm>();
|
||||
if (!allTools) {
|
||||
return null;
|
||||
}
|
||||
const currentTool = allTools[tool];
|
||||
|
||||
const getSelectedTools = () => {
|
||||
if (!currentTool?.tools) return [];
|
||||
const formTools = getValues('tools') || [];
|
||||
@@ -224,7 +226,7 @@ export default function AgentTool({
|
||||
}}
|
||||
className={cn(
|
||||
'h-4 w-4 rounded border border-gray-300 transition-all duration-200 hover:border-gray-400 dark:border-gray-600 dark:hover:border-gray-500',
|
||||
isExpanded ? 'opacity-100' : 'opacity-0',
|
||||
isExpanded ? 'visible' : 'pointer-events-none invisible',
|
||||
)}
|
||||
onClick={(e) => e.stopPropagation()}
|
||||
onKeyDown={(e) => {
|
||||
|
||||
@@ -17,9 +17,9 @@ import {
|
||||
} from '~/data-provider';
|
||||
import { cn, cardStyle, defaultTextProps, removeFocusOutlines } from '~/utils';
|
||||
import AssistantConversationStarters from './AssistantConversationStarters';
|
||||
import AssistantToolsDialog from '~/components/Tools/AssistantToolsDialog';
|
||||
import { useAssistantsMapContext, useToastContext } from '~/Providers';
|
||||
import { useSelectAssistant, useLocalize } from '~/hooks';
|
||||
import { ToolSelectDialog } from '~/components/Tools';
|
||||
import AppendDateCheckbox from './AppendDateCheckbox';
|
||||
import CapabilitiesForm from './CapabilitiesForm';
|
||||
import { SelectDropDown } from '~/components/ui';
|
||||
@@ -468,11 +468,10 @@ export default function AssistantPanel({
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<ToolSelectDialog
|
||||
<AssistantToolsDialog
|
||||
endpoint={endpoint}
|
||||
isOpen={showToolDialog}
|
||||
setIsOpen={setShowToolDialog}
|
||||
toolsFormKey="functions"
|
||||
endpoint={endpoint}
|
||||
/>
|
||||
</form>
|
||||
</FormProvider>
|
||||
|
||||
@@ -1,21 +1,15 @@
|
||||
import { useState } from 'react';
|
||||
import * as AccordionPrimitive from '@radix-ui/react-accordion';
|
||||
import type { NavLink, NavProps } from '~/common';
|
||||
import { Accordion, AccordionItem, AccordionContent } from '~/components/ui/Accordion';
|
||||
import { TooltipAnchor, Button } from '~/components';
|
||||
import { AccordionContent, AccordionItem, TooltipAnchor, Accordion, Button } from '~/components/ui';
|
||||
import { ActivePanelProvider, useActivePanel } from '~/Providers';
|
||||
import { useLocalize } from '~/hooks';
|
||||
import { cn } from '~/utils';
|
||||
|
||||
export default function Nav({ links, isCollapsed, resize, defaultActive }: NavProps) {
|
||||
function NavContent({ links, isCollapsed, resize }: Omit<NavProps, 'defaultActive'>) {
|
||||
const localize = useLocalize();
|
||||
const [active, _setActive] = useState<string | undefined>(defaultActive);
|
||||
const { active, setActive } = useActivePanel();
|
||||
const getVariant = (link: NavLink) => (link.id === active ? 'default' : 'ghost');
|
||||
|
||||
const setActive = (id: string) => {
|
||||
localStorage.setItem('side:active-panel', id + '');
|
||||
_setActive(id);
|
||||
};
|
||||
|
||||
return (
|
||||
<div
|
||||
data-collapsed={isCollapsed}
|
||||
@@ -105,3 +99,11 @@ export default function Nav({ links, isCollapsed, resize, defaultActive }: NavPr
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
export default function Nav({ links, isCollapsed, resize, defaultActive }: NavProps) {
|
||||
return (
|
||||
<ActivePanelProvider defaultActive={defaultActive}>
|
||||
<NavContent links={links} isCollapsed={isCollapsed} resize={resize} />
|
||||
</ActivePanelProvider>
|
||||
);
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ function DynamicSlider({
|
||||
setOption,
|
||||
optionType,
|
||||
options,
|
||||
enumMappings,
|
||||
readonly = false,
|
||||
showDefault = false,
|
||||
includeInput = true,
|
||||
@@ -60,24 +61,68 @@ function DynamicSlider({
|
||||
|
||||
const enumToNumeric = useMemo(() => {
|
||||
if (isEnum && options) {
|
||||
return options.reduce((acc, mapping, index) => {
|
||||
acc[mapping] = index;
|
||||
return acc;
|
||||
}, {} as Record<string, number>);
|
||||
return options.reduce(
|
||||
(acc, mapping, index) => {
|
||||
acc[mapping] = index;
|
||||
return acc;
|
||||
},
|
||||
{} as Record<string, number>,
|
||||
);
|
||||
}
|
||||
return {};
|
||||
}, [isEnum, options]);
|
||||
|
||||
const valueToEnumOption = useMemo(() => {
|
||||
if (isEnum && options) {
|
||||
return options.reduce((acc, option, index) => {
|
||||
acc[index] = option;
|
||||
return acc;
|
||||
}, {} as Record<number, string>);
|
||||
return options.reduce(
|
||||
(acc, option, index) => {
|
||||
acc[index] = option;
|
||||
return acc;
|
||||
},
|
||||
{} as Record<number, string>,
|
||||
);
|
||||
}
|
||||
return {};
|
||||
}, [isEnum, options]);
|
||||
|
||||
const getDisplayValue = useCallback(
|
||||
(value: string | number | undefined | null): string => {
|
||||
if (isEnum && enumMappings && value != null) {
|
||||
const stringValue = String(value);
|
||||
// Check if the value exists in enumMappings
|
||||
if (stringValue in enumMappings) {
|
||||
const mappedValue = String(enumMappings[stringValue]);
|
||||
// Check if the mapped value is a localization key
|
||||
if (mappedValue.startsWith('com_')) {
|
||||
return localize(mappedValue as TranslationKeys) ?? mappedValue;
|
||||
}
|
||||
return mappedValue;
|
||||
}
|
||||
}
|
||||
// Always return a string for Input component compatibility
|
||||
if (value != null) {
|
||||
return String(value);
|
||||
}
|
||||
return String(defaultValue ?? '');
|
||||
},
|
||||
[isEnum, enumMappings, defaultValue, localize],
|
||||
);
|
||||
|
||||
const getDefaultDisplayValue = useCallback((): string => {
|
||||
if (defaultValue != null && enumMappings) {
|
||||
const stringDefault = String(defaultValue);
|
||||
if (stringDefault in enumMappings) {
|
||||
const mappedValue = String(enumMappings[stringDefault]);
|
||||
// Check if the mapped value is a localization key
|
||||
if (mappedValue.startsWith('com_')) {
|
||||
return localize(mappedValue as TranslationKeys) ?? mappedValue;
|
||||
}
|
||||
return mappedValue;
|
||||
}
|
||||
}
|
||||
return String(defaultValue ?? '');
|
||||
}, [defaultValue, enumMappings, localize]);
|
||||
|
||||
const handleValueChange = useCallback(
|
||||
(value: number) => {
|
||||
if (isEnum) {
|
||||
@@ -115,12 +160,12 @@ function DynamicSlider({
|
||||
<div className="flex w-full items-center justify-between">
|
||||
<Label
|
||||
htmlFor={`${settingKey}-dynamic-setting`}
|
||||
className="text-left text-sm font-medium"
|
||||
className="break-words text-left text-sm font-medium"
|
||||
>
|
||||
{labelCode ? localize(label as TranslationKeys) ?? label : label || settingKey}{' '}
|
||||
{labelCode ? (localize(label as TranslationKeys) ?? label) : label || settingKey}{' '}
|
||||
{showDefault && (
|
||||
<small className="opacity-40">
|
||||
({localize('com_endpoint_default')}: {defaultValue})
|
||||
({localize('com_endpoint_default')}: {getDefaultDisplayValue()})
|
||||
</small>
|
||||
)}
|
||||
</Label>
|
||||
@@ -132,13 +177,13 @@ function DynamicSlider({
|
||||
onChange={(value) => setInputValue(Number(value))}
|
||||
max={range ? range.max : (options?.length ?? 0) - 1}
|
||||
min={range ? range.min : 0}
|
||||
step={range ? range.step ?? 1 : 1}
|
||||
step={range ? (range.step ?? 1) : 1}
|
||||
controls={false}
|
||||
className={cn(
|
||||
defaultTextProps,
|
||||
cn(
|
||||
optionText,
|
||||
'reset-rc-number-input reset-rc-number-input-text-right h-auto w-12 border-0 group-hover/temp:border-gray-200',
|
||||
'reset-rc-number-input reset-rc-number-input-text-right h-auto w-12 border-0 py-1 text-xs group-hover/temp:border-gray-200',
|
||||
),
|
||||
)}
|
||||
/>
|
||||
@@ -146,13 +191,13 @@ function DynamicSlider({
|
||||
<Input
|
||||
id={`${settingKey}-dynamic-setting-input`}
|
||||
disabled={readonly}
|
||||
value={selectedValue ?? defaultValue}
|
||||
value={getDisplayValue(selectedValue)}
|
||||
onChange={() => ({})}
|
||||
className={cn(
|
||||
defaultTextProps,
|
||||
cn(
|
||||
optionText,
|
||||
'reset-rc-number-input reset-rc-number-input-text-right h-auto w-12 border-0 group-hover/temp:border-gray-200',
|
||||
'reset-rc-number-input h-auto w-14 border-0 py-1 pl-1 text-center text-xs group-hover/temp:border-gray-200',
|
||||
),
|
||||
)}
|
||||
/>
|
||||
@@ -164,19 +209,23 @@ function DynamicSlider({
|
||||
value={[
|
||||
isEnum
|
||||
? enumToNumeric[(selectedValue as number) ?? '']
|
||||
: (inputValue as number) ?? (defaultValue as number),
|
||||
: ((inputValue as number) ?? (defaultValue as number)),
|
||||
]}
|
||||
onValueChange={(value) => handleValueChange(value[0])}
|
||||
onDoubleClick={() => setInputValue(defaultValue as string | number)}
|
||||
max={max}
|
||||
min={range ? range.min : 0}
|
||||
step={range ? range.step ?? 1 : 1}
|
||||
step={range ? (range.step ?? 1) : 1}
|
||||
className="flex h-4 w-full"
|
||||
/>
|
||||
</HoverCardTrigger>
|
||||
{description && (
|
||||
<OptionHover
|
||||
description={descriptionCode ? localize(description as TranslationKeys) ?? description : description}
|
||||
description={
|
||||
descriptionCode
|
||||
? (localize(description as TranslationKeys) ?? description)
|
||||
: description
|
||||
}
|
||||
side={ESide.Left}
|
||||
/>
|
||||
)}
|
||||
|
||||
@@ -50,7 +50,7 @@ function DynamicSwitch({
|
||||
<div className="flex justify-between">
|
||||
<Label
|
||||
htmlFor={`${settingKey}-dynamic-switch`}
|
||||
className="text-left text-sm font-medium"
|
||||
className="break-words text-left text-sm font-medium"
|
||||
>
|
||||
{labelCode ? (localize(label as TranslationKeys) ?? label) : label || settingKey}{' '}
|
||||
{showDefault && (
|
||||
|
||||
254
client/src/components/Tools/AssistantToolsDialog.tsx
Normal file
254
client/src/components/Tools/AssistantToolsDialog.tsx
Normal file
@@ -0,0 +1,254 @@
|
||||
import { useEffect } from 'react';
|
||||
import { Search, X } from 'lucide-react';
|
||||
import { Dialog, DialogPanel, DialogTitle, Description } from '@headlessui/react';
|
||||
import { useFormContext } from 'react-hook-form';
|
||||
import { isAgentsEndpoint } from 'librechat-data-provider';
|
||||
import { useUpdateUserPluginsMutation } from 'librechat-data-provider/react-query';
|
||||
import type {
|
||||
AssistantsEndpoint,
|
||||
EModelEndpoint,
|
||||
TPluginAction,
|
||||
TError,
|
||||
} from 'librechat-data-provider';
|
||||
import type { TPluginStoreDialogProps } from '~/common/types';
|
||||
import { PluginPagination, PluginAuthForm } from '~/components/Plugins/Store';
|
||||
import { useLocalize, usePluginDialogHelpers } from '~/hooks';
|
||||
import { useAvailableToolsQuery } from '~/data-provider';
|
||||
import ToolItem from './ToolItem';
|
||||
|
||||
function AssistantToolsDialog({
|
||||
isOpen,
|
||||
endpoint,
|
||||
setIsOpen,
|
||||
}: TPluginStoreDialogProps & {
|
||||
endpoint: AssistantsEndpoint | EModelEndpoint.agents;
|
||||
}) {
|
||||
const localize = useLocalize();
|
||||
const { getValues, setValue } = useFormContext();
|
||||
const { data: tools } = useAvailableToolsQuery(endpoint);
|
||||
const isAgentTools = isAgentsEndpoint(endpoint);
|
||||
|
||||
const {
|
||||
maxPage,
|
||||
setMaxPage,
|
||||
currentPage,
|
||||
setCurrentPage,
|
||||
itemsPerPage,
|
||||
searchChanged,
|
||||
setSearchChanged,
|
||||
searchValue,
|
||||
setSearchValue,
|
||||
gridRef,
|
||||
handleSearch,
|
||||
handleChangePage,
|
||||
error,
|
||||
setError,
|
||||
errorMessage,
|
||||
setErrorMessage,
|
||||
showPluginAuthForm,
|
||||
setShowPluginAuthForm,
|
||||
selectedPlugin,
|
||||
setSelectedPlugin,
|
||||
} = usePluginDialogHelpers();
|
||||
|
||||
const updateUserPlugins = useUpdateUserPluginsMutation();
|
||||
const handleInstallError = (error: TError) => {
|
||||
setError(true);
|
||||
const errorMessage = error.response?.data?.message ?? '';
|
||||
if (errorMessage) {
|
||||
setErrorMessage(errorMessage);
|
||||
}
|
||||
setTimeout(() => {
|
||||
setError(false);
|
||||
setErrorMessage('');
|
||||
}, 5000);
|
||||
};
|
||||
|
||||
const handleInstall = (pluginAction: TPluginAction) => {
|
||||
const addFunction = () => {
|
||||
const fns = getValues('functions').slice();
|
||||
fns.push(pluginAction.pluginKey);
|
||||
setValue('functions', fns);
|
||||
};
|
||||
|
||||
if (!pluginAction.auth) {
|
||||
return addFunction();
|
||||
}
|
||||
|
||||
updateUserPlugins.mutate(pluginAction, {
|
||||
onError: (error: unknown) => {
|
||||
handleInstallError(error as TError);
|
||||
},
|
||||
onSuccess: addFunction,
|
||||
});
|
||||
|
||||
setShowPluginAuthForm(false);
|
||||
};
|
||||
|
||||
const onRemoveTool = (tool: string) => {
|
||||
setShowPluginAuthForm(false);
|
||||
updateUserPlugins.mutate(
|
||||
{ pluginKey: tool, action: 'uninstall', auth: null, isEntityTool: true },
|
||||
{
|
||||
onError: (error: unknown) => {
|
||||
handleInstallError(error as TError);
|
||||
},
|
||||
onSuccess: () => {
|
||||
const fns = getValues('functions').filter((fn: string) => fn !== tool);
|
||||
setValue('functions', fns);
|
||||
},
|
||||
},
|
||||
);
|
||||
};
|
||||
|
||||
const onAddTool = (pluginKey: string) => {
|
||||
setShowPluginAuthForm(false);
|
||||
const getAvailablePluginFromKey = tools?.find((p) => p.pluginKey === pluginKey);
|
||||
setSelectedPlugin(getAvailablePluginFromKey);
|
||||
|
||||
const { authConfig, authenticated = false } = getAvailablePluginFromKey ?? {};
|
||||
|
||||
if (authConfig && authConfig.length > 0 && !authenticated) {
|
||||
setShowPluginAuthForm(true);
|
||||
} else {
|
||||
handleInstall({ pluginKey, action: 'install', auth: null });
|
||||
}
|
||||
};
|
||||
|
||||
const filteredTools = tools?.filter((tool) =>
|
||||
tool.name.toLowerCase().includes(searchValue.toLowerCase()),
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
if (filteredTools) {
|
||||
setMaxPage(Math.ceil(filteredTools.length / itemsPerPage));
|
||||
if (searchChanged) {
|
||||
setCurrentPage(1);
|
||||
setSearchChanged(false);
|
||||
}
|
||||
}
|
||||
}, [
|
||||
tools,
|
||||
itemsPerPage,
|
||||
searchValue,
|
||||
filteredTools,
|
||||
searchChanged,
|
||||
setMaxPage,
|
||||
setCurrentPage,
|
||||
setSearchChanged,
|
||||
]);
|
||||
|
||||
return (
|
||||
<Dialog
|
||||
open={isOpen}
|
||||
onClose={() => {
|
||||
setIsOpen(false);
|
||||
setCurrentPage(1);
|
||||
setSearchValue('');
|
||||
}}
|
||||
className="relative z-[102]"
|
||||
>
|
||||
{/* The backdrop, rendered as a fixed sibling to the panel container */}
|
||||
<div className="fixed inset-0 bg-surface-primary opacity-60 transition-opacity dark:opacity-80" />
|
||||
{/* Full-screen container to center the panel */}
|
||||
<div className="fixed inset-0 flex items-center justify-center p-4">
|
||||
<DialogPanel
|
||||
className="relative w-full transform overflow-hidden overflow-y-auto rounded-lg bg-surface-secondary text-left shadow-xl transition-all max-sm:h-full sm:mx-7 sm:my-8 sm:max-w-2xl lg:max-w-5xl xl:max-w-7xl"
|
||||
style={{ minHeight: '610px' }}
|
||||
>
|
||||
<div className="flex items-center justify-between border-b-[1px] border-border-medium px-4 pb-4 pt-5 sm:p-6">
|
||||
<div className="flex items-center">
|
||||
<div className="text-center sm:text-left">
|
||||
<DialogTitle className="text-lg font-medium leading-6 text-text-primary">
|
||||
{isAgentTools
|
||||
? localize('com_nav_tool_dialog_agents')
|
||||
: localize('com_nav_tool_dialog')}
|
||||
</DialogTitle>
|
||||
<Description className="text-sm text-text-secondary">
|
||||
{localize('com_nav_tool_dialog_description')}
|
||||
</Description>
|
||||
</div>
|
||||
</div>
|
||||
<div>
|
||||
<div className="sm:mt-0">
|
||||
<button
|
||||
onClick={() => {
|
||||
setIsOpen(false);
|
||||
setCurrentPage(1);
|
||||
}}
|
||||
className="inline-block rounded-full text-text-secondary transition-colors hover:text-text-primary"
|
||||
aria-label="Close dialog"
|
||||
type="button"
|
||||
>
|
||||
<X aria-hidden="true" />
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{error && (
|
||||
<div
|
||||
className="relative m-4 rounded border border-red-400 bg-red-100 px-4 py-3 text-red-700"
|
||||
role="alert"
|
||||
>
|
||||
{localize('com_nav_plugin_auth_error')} {errorMessage}
|
||||
</div>
|
||||
)}
|
||||
{showPluginAuthForm && (
|
||||
<div className="p-4 sm:p-6 sm:pt-4">
|
||||
<PluginAuthForm
|
||||
plugin={selectedPlugin}
|
||||
onSubmit={(installActionData: TPluginAction) => handleInstall(installActionData)}
|
||||
isEntityTool={true}
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
<div className="p-4 sm:p-6 sm:pt-4">
|
||||
<div className="mt-4 flex flex-col gap-4">
|
||||
<div className="flex items-center justify-center space-x-4">
|
||||
<Search className="h-6 w-6 text-text-tertiary" />
|
||||
<input
|
||||
type="text"
|
||||
value={searchValue}
|
||||
onChange={handleSearch}
|
||||
placeholder={localize('com_nav_tool_search')}
|
||||
className="w-64 rounded border border-border-medium bg-transparent px-2 py-1 text-text-primary focus:outline-none"
|
||||
/>
|
||||
</div>
|
||||
<div
|
||||
ref={gridRef}
|
||||
className="grid grid-cols-1 grid-rows-2 gap-3 sm:grid-cols-2 lg:grid-cols-3 xl:grid-cols-4"
|
||||
style={{ minHeight: '410px' }}
|
||||
>
|
||||
{filteredTools &&
|
||||
filteredTools
|
||||
.slice((currentPage - 1) * itemsPerPage, currentPage * itemsPerPage)
|
||||
.map((tool, index) => (
|
||||
<ToolItem
|
||||
key={index}
|
||||
tool={tool}
|
||||
isInstalled={getValues('functions').includes(tool.pluginKey)}
|
||||
onAddTool={() => onAddTool(tool.pluginKey)}
|
||||
onRemoveTool={() => onRemoveTool(tool.pluginKey)}
|
||||
/>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
<div className="mt-2 flex flex-col items-center gap-2 sm:flex-row sm:justify-between">
|
||||
{maxPage > 0 ? (
|
||||
<PluginPagination
|
||||
currentPage={currentPage}
|
||||
maxPage={maxPage}
|
||||
onChangePage={handleChangePage}
|
||||
/>
|
||||
) : (
|
||||
<div style={{ height: '21px' }}></div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</DialogPanel>
|
||||
</div>
|
||||
</Dialog>
|
||||
);
|
||||
}
|
||||
|
||||
export default AssistantToolsDialog;
|
||||
@@ -1,9 +1,9 @@
|
||||
import { XCircle, PlusCircleIcon, Wrench } from 'lucide-react';
|
||||
import { AgentToolType } from 'librechat-data-provider';
|
||||
import type { TPlugin, AgentToolType } from 'librechat-data-provider';
|
||||
import { useLocalize } from '~/hooks';
|
||||
|
||||
type ToolItemProps = {
|
||||
tool: AgentToolType;
|
||||
tool: TPlugin | AgentToolType;
|
||||
onAddTool: () => void;
|
||||
onRemoveTool: () => void;
|
||||
isInstalled?: boolean;
|
||||
@@ -19,9 +19,13 @@ function ToolItem({ tool, onAddTool, onRemoveTool, isInstalled = false }: ToolIt
|
||||
}
|
||||
};
|
||||
|
||||
const name = tool.metadata?.name || tool.tool_id;
|
||||
const description = tool.metadata?.description || '';
|
||||
const icon = tool.metadata?.icon;
|
||||
const name =
|
||||
(tool as AgentToolType).metadata?.name ||
|
||||
(tool as AgentToolType).tool_id ||
|
||||
(tool as TPlugin).name;
|
||||
const description =
|
||||
(tool as AgentToolType).metadata?.description || (tool as TPlugin).description || '';
|
||||
const icon = (tool as AgentToolType).metadata?.icon || (tool as TPlugin).icon;
|
||||
|
||||
return (
|
||||
<div className="flex flex-col gap-4 rounded border border-border-medium bg-transparent p-6">
|
||||
|
||||
@@ -67,15 +67,14 @@ function ToolSelectDialog({
|
||||
}, 5000);
|
||||
};
|
||||
|
||||
const toolsFormKey = 'tools';
|
||||
const handleInstall = (pluginAction: TPluginAction) => {
|
||||
const addFunction = () => {
|
||||
const installedToolIds: string[] = getValues(toolsFormKey) || [];
|
||||
const installedToolIds: string[] = getValues('tools') || [];
|
||||
// Add the parent
|
||||
installedToolIds.push(pluginAction.pluginKey);
|
||||
|
||||
// If this tool is a group, add subtools too
|
||||
const groupObj = groupedTools[pluginAction.pluginKey];
|
||||
const groupObj = groupedTools?.[pluginAction.pluginKey];
|
||||
if (groupObj?.tools && groupObj.tools.length > 0) {
|
||||
for (const sub of groupObj.tools) {
|
||||
if (!installedToolIds.includes(sub.tool_id)) {
|
||||
@@ -83,7 +82,7 @@ function ToolSelectDialog({
|
||||
}
|
||||
}
|
||||
}
|
||||
setValue(toolsFormKey, Array.from(new Set(installedToolIds))); // no duplicates just in case
|
||||
setValue('tools', Array.from(new Set(installedToolIds))); // no duplicates just in case
|
||||
};
|
||||
|
||||
if (!pluginAction.auth) {
|
||||
@@ -101,7 +100,7 @@ function ToolSelectDialog({
|
||||
};
|
||||
|
||||
const onRemoveTool = (toolId: string) => {
|
||||
const groupObj = groupedTools[toolId];
|
||||
const groupObj = groupedTools?.[toolId];
|
||||
const toolIdsToRemove = [toolId];
|
||||
if (groupObj?.tools && groupObj.tools.length > 0) {
|
||||
toolIdsToRemove.push(...groupObj.tools.map((sub) => sub.tool_id));
|
||||
@@ -113,8 +112,8 @@ function ToolSelectDialog({
|
||||
onError: (error: unknown) => handleInstallError(error as TError),
|
||||
onSuccess: () => {
|
||||
const remainingToolIds =
|
||||
getValues(toolsFormKey)?.filter((toolId) => !toolIdsToRemove.includes(toolId)) || [];
|
||||
setValue(toolsFormKey, remainingToolIds);
|
||||
getValues('tools')?.filter((toolId) => !toolIdsToRemove.includes(toolId)) || [];
|
||||
setValue('tools', remainingToolIds);
|
||||
},
|
||||
},
|
||||
);
|
||||
@@ -268,7 +267,7 @@ function ToolSelectDialog({
|
||||
<ToolItem
|
||||
key={index}
|
||||
tool={tool}
|
||||
isInstalled={getValues(toolsFormKey)?.includes(tool.tool_id) || false}
|
||||
isInstalled={getValues('tools')?.includes(tool.tool_id) || false}
|
||||
onAddTool={() => onAddTool(tool.tool_id)}
|
||||
onRemoveTool={() => onRemoveTool(tool.tool_id)}
|
||||
/>
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
export * from './Accordion';
|
||||
export * from './AnimatedTabs';
|
||||
export * from './AlertDialog';
|
||||
export * from './Breadcrumb';
|
||||
|
||||
@@ -6,6 +6,7 @@ import {
|
||||
QueryKeys,
|
||||
ContentTypes,
|
||||
EModelEndpoint,
|
||||
isAgentsEndpoint,
|
||||
parseCompactConvo,
|
||||
replaceSpecialVars,
|
||||
isAssistantsEndpoint,
|
||||
@@ -36,15 +37,6 @@ const logChatRequest = (request: Record<string, unknown>) => {
|
||||
logger.log('=====================================');
|
||||
};
|
||||
|
||||
const usesContentStream = (endpoint: EModelEndpoint | undefined, endpointType?: string) => {
|
||||
if (endpointType === EModelEndpoint.custom) {
|
||||
return true;
|
||||
}
|
||||
if (endpoint === EModelEndpoint.openAI || endpoint === EModelEndpoint.azureOpenAI) {
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
export default function useChatFunctions({
|
||||
index = 0,
|
||||
files,
|
||||
@@ -93,7 +85,7 @@ export default function useChatFunctions({
|
||||
messageId = null,
|
||||
},
|
||||
{
|
||||
editedText = null,
|
||||
editedContent = null,
|
||||
editedMessageId = null,
|
||||
isResubmission = false,
|
||||
isRegenerate = false,
|
||||
@@ -245,14 +237,11 @@ export default function useChatFunctions({
|
||||
setFilesToDelete({});
|
||||
}
|
||||
|
||||
const generation = editedText ?? latestMessage?.text ?? '';
|
||||
const responseText = isEditOrContinue ? generation : '';
|
||||
|
||||
const responseMessageId =
|
||||
editedMessageId ?? (latestMessage?.messageId ? latestMessage?.messageId + '_' : null) ?? null;
|
||||
const initialResponse: TMessage = {
|
||||
sender: responseSender,
|
||||
text: responseText,
|
||||
text: '',
|
||||
endpoint: endpoint ?? '',
|
||||
parentMessageId: isRegenerate ? messageId : intermediateId,
|
||||
messageId: responseMessageId ?? `${isRegenerate ? messageId : intermediateId}_`,
|
||||
@@ -272,34 +261,37 @@ export default function useChatFunctions({
|
||||
{
|
||||
type: ContentTypes.TEXT,
|
||||
[ContentTypes.TEXT]: {
|
||||
value: responseText,
|
||||
value: '',
|
||||
},
|
||||
},
|
||||
];
|
||||
} else if (endpoint === EModelEndpoint.agents) {
|
||||
initialResponse.model = conversation?.agent_id ?? '';
|
||||
} else if (endpoint != null) {
|
||||
initialResponse.model = isAgentsEndpoint(endpoint)
|
||||
? (conversation?.agent_id ?? '')
|
||||
: (conversation?.model ?? '');
|
||||
initialResponse.text = '';
|
||||
initialResponse.content = [
|
||||
{
|
||||
type: ContentTypes.TEXT,
|
||||
[ContentTypes.TEXT]: {
|
||||
value: responseText,
|
||||
|
||||
if (editedContent && latestMessage?.content) {
|
||||
initialResponse.content = cloneDeep(latestMessage.content);
|
||||
const { index, text, type } = editedContent;
|
||||
if (initialResponse.content && index >= 0 && index < initialResponse.content.length) {
|
||||
const contentPart = initialResponse.content[index];
|
||||
if (type === ContentTypes.THINK && contentPart.type === ContentTypes.THINK) {
|
||||
contentPart[ContentTypes.THINK] = text;
|
||||
} else if (type === ContentTypes.TEXT && contentPart.type === ContentTypes.TEXT) {
|
||||
contentPart[ContentTypes.TEXT] = text;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
initialResponse.content = [
|
||||
{
|
||||
type: ContentTypes.TEXT,
|
||||
[ContentTypes.TEXT]: {
|
||||
value: '',
|
||||
},
|
||||
},
|
||||
},
|
||||
];
|
||||
setShowStopButton(true);
|
||||
} else if (usesContentStream(endpoint, endpointType)) {
|
||||
initialResponse.text = '';
|
||||
initialResponse.content = [
|
||||
{
|
||||
type: ContentTypes.TEXT,
|
||||
[ContentTypes.TEXT]: {
|
||||
value: responseText,
|
||||
},
|
||||
},
|
||||
];
|
||||
setShowStopButton(true);
|
||||
} else {
|
||||
];
|
||||
}
|
||||
setShowStopButton(true);
|
||||
}
|
||||
|
||||
@@ -316,7 +308,6 @@ export default function useChatFunctions({
|
||||
endpointOption,
|
||||
userMessage: {
|
||||
...currentMsg,
|
||||
generation,
|
||||
responseMessageId,
|
||||
overrideParentMessageId: isRegenerate ? messageId : null,
|
||||
},
|
||||
@@ -328,6 +319,7 @@ export default function useChatFunctions({
|
||||
initialResponse,
|
||||
isTemporary,
|
||||
ephemeralAgent,
|
||||
editedContent,
|
||||
};
|
||||
|
||||
if (isRegenerate) {
|
||||
|
||||
@@ -30,6 +30,14 @@ const useSetIndexOptions: TUseSetOptions = (preset = false) => {
|
||||
};
|
||||
}
|
||||
|
||||
// Auto-enable Responses API when web search is enabled
|
||||
if (param === 'web_search' && newValue === true) {
|
||||
const currentUseResponsesApi = conversation?.useResponsesApi ?? false;
|
||||
if (!currentUseResponsesApi) {
|
||||
update['useResponsesApi'] = true;
|
||||
}
|
||||
}
|
||||
|
||||
setConversation(
|
||||
(prevState) =>
|
||||
tConvoUpdateSchema.parse({
|
||||
|
||||
@@ -1,33 +1,34 @@
|
||||
import { useQueryClient } from '@tanstack/react-query';
|
||||
import type { TEndpointsConfig, TError } from 'librechat-data-provider';
|
||||
import {
|
||||
defaultAssistantsVersion,
|
||||
fileConfig as defaultFileConfig,
|
||||
EModelEndpoint,
|
||||
isAgentsEndpoint,
|
||||
isAssistantsEndpoint,
|
||||
mergeFileConfig,
|
||||
QueryKeys,
|
||||
} from 'librechat-data-provider';
|
||||
import debounce from 'lodash/debounce';
|
||||
import React, { useCallback, useEffect, useMemo, useRef, useState } from 'react';
|
||||
import { v4 } from 'uuid';
|
||||
import { useQueryClient } from '@tanstack/react-query';
|
||||
import {
|
||||
QueryKeys,
|
||||
EModelEndpoint,
|
||||
mergeFileConfig,
|
||||
isAgentsEndpoint,
|
||||
isAssistantsEndpoint,
|
||||
defaultAssistantsVersion,
|
||||
fileConfig as defaultFileConfig,
|
||||
} from 'librechat-data-provider';
|
||||
import debounce from 'lodash/debounce';
|
||||
import type { EndpointFileConfig, TEndpointsConfig, TError } from 'librechat-data-provider';
|
||||
import type { ExtendedFile, FileSetter } from '~/common';
|
||||
import { useGetFileConfig, useUploadFileMutation } from '~/data-provider';
|
||||
import useLocalize, { TranslationKeys } from '~/hooks/useLocalize';
|
||||
import { useChatContext } from '~/Providers/ChatContext';
|
||||
import { useDelayedUploadToast } from './useDelayedUploadToast';
|
||||
import { processFileForUpload } from '~/utils/heicConverter';
|
||||
import { useToastContext } from '~/Providers/ToastContext';
|
||||
import { useChatContext } from '~/Providers/ChatContext';
|
||||
import { logger, validateFiles } from '~/utils';
|
||||
import useClientResize from './useClientResize';
|
||||
import { processFileForUpload } from '~/utils/heicConverter';
|
||||
import { useDelayedUploadToast } from './useDelayedUploadToast';
|
||||
import useUpdateFiles from './useUpdateFiles';
|
||||
|
||||
type UseFileHandling = {
|
||||
overrideEndpoint?: EModelEndpoint;
|
||||
fileSetter?: FileSetter;
|
||||
fileFilter?: (file: File) => boolean;
|
||||
additionalMetadata?: Record<string, string | undefined>;
|
||||
overrideEndpoint?: EModelEndpoint;
|
||||
overrideEndpointFileConfig?: EndpointFileConfig;
|
||||
};
|
||||
|
||||
const useFileHandling = (params?: UseFileHandling) => {
|
||||
@@ -246,8 +247,9 @@ const useFileHandling = (params?: UseFileHandling) => {
|
||||
fileList,
|
||||
setError,
|
||||
endpointFileConfig:
|
||||
fileConfig?.endpoints[endpoint] ??
|
||||
fileConfig?.endpoints.default ??
|
||||
params?.overrideEndpointFileConfig ??
|
||||
fileConfig?.endpoints?.[endpoint] ??
|
||||
fileConfig?.endpoints?.default ??
|
||||
defaultFileConfig.endpoints[endpoint] ??
|
||||
defaultFileConfig.endpoints.default,
|
||||
});
|
||||
|
||||
@@ -79,7 +79,7 @@ export default function useSideNavLinks({
|
||||
title: 'com_sidepanel_assistant_builder',
|
||||
label: '',
|
||||
icon: Blocks,
|
||||
id: 'assistants',
|
||||
id: EModelEndpoint.assistants,
|
||||
Component: PanelSwitch,
|
||||
});
|
||||
}
|
||||
@@ -94,7 +94,7 @@ export default function useSideNavLinks({
|
||||
title: 'com_sidepanel_agent_builder',
|
||||
label: '',
|
||||
icon: Blocks,
|
||||
id: 'agents',
|
||||
id: EModelEndpoint.agents,
|
||||
Component: AgentPanelSwitch,
|
||||
});
|
||||
}
|
||||
|
||||
@@ -55,6 +55,26 @@ export default function useStepHandler({
|
||||
const messageMap = useRef(new Map<string, TMessage>());
|
||||
const stepMap = useRef(new Map<string, Agents.RunStep>());
|
||||
|
||||
const calculateContentIndex = (
|
||||
baseIndex: number,
|
||||
initialContent: TMessageContentParts[],
|
||||
incomingContentType: string,
|
||||
existingContent?: TMessageContentParts[],
|
||||
): number => {
|
||||
/** Only apply -1 adjustment for TEXT or THINK types when they match existing content */
|
||||
if (
|
||||
initialContent.length > 0 &&
|
||||
(incomingContentType === ContentTypes.TEXT || incomingContentType === ContentTypes.THINK)
|
||||
) {
|
||||
const targetIndex = baseIndex + initialContent.length - 1;
|
||||
const existingType = existingContent?.[targetIndex]?.type;
|
||||
if (existingType === incomingContentType) {
|
||||
return targetIndex;
|
||||
}
|
||||
}
|
||||
return baseIndex + initialContent.length;
|
||||
};
|
||||
|
||||
const updateContent = (
|
||||
message: TMessage,
|
||||
index: number,
|
||||
@@ -170,6 +190,11 @@ export default function useStepHandler({
|
||||
lastAnnouncementTimeRef.current = currentTime;
|
||||
}
|
||||
|
||||
let initialContent: TMessageContentParts[] = [];
|
||||
if (submission?.editedContent != null) {
|
||||
initialContent = submission?.initialResponse?.content ?? initialContent;
|
||||
}
|
||||
|
||||
if (event === 'on_run_step') {
|
||||
const runStep = data as Agents.RunStep;
|
||||
const responseMessageId = runStep.runId ?? '';
|
||||
@@ -189,7 +214,7 @@ export default function useStepHandler({
|
||||
parentMessageId: userMessage.messageId,
|
||||
conversationId: userMessage.conversationId,
|
||||
messageId: responseMessageId,
|
||||
content: [],
|
||||
content: initialContent,
|
||||
};
|
||||
|
||||
messageMap.current.set(responseMessageId, response);
|
||||
@@ -214,7 +239,9 @@ export default function useStepHandler({
|
||||
},
|
||||
};
|
||||
|
||||
updatedResponse = updateContent(updatedResponse, runStep.index, contentPart);
|
||||
/** Tool calls don't need index adjustment */
|
||||
const currentIndex = runStep.index + initialContent.length;
|
||||
updatedResponse = updateContent(updatedResponse, currentIndex, contentPart);
|
||||
});
|
||||
|
||||
messageMap.current.set(responseMessageId, updatedResponse);
|
||||
@@ -234,7 +261,9 @@ export default function useStepHandler({
|
||||
|
||||
const response = messageMap.current.get(responseMessageId);
|
||||
if (response) {
|
||||
const updatedResponse = updateContent(response, agent_update.index, data);
|
||||
// Agent updates don't need index adjustment
|
||||
const currentIndex = agent_update.index + initialContent.length;
|
||||
const updatedResponse = updateContent(response, currentIndex, data);
|
||||
messageMap.current.set(responseMessageId, updatedResponse);
|
||||
const currentMessages = getMessages() || [];
|
||||
setMessages([...currentMessages.slice(0, -1), updatedResponse]);
|
||||
@@ -255,7 +284,13 @@ export default function useStepHandler({
|
||||
? messageDelta.delta.content[0]
|
||||
: messageDelta.delta.content;
|
||||
|
||||
const updatedResponse = updateContent(response, runStep.index, contentPart);
|
||||
const currentIndex = calculateContentIndex(
|
||||
runStep.index,
|
||||
initialContent,
|
||||
contentPart.type || '',
|
||||
response.content,
|
||||
);
|
||||
const updatedResponse = updateContent(response, currentIndex, contentPart);
|
||||
|
||||
messageMap.current.set(responseMessageId, updatedResponse);
|
||||
const currentMessages = getMessages() || [];
|
||||
@@ -277,7 +312,13 @@ export default function useStepHandler({
|
||||
? reasoningDelta.delta.content[0]
|
||||
: reasoningDelta.delta.content;
|
||||
|
||||
const updatedResponse = updateContent(response, runStep.index, contentPart);
|
||||
const currentIndex = calculateContentIndex(
|
||||
runStep.index,
|
||||
initialContent,
|
||||
contentPart.type || '',
|
||||
response.content,
|
||||
);
|
||||
const updatedResponse = updateContent(response, currentIndex, contentPart);
|
||||
|
||||
messageMap.current.set(responseMessageId, updatedResponse);
|
||||
const currentMessages = getMessages() || [];
|
||||
@@ -318,7 +359,9 @@ export default function useStepHandler({
|
||||
contentPart.tool_call.expires_at = runStepDelta.delta.expires_at;
|
||||
}
|
||||
|
||||
updatedResponse = updateContent(updatedResponse, runStep.index, contentPart);
|
||||
/** Tool calls don't need index adjustment */
|
||||
const currentIndex = runStep.index + initialContent.length;
|
||||
updatedResponse = updateContent(updatedResponse, currentIndex, contentPart);
|
||||
});
|
||||
|
||||
messageMap.current.set(responseMessageId, updatedResponse);
|
||||
@@ -350,7 +393,9 @@ export default function useStepHandler({
|
||||
tool_call: result.tool_call,
|
||||
};
|
||||
|
||||
updatedResponse = updateContent(updatedResponse, runStep.index, contentPart, true);
|
||||
/** Tool calls don't need index adjustment */
|
||||
const currentIndex = runStep.index + initialContent.length;
|
||||
updatedResponse = updateContent(updatedResponse, currentIndex, contentPart, true);
|
||||
|
||||
messageMap.current.set(responseMessageId, updatedResponse);
|
||||
const updatedMessages = messages.map((msg) =>
|
||||
|
||||
@@ -695,6 +695,5 @@
|
||||
"com_ui_versions": "الإصدارات",
|
||||
"com_ui_yes": "نعم",
|
||||
"com_ui_zoom": "تكبير",
|
||||
"com_user_message": "أنت",
|
||||
"com_warning_resubmit_unsupported": "إعادة إرسال رسالة الذكاء الاصطناعي غير مدعومة لنقطة النهاية هذه"
|
||||
"com_user_message": "أنت"
|
||||
}
|
||||
@@ -868,6 +868,5 @@
|
||||
"com_ui_x_selected": "{{0}} seleccionats",
|
||||
"com_ui_yes": "Sí",
|
||||
"com_ui_zoom": "Zoom",
|
||||
"com_user_message": "Tu",
|
||||
"com_warning_resubmit_unsupported": "Tornar a enviar el missatge de la IA no està suportat per aquest endpoint."
|
||||
"com_user_message": "Tu"
|
||||
}
|
||||
@@ -720,6 +720,5 @@
|
||||
"com_ui_write": "Psát",
|
||||
"com_ui_yes": "Ano",
|
||||
"com_ui_zoom": "Přiblížit",
|
||||
"com_user_message": "Vy",
|
||||
"com_warning_resubmit_unsupported": "Opětovné odeslání AI zprávy není pro tento koncový bod podporováno."
|
||||
"com_user_message": "Vy"
|
||||
}
|
||||
@@ -823,6 +823,5 @@
"com_ui_x_selected": "{{0}} udvalgt",
"com_ui_yes": "Ja",
"com_ui_zoom": "Zoom",
"com_user_message": "Du",
"com_warning_resubmit_unsupported": "Genindsendelse af AI-beskeden understøttes ikke for dette slutpunkt."
"com_user_message": "Du"
}

@@ -917,6 +917,5 @@
"com_ui_x_selected": "{{0}} ausgewählt",
"com_ui_yes": "Ja",
"com_ui_zoom": "Zoom",
"com_user_message": "Du",
"com_warning_resubmit_unsupported": "Das erneute Senden der KI-Nachricht wird für diesen Endpunkt nicht unterstützt."
"com_user_message": "Du"
}

@@ -205,10 +205,11 @@
"com_endpoint_google_custom_name_placeholder": "Set a custom name for Google",
"com_endpoint_google_maxoutputtokens": "Maximum number of tokens that can be generated in the response. Specify a lower value for shorter responses and a higher value for longer responses. Note: models may stop before reaching this maximum.",
"com_endpoint_google_temp": "Higher values = more random, while lower values = more focused and deterministic. We recommend altering this or Top P but not both.",
"com_endpoint_google_topk": "Top-k changes how the model selects tokens for output. A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature).",
"com_endpoint_google_topp": "Top-p changes how the model selects tokens for output. Tokens are selected from most K (see topK parameter) probable to least until the sum of their probabilities equals the top-p value.",
"com_endpoint_google_thinking": "Enables or disables reasoning. This setting is only supported by certain models (2.5 series). For older models, this setting may have no effect.",
"com_endpoint_google_thinking_budget": "Guides the number of thinking tokens the model uses. The actual amount may exceed or fall below this value depending on the prompt.\n\nThis setting is only supported by certain models (2.5 series). Gemini 2.5 Pro supports 128-32,768 tokens. Gemini 2.5 Flash supports 0-24,576 tokens. Gemini 2.5 Flash Lite supports 512-24,576 tokens.\n\nLeave blank or set to \"-1\" to let the model automatically decide when and how much to think. By default, Gemini 2.5 Flash Lite does not think.",
"com_endpoint_google_topk": "Top-k changes how the model selects tokens for output. A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature).",
"com_endpoint_google_topp": "Top-p changes how the model selects tokens for output. Tokens are selected from most K (see topK parameter) probable to least until the sum of their probabilities equals the top-p value.",
"com_endpoint_google_use_search_grounding": "Use Google's search grounding feature to enhance responses with real-time web search results. This enables models to access current information and provide more accurate, up-to-date answers.",
"com_endpoint_instructions_assistants": "Override Instructions",
"com_endpoint_instructions_assistants_placeholder": "Overrides the instructions of the assistant. This is useful for modifying the behavior on a per-run basis.",
"com_endpoint_max_output_tokens": "Max Output Tokens",
@@ -226,11 +227,14 @@
"com_endpoint_openai_pres": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
"com_endpoint_openai_prompt_prefix_placeholder": "Set custom instructions to include in System Message. Default: none",
"com_endpoint_openai_reasoning_effort": "o1 and o3 models only: constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.",
"com_endpoint_openai_reasoning_summary": "Responses API only: A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. Set to none,auto, concise, or detailed.",
"com_endpoint_openai_resend": "Resend all previously attached images. Note: this can significantly increase token cost and you may experience errors with many image attachments.",
"com_endpoint_openai_resend_files": "Resend all previously attached files. Note: this will increase token cost and you may experience errors with many attachments.",
"com_endpoint_openai_stop": "Up to 4 sequences where the API will stop generating further tokens.",
"com_endpoint_openai_temp": "Higher values = more random, while lower values = more focused and deterministic. We recommend altering this or Top P but not both.",
"com_endpoint_openai_topp": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We recommend altering this or temperature but not both.",
"com_endpoint_openai_use_responses_api": "Use the Responses API instead of Chat Completions, which includes extended features from OpenAI. Required for o1-pro, o3-pro, and to enable reasoning summaries.",
"com_endpoint_openai_use_web_search": "Enable web search functionality using OpenAI's built-in search capabilities. This allows the model to search the web for up-to-date information and provide more accurate, current responses.",
"com_endpoint_output": "Output",
"com_endpoint_plug_image_detail": "Image Detail",
"com_endpoint_plug_resend_files": "Resend Files",
@@ -261,6 +265,7 @@
"com_endpoint_prompt_prefix_assistants_placeholder": "Set additional instructions or context on top of the Assistant's main instructions. Ignored if empty.",
"com_endpoint_prompt_prefix_placeholder": "Set custom instructions or context. Ignored if empty.",
"com_endpoint_reasoning_effort": "Reasoning Effort",
"com_endpoint_reasoning_summary": "Reasoning Summary",
"com_endpoint_save_as_preset": "Save As Preset",
"com_endpoint_search": "Search endpoint by name",
"com_endpoint_search_endpoint_models": "Search {{0}} models...",
@@ -276,6 +281,8 @@
"com_endpoint_top_k": "Top K",
"com_endpoint_top_p": "Top P",
"com_endpoint_use_active_assistant": "Use Active Assistant",
"com_endpoint_use_responses_api": "Use Responses API",
"com_endpoint_use_search_grounding": "Grounding with Google Search",
"com_error_expired_user_key": "Provided key for {{0}} expired at {{1}}. Please provide a new key and try again.",
"com_error_files_dupe": "Duplicate file detected.",
"com_error_files_empty": "Empty files are not allowed.",
@@ -284,6 +291,7 @@
"com_error_files_upload": "An error occurred while uploading the file.",
"com_error_files_upload_canceled": "The file upload request was canceled. Note: the file upload may still be processing and will need to be manually deleted.",
"com_error_files_validation": "An error occurred while validating the file.",
"com_error_google_tool_conflict": "Usage of built-in Google tools are not supported with external tools. Please disable either the built-in tools or the external tools.",
"com_error_heic_conversion": "Failed to convert HEIC image to JPEG. Please try converting the image manually or use a different format.",
"com_error_input_length": "The latest message token count is too long, exceeding the token limit, or your token limit parameters are misconfigured, adversely affecting the context window. More info: {{0}}. Please shorten your message, adjust the max context size from the conversation parameters, or fork the conversation to continue.",
"com_error_invalid_agent_provider": "The \"{{0}}\" provider is not available for use with Agents. Please go to your agent's settings and select a currently available provider.",
@@ -634,6 +642,7 @@
"com_ui_command_placeholder": "Optional: Enter a command for the prompt or name will be used",
"com_ui_command_usage_placeholder": "Select a Prompt by command or name",
"com_ui_complete_setup": "Complete Setup",
"com_ui_concise": "Concise",
"com_ui_configure_mcp_variables_for": "Configure Variables for {{0}}",
"com_ui_confirm_action": "Confirm Action",
"com_ui_confirm_admin_use_change": "Changing this setting will block access for admins, including yourself. Are you sure you want to proceed?",
@@ -699,6 +708,7 @@
"com_ui_description": "Description",
"com_ui_description_placeholder": "Optional: Enter a description to display for the prompt",
"com_ui_deselect_all": "Deselect All",
"com_ui_detailed": "Detailed",
"com_ui_disabling": "Disabling...",
"com_ui_download": "Download",
"com_ui_download_artifact": "Download Artifact",
@@ -793,6 +803,7 @@
"com_ui_happy_birthday": "It's my 1st birthday!",
"com_ui_hide_image_details": "Hide Image Details",
"com_ui_hide_qr": "Hide QR Code",
"com_ui_high": "High",
"com_ui_host": "Host",
"com_ui_icon": "Icon",
"com_ui_idea": "Ideas",
@@ -820,6 +831,7 @@
"com_ui_loading": "Loading...",
"com_ui_locked": "Locked",
"com_ui_logo": "{{0}} Logo",
"com_ui_low": "Low",
"com_ui_manage": "Manage",
"com_ui_max_tags": "Maximum number allowed is {{0}}, using latest values.",
"com_ui_mcp_dialog_desc": "Please enter the necessary information below.",
@@ -827,6 +839,7 @@
"com_ui_mcp_server_not_found": "Server not found.",
"com_ui_mcp_servers": "MCP Servers",
"com_ui_mcp_url": "MCP Server URL",
"com_ui_medium": "Medium",
"com_ui_memories": "Memories",
"com_ui_memories_allow_create": "Allow creating Memories",
"com_ui_memories_allow_opt_out": "Allow users to opt out of Memories",
@@ -916,6 +929,7 @@
"com_ui_rename_prompt": "Rename Prompt",
"com_ui_requires_auth": "Requires Authentication",
"com_ui_reset_var": "Reset {{0}}",
"com_ui_reset_zoom": "Reset Zoom",
"com_ui_result": "Result",
"com_ui_revoke": "Revoke",
"com_ui_revoke_info": "Revoke all user provided credentials",
@@ -1057,7 +1071,5 @@
"com_ui_x_selected": "{{0}} selected",
"com_ui_yes": "Yes",
"com_ui_zoom": "Zoom",
"com_ui_reset_zoom": "Reset Zoom",
"com_user_message": "You",
"com_warning_resubmit_unsupported": "Resubmitting the AI message is not supported for this endpoint."
}
"com_user_message": "You"
}

@@ -752,6 +752,5 @@
"com_ui_x_selected": "{{0}} seleccionado",
"com_ui_yes": "Sí",
"com_ui_zoom": "Zoom",
"com_user_message": "Usted",
"com_warning_resubmit_unsupported": "No se admite el reenvío del mensaje de IA para este punto de conexión."
"com_user_message": "Usted"
}

@@ -865,6 +865,5 @@
"com_ui_x_selected": "{{0}} valitud",
"com_ui_yes": "Jah",
"com_ui_zoom": "Suumi",
"com_user_message": "Sina",
"com_warning_resubmit_unsupported": "AI sõnumi uuesti esitamine pole selle otspunkti jaoks toetatud."
"com_user_message": "Sina"
}

@@ -847,6 +847,5 @@
"com_ui_write": "نوشتن",
"com_ui_yes": "بله",
"com_ui_zoom": "بزرگنمایی ضربه بزنید؛",
"com_user_message": "شما",
"com_warning_resubmit_unsupported": "ارسال مجدد پیام هوش مصنوعی برای این نقطه پایانی پشتیبانی نمی شود."
"com_user_message": "شما"
}

@@ -752,6 +752,5 @@
"com_ui_versions": "Versions",
"com_ui_yes": "Oui",
"com_ui_zoom": "Zoom",
"com_user_message": "Vous",
"com_warning_resubmit_unsupported": "La resoumission du message IA n'est pas prise en charge pour ce point de terminaison."
"com_user_message": "Vous"
}

@@ -863,6 +863,5 @@
"com_ui_x_selected": "{{0}} נבחר",
"com_ui_yes": "כן",
"com_ui_zoom": "זום",
"com_user_message": "אתה",
"com_warning_resubmit_unsupported": "שליחת הודעה מחדש אינה נתמכת עבור נקודת קצה זו."
"com_user_message": "אתה"
}

@@ -847,6 +847,5 @@
"com_ui_write": "Írás",
"com_ui_yes": "Igen",
"com_ui_zoom": "Zoom",
"com_user_message": "Ön",
"com_warning_resubmit_unsupported": "Az AI üzenet újraküldése nem támogatott ennél a végpontnál."
"com_user_message": "Ön"
}

@@ -829,6 +829,5 @@
"com_ui_write": "Scrittura",
"com_ui_yes": "Sì",
"com_ui_zoom": "Zoom",
"com_user_message": "Mostra nome utente nei messaggi",
"com_warning_resubmit_unsupported": "Il reinvio del messaggio AI non è supportato per questo endpoint."
"com_user_message": "Mostra nome utente nei messaggi"
}

@@ -868,6 +868,5 @@
"com_ui_x_selected": "{{0}}が選択された",
"com_ui_yes": "はい",
"com_ui_zoom": "ズーム",
"com_user_message": "あなた",
"com_warning_resubmit_unsupported": "このエンドポイントではAIメッセージの再送信はサポートされていません"
"com_user_message": "あなた"
}

@@ -921,6 +921,5 @@
"com_ui_x_selected": "{{0}}개 선택됨",
"com_ui_yes": "네",
"com_ui_zoom": "확대/축소",
"com_user_message": "당신",
"com_warning_resubmit_unsupported": "이 엔드포인트에서는 AI 메시지 재전송이 지원되지 않습니다"
"com_user_message": "당신"
}

@@ -714,6 +714,5 @@
"com_ui_view_source": "Zobacz źródłowy czat",
"com_ui_yes": "Tak",
"com_ui_zoom": "Powiększ",
"com_user_message": "Ty",
"com_warning_resubmit_unsupported": "Ponowne przesyłanie wiadomości AI nie jest obsługiwane dla tego punktu końcowego."
"com_user_message": "Ty"
}

@@ -817,6 +817,5 @@
"com_ui_write": "Escrevendo",
"com_ui_yes": "Sim",
"com_ui_zoom": "Zoom",
"com_user_message": "Você",
"com_warning_resubmit_unsupported": "O reenvio da mensagem de IA não é suportado para este endpoint."
"com_user_message": "Você"
}

@@ -819,6 +819,5 @@
"com_ui_write": "A escrever",
"com_ui_yes": "Sim",
"com_ui_zoom": "Ampliar",
"com_user_message": "Você",
"com_warning_resubmit_unsupported": "O reenvio da mensagem de IA não é suportado por este endereço."
"com_user_message": "Você"
}

@@ -865,6 +865,5 @@
"com_ui_x_selected": "{{0}} выбрано",
"com_ui_yes": "Да",
"com_ui_zoom": "Масштаб",
"com_user_message": "Вы",
"com_warning_resubmit_unsupported": "Повторная отправка сообщения ИИ не поддерживается для данной конечной точки"
"com_user_message": "Вы"
}

@@ -802,6 +802,5 @@
"com_ui_write": "การเขียน",
"com_ui_yes": "ใช่",
"com_ui_zoom": "ขยาย",
"com_user_message": "คุณ",
"com_warning_resubmit_unsupported": "การส่งข้อความ AI ซ้ำไม่รองรับสำหรับจุดสิ้นสุดนี้"
"com_user_message": "คุณ"
}

@@ -725,6 +725,5 @@
"com_ui_view_source": "Kaynak sohbeti görüntüle",
"com_ui_yes": "Evet",
"com_ui_zoom": "Yakınlaştır",
"com_user_message": "Sen",
"com_warning_resubmit_unsupported": "Bu uç nokta için yapay zeka mesajını yeniden gönderme desteklenmiyor."
"com_user_message": "Sen"
}

@@ -852,6 +852,5 @@
"com_ui_x_selected": "{{0}} 已选择",
"com_ui_yes": "是的",
"com_ui_zoom": "缩放",
"com_user_message": "您",
"com_warning_resubmit_unsupported": "此终端不支持重新提交AI消息"
"com_user_message": "您"
}

@@ -695,6 +695,5 @@
"com_ui_versions": "版本",
"com_ui_yes": "是",
"com_ui_zoom": "縮放",
"com_user_message": "您",
"com_warning_resubmit_unsupported": "此端點不支援重新送出 AI 訊息。"
"com_user_message": "您"
}
@@ -1,3 +1,3 @@
// v0.7.8
// v0.7.9-rc1
// See .env.test.example for an example of the '.env.test' file.
require('dotenv').config({ path: './e2e/.env.test' });

@@ -22,7 +22,7 @@ version: 1.8.9
# It is recommended to use it with quotes.

# renovate: image=ghcr.io/danny-avila/librechat
appVersion: "v0.7.8"
appVersion: "v0.7.9-rc1"

home: https://www.librechat.ai
package-lock.json (generated)
@@ -1,12 +1,12 @@
{
"name": "LibreChat",
"version": "v0.7.8",
"version": "v0.7.9-rc1",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "LibreChat",
"version": "v0.7.8",
"version": "v0.7.9-rc1",
"license": "ISC",
"workspaces": [
"api",
@@ -47,7 +47,7 @@
},
"api": {
"name": "@librechat/backend",
"version": "v0.7.8",
"version": "v0.7.9-rc1",
"license": "ISC",
"dependencies": {
"@anthropic-ai/sdk": "^0.52.0",
@@ -64,7 +64,7 @@
"@langchain/google-genai": "^0.2.13",
"@langchain/google-vertexai": "^0.2.13",
"@langchain/textsplitters": "^0.1.0",
"@librechat/agents": "^2.4.46",
"@librechat/agents": "^2.4.50",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@node-saml/passport-saml": "^5.0.0",
@@ -2502,7 +2502,7 @@
},
"client": {
"name": "@librechat/frontend",
"version": "v0.7.8",
"version": "v0.7.9-rc1",
"license": "ISC",
"dependencies": {
"@ariakit/react": "^0.4.15",
@@ -19436,9 +19436,9 @@
}
},
"node_modules/@librechat/agents": {
"version": "2.4.46",
"resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-2.4.46.tgz",
"integrity": "sha512-zR27U19/WGF3HN64oBbiaFgjjWHaF7BjYzRFWzQKEkk+iEzCe59IpuEZUizQ54YcY02nhhh6S3MNUjhAJwMYVA==",
"version": "2.4.50",
"resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-2.4.50.tgz",
"integrity": "sha512-8yUndPTa5ctxGBqlzMcyBDi+c6lup37wtXXFJMyBcm2Bx4MhqrEOMdI3HRu/3CsYpRgC07wAK2AwZ593aGLWoA==",
"license": "MIT",
"dependencies": {
"@langchain/anthropic": "^0.3.23",
@@ -38719,21 +38719,57 @@
"integrity": "sha512-KG8UEiEVkR3wGEb4m5yZkVCzigAD+cVEJck2CzYZO37ZGJfctvVptVO192MwrtPhzONn6go8ylnOdMhKqi4nfg=="
},
"node_modules/pbkdf2": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.2.tgz",
"integrity": "sha512-iuh7L6jA7JEGu2WxDwtQP1ddOpaJNC4KlDEFfdQajSGgGPNi4OyDc2R7QnbY2bR9QjBVGwgvTdNJZoE7RaxUMA==",
"version": "3.1.3",
"resolved": "https://registry.npmjs.org/pbkdf2/-/pbkdf2-3.1.3.tgz",
"integrity": "sha512-wfRLBZ0feWRhCIkoMB6ete7czJcnNnqRpcoWQBLqatqXXmelSRqfdDK4F3u9T2s2cXas/hQJcryI/4lAL+XTlA==",
"dev": true,
"license": "MIT",
"dependencies": {
"create-hash": "^1.1.2",
"create-hmac": "^1.1.4",
"ripemd160": "^2.0.1",
"safe-buffer": "^5.0.1",
"sha.js": "^2.4.8"
"create-hash": "~1.1.3",
"create-hmac": "^1.1.7",
"ripemd160": "=2.0.1",
"safe-buffer": "^5.2.1",
"sha.js": "^2.4.11",
"to-buffer": "^1.2.0"
},
"engines": {
"node": ">=0.12"
}
},
"node_modules/pbkdf2/node_modules/create-hash": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/create-hash/-/create-hash-1.1.3.tgz",
"integrity": "sha512-snRpch/kwQhcdlnZKYanNF1m0RDlrCdSKQaH87w1FCFPVPNCQ/Il9QJKAX2jVBZddRdaHBMC+zXa9Gw9tmkNUA==",
"dev": true,
"license": "MIT",
"dependencies": {
"cipher-base": "^1.0.1",
"inherits": "^2.0.1",
"ripemd160": "^2.0.0",
"sha.js": "^2.4.0"
}
},
"node_modules/pbkdf2/node_modules/hash-base": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/hash-base/-/hash-base-2.0.2.tgz",
"integrity": "sha512-0TROgQ1/SxE6KmxWSvXHvRj90/Xo1JvZShofnYF+f6ZsGtR4eES7WfrQzPalmyagfKZCXpVnitiRebZulWsbiw==",
"dev": true,
"license": "MIT",
"dependencies": {
"inherits": "^2.0.1"
}
},
"node_modules/pbkdf2/node_modules/ripemd160": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.1.tgz",
"integrity": "sha512-J7f4wutN8mdbV08MJnXibYpCOPHR+yzy+iQ/AsjMv2j8cLavQ8VGagDFUwwTAdF8FmRKVeNpbTTEwNHCW1g94w==",
"dev": true,
"license": "MIT",
"dependencies": {
"hash-base": "^2.0.0",
"inherits": "^2.0.1"
}
},
"node_modules/peek-readable": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/peek-readable/-/peek-readable-5.0.0.tgz",
@@ -39919,9 +39955,9 @@
}
},
"node_modules/prettier-eslint/node_modules/brace-expansion": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz",
"integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==",
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
"integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -43961,6 +43997,28 @@
"integrity": "sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw==",
"dev": true
},
"node_modules/to-buffer": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/to-buffer/-/to-buffer-1.2.1.tgz",
"integrity": "sha512-tB82LpAIWjhLYbqjx3X4zEeHN6M8CiuOEy2JY8SEQVdYRe3CCHOFaqrBW1doLDrfpWhplcW7BL+bO3/6S3pcDQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"isarray": "^2.0.5",
"safe-buffer": "^5.2.1",
"typed-array-buffer": "^1.0.3"
},
"engines": {
"node": ">= 0.4"
}
},
"node_modules/to-buffer/node_modules/isarray": {
"version": "2.0.5",
"resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz",
"integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==",
"dev": true,
"license": "MIT"
},
"node_modules/to-regex-range": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
@@ -46566,7 +46624,7 @@
"typescript": "^5.0.4"
},
"peerDependencies": {
"@librechat/agents": "^2.4.46",
"@librechat/agents": "^2.4.50",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.12.3",
"axios": "^1.8.2",
@@ -46659,7 +46717,7 @@
},
"packages/data-provider": {
"name": "librechat-data-provider",
"version": "0.7.88",
"version": "0.7.899",
"license": "ISC",
"dependencies": {
"axios": "^1.8.2",
@@ -1,6 +1,6 @@
{
"name": "LibreChat",
"version": "v0.7.8",
"version": "v0.7.9-rc1",
"description": "",
"workspaces": [
"api",

@@ -69,7 +69,7 @@
"registry": "https://registry.npmjs.org/"
},
"peerDependencies": {
"@librechat/agents": "^2.4.46",
"@librechat/agents": "^2.4.50",
"@librechat/data-schemas": "*",
"@modelcontextprotocol/sdk": "^1.12.3",
"axios": "^1.8.2",
@@ -1,6 +1,7 @@
import { Run, Providers } from '@librechat/agents';
import { providerEndpointMap, KnownEndpoints } from 'librechat-data-provider';
import type {
OpenAIClientOptions,
StandardGraphConfig,
EventHandler,
GenericTool,
@@ -76,6 +77,11 @@ export async function createRun({
(agent.endpoint && agent.endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
) {
reasoningKey = 'reasoning';
} else if (
(llmConfig as OpenAIClientOptions).useResponsesApi === true &&
(provider === Providers.OPENAI || provider === Providers.AZURE)
) {
reasoningKey = 'reasoning';
}

const graphConfig: StandardGraphConfig = {
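Note on the hunk above: the reasoning output key now also switches to 'reasoning' when the OpenAI or Azure provider runs through the Responses API, not only for OpenRouter-backed agents. A minimal TypeScript sketch of that selection rule follows; the Provider union and the 'reasoning_content' fallback are assumptions for illustration, not the project's exact types.

// Illustrative sketch of the key-selection branch added to createRun above.
// The default key and the Provider union are assumptions, not project code.
type Provider = 'openAI' | 'azureOpenAI' | 'other';

function pickReasoningKey(opts: {
  provider: Provider;
  useResponsesApi?: boolean;
  endpoint?: string;
}): 'reasoning' | 'reasoning_content' {
  const isOpenRouter = opts.endpoint?.toLowerCase().includes('openrouter') ?? false;
  if (isOpenRouter) {
    return 'reasoning';
  }
  if (opts.useResponsesApi === true && (opts.provider === 'openAI' || opts.provider === 'azureOpenAI')) {
    return 'reasoning';
  }
  return 'reasoning_content';
}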
@@ -1,6 +1,7 @@
import { Providers } from '@librechat/agents';
import { googleSettings, AuthKeys } from 'librechat-data-provider';
import type { GoogleClientOptions, VertexAIClientOptions } from '@librechat/agents';
import type { GoogleAIToolType } from '@langchain/google-common';
import type * as t from '~/types';
import { isEnabled } from '~/utils';

@@ -98,13 +99,14 @@ export function getGoogleConfig(
const serviceKey =
typeof serviceKeyRaw === 'string' ? JSON.parse(serviceKeyRaw) : (serviceKeyRaw ?? {});

const project_id = serviceKey?.project_id ?? null;
const apiKey = creds[AuthKeys.GOOGLE_API_KEY] ?? null;
const project_id = !apiKey ? (serviceKey?.project_id ?? null) : null;

const reverseProxyUrl = options.reverseProxyUrl;
const authHeader = options.authHeader;

const {
grounding,
thinking = googleSettings.thinking.default,
thinkingBudget = googleSettings.thinkingBudget.default,
...modelOptions
@@ -128,7 +130,7 @@ export function getGoogleConfig(
}

// If we have a GCP project => Vertex AI
if (project_id && provider === Providers.VERTEXAI) {
if (provider === Providers.VERTEXAI) {
(llmConfig as VertexAIClientOptions).authOptions = {
credentials: { ...serviceKey },
projectId: project_id,
@@ -136,6 +138,10 @@ export function getGoogleConfig(
(llmConfig as VertexAIClientOptions).location = process.env.GOOGLE_LOC || 'us-central1';
} else if (apiKey && provider === Providers.GOOGLE) {
llmConfig.apiKey = apiKey;
} else {
throw new Error(
`Invalid credentials provided. Please provide either a valid API key or service account credentials for Google Cloud.`,
);
}

const shouldEnableThinking =
@@ -183,8 +189,16 @@ export function getGoogleConfig(
};
}

const tools: GoogleAIToolType[] = [];

if (grounding) {
tools.push({ googleSearch: {} });
}

// Return the final shape
return {
/** @type {GoogleAIToolType[]} */
tools,
/** @type {Providers.GOOGLE | Providers.VERTEXAI} */
provider,
/** @type {GoogleClientOptions | VertexAIClientOptions} */
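Note on the getGoogleConfig changes above: an API key now takes precedence, the service account's project_id is only used when no API key is present, the Vertex AI provider always receives service-account auth options, the Gemini API path uses the API key, and anything else throws. A minimal sketch of that selection, with simplified input shapes assumed for illustration:

// Sketch of the credential selection in getGoogleConfig above (shapes simplified).
type GoogleProvider = 'google' | 'vertexai';

function resolveGoogleAuth(
  provider: GoogleProvider,
  creds: { apiKey?: string | null; serviceKey?: { project_id?: string } | null },
) {
  const apiKey = creds.apiKey ?? null;
  // project_id is only derived from the service key when no API key was provided.
  const project_id = !apiKey ? (creds.serviceKey?.project_id ?? null) : null;
  if (provider === 'vertexai') {
    return {
      authOptions: { credentials: { ...creds.serviceKey }, projectId: project_id },
      location: process.env.GOOGLE_LOC || 'us-central1',
    };
  }
  if (apiKey && provider === 'google') {
    return { apiKey };
  }
  throw new Error('Provide either a valid API key or service account credentials for Google Cloud.');
}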
@@ -1,9 +1,25 @@
import { ProxyAgent } from 'undici';
import { KnownEndpoints } from 'librechat-data-provider';
import { KnownEndpoints, removeNullishValues } from 'librechat-data-provider';
import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
import type { AzureOpenAIInput } from '@langchain/openai';
import type { OpenAI } from 'openai';
import type * as t from '~/types';
import { sanitizeModelName, constructAzureURL } from '~/utils/azure';
import { isEnabled } from '~/utils/common';

function hasReasoningParams({
reasoning_effort,
reasoning_summary,
}: {
reasoning_effort?: string | null;
reasoning_summary?: string | null;
}): boolean {
return (
(reasoning_effort != null && reasoning_effort !== '') ||
(reasoning_summary != null && reasoning_summary !== '')
);
}

/**
* Generates configuration options for creating a language model (LLM) instance.
* @param apiKey - The API key for authentication.
@@ -17,7 +33,7 @@ export function getOpenAIConfig(
endpoint?: string | null,
): t.LLMConfigResult {
const {
modelOptions = {},
modelOptions: _modelOptions = {},
reverseProxyUrl,
defaultQuery,
headers,
@@ -27,8 +43,10 @@ export function getOpenAIConfig(
addParams,
dropParams,
} = options;

const llmConfig: Partial<t.ClientOptions> & Partial<t.OpenAIParameters> = Object.assign(
const { reasoning_effort, reasoning_summary, ...modelOptions } = _modelOptions;
const llmConfig: Partial<t.ClientOptions> &
Partial<t.OpenAIParameters> &
Partial<AzureOpenAIInput> = Object.assign(
{
streaming,
model: modelOptions.model ?? '',
@@ -40,39 +58,6 @@ export function getOpenAIConfig(
Object.assign(llmConfig, addParams);
}

// Note: OpenAI Web Search models do not support any known parameters besides `max_tokens`
if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) {
const searchExcludeParams = [
'frequency_penalty',
'presence_penalty',
'temperature',
'top_p',
'top_k',
'stop',
'logit_bias',
'seed',
'response_format',
'n',
'logprobs',
'user',
];

const updatedDropParams = dropParams || [];
const combinedDropParams = [...new Set([...updatedDropParams, ...searchExcludeParams])];

combinedDropParams.forEach((param) => {
if (param in llmConfig) {
delete llmConfig[param as keyof t.ClientOptions];
}
});
} else if (dropParams && Array.isArray(dropParams)) {
dropParams.forEach((param) => {
if (param in llmConfig) {
delete llmConfig[param as keyof t.ClientOptions];
}
});
}

let useOpenRouter = false;
const configOptions: t.OpenAIConfiguration = {};

@@ -119,7 +104,10 @@ export function getOpenAIConfig(
llmConfig.model = process.env.AZURE_OPENAI_DEFAULT_MODEL;
}

if (configOptions.baseURL) {
const constructBaseURL = () => {
if (!configOptions.baseURL) {
return;
}
const azureURL = constructAzureURL({
baseURL: configOptions.baseURL,
azureOptions: updatedAzure,
@@ -127,9 +115,40 @@ export function getOpenAIConfig(
updatedAzure.azureOpenAIBasePath = azureURL.split(
`/${updatedAzure.azureOpenAIApiDeploymentName}`,
)[0];
}
};

constructBaseURL();
Object.assign(llmConfig, updatedAzure);

const constructAzureResponsesApi = () => {
if (!llmConfig.useResponsesApi) {
return;
}

configOptions.baseURL = constructAzureURL({
baseURL: configOptions.baseURL || 'https://${INSTANCE_NAME}.openai.azure.com/openai/v1',
azureOptions: llmConfig,
});

delete llmConfig.azureOpenAIApiDeploymentName;
delete llmConfig.azureOpenAIApiInstanceName;
delete llmConfig.azureOpenAIApiVersion;
delete llmConfig.azureOpenAIBasePath;
delete llmConfig.azureOpenAIApiKey;
llmConfig.apiKey = apiKey;

configOptions.defaultHeaders = {
...configOptions.defaultHeaders,
'api-key': apiKey,
};
configOptions.defaultQuery = {
...configOptions.defaultQuery,
'api-version': 'preview',
};
};

constructAzureResponsesApi();

llmConfig.model = updatedAzure.azureOpenAIApiDeploymentName;
} else {
llmConfig.apiKey = apiKey;
@@ -139,11 +158,19 @@ export function getOpenAIConfig(
configOptions.organization = process.env.OPENAI_ORGANIZATION;
}

if (useOpenRouter && llmConfig.reasoning_effort != null) {
llmConfig.reasoning = {
effort: llmConfig.reasoning_effort,
};
delete llmConfig.reasoning_effort;
if (
hasReasoningParams({ reasoning_effort, reasoning_summary }) &&
(llmConfig.useResponsesApi === true || useOpenRouter)
) {
llmConfig.reasoning = removeNullishValues(
{
effort: reasoning_effort,
summary: reasoning_summary,
},
true,
) as OpenAI.Reasoning;
} else if (hasReasoningParams({ reasoning_effort })) {
llmConfig.reasoning_effort = reasoning_effort;
}

if (llmConfig.max_tokens != null) {
@@ -151,8 +178,53 @@ export function getOpenAIConfig(
delete llmConfig.max_tokens;
}

const tools: BindToolsInput[] = [];

if (modelOptions.web_search) {
llmConfig.useResponsesApi = true;
tools.push({ type: 'web_search_preview' });
}

/**
* Note: OpenAI Web Search models do not support any known parameters besides `max_tokens`
*/
if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) {
const searchExcludeParams = [
'frequency_penalty',
'presence_penalty',
'reasoning',
'reasoning_effort',
'temperature',
'top_p',
'top_k',
'stop',
'logit_bias',
'seed',
'response_format',
'n',
'logprobs',
'user',
];

const updatedDropParams = dropParams || [];
const combinedDropParams = [...new Set([...updatedDropParams, ...searchExcludeParams])];

combinedDropParams.forEach((param) => {
if (param in llmConfig) {
delete llmConfig[param as keyof t.ClientOptions];
}
});
} else if (dropParams && Array.isArray(dropParams)) {
dropParams.forEach((param) => {
if (param in llmConfig) {
delete llmConfig[param as keyof t.ClientOptions];
}
});
}

return {
llmConfig,
configOptions,
tools,
};
}
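Note on the getOpenAIConfig changes above: reasoning_effort and reasoning_summary are pulled out of the model options and either nested under a 'reasoning' object (Responses API or OpenRouter) or passed through as a flat 'reasoning_effort' (Chat Completions); enabling web_search also forces the Responses API and adds the web_search_preview tool. A small sketch of the reasoning mapping, with simplified shapes assumed for illustration:

// Sketch of the reasoning-parameter mapping introduced above (shapes simplified).
interface ReasoningOpts {
  reasoning_effort?: string | null;
  reasoning_summary?: string | null;
}

function mapReasoning(opts: ReasoningOpts, useResponsesApi: boolean, useOpenRouter: boolean) {
  const has = (v?: string | null) => v != null && v !== '';
  if ((has(opts.reasoning_effort) || has(opts.reasoning_summary)) && (useResponsesApi || useOpenRouter)) {
    // Nested object for the Responses API / OpenRouter.
    return {
      reasoning: {
        ...(has(opts.reasoning_effort) ? { effort: opts.reasoning_effort } : {}),
        ...(has(opts.reasoning_summary) ? { summary: opts.reasoning_summary } : {}),
      },
    };
  }
  if (has(opts.reasoning_effort)) {
    // Flat Chat Completions-style parameter otherwise.
    return { reasoning_effort: opts.reasoning_effort };
  }
  return {};
}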
@@ -21,6 +21,7 @@ import type {
OCRImage,
} from '~/types';
import { logAxiosError, createAxiosInstance } from '~/utils/axios';
import { loadServiceKey } from '~/utils/key';

const axios = createAxiosInstance();
const DEFAULT_MISTRAL_BASE_URL = 'https://api.mistral.ai/v1';
@@ -32,6 +33,13 @@ interface AuthConfig {
baseURL: string;
}

/** Helper type for Google service account */
interface GoogleServiceAccount {
client_email?: string;
private_key?: string;
project_id?: string;
}

/** Helper type for OCR request context */
interface OCRContext {
req: Pick<ServerRequest, 'user' | 'app'> & {
@@ -424,3 +432,216 @@ export const uploadAzureMistralOCR = async (
throw createOCRError(error, 'Error uploading document to Azure Mistral OCR API:');
}
};

/**
* Loads Google service account configuration
*/
async function loadGoogleAuthConfig(): Promise<{
serviceAccount: GoogleServiceAccount;
accessToken: string;
}> {
/** Path from environment variable or default location */
const serviceKeyPath =
process.env.GOOGLE_SERVICE_KEY_FILE_PATH ||
path.join(__dirname, '..', '..', '..', 'api', 'data', 'auth.json');

const serviceKey = await loadServiceKey(serviceKeyPath);

if (!serviceKey) {
throw new Error(
`Google service account not found or could not be loaded from ${serviceKeyPath}`,
);
}

if (!serviceKey.client_email || !serviceKey.private_key || !serviceKey.project_id) {
throw new Error('Invalid Google service account configuration');
}

const jwt = await createJWT(serviceKey as GoogleServiceAccount);
const accessToken = await exchangeJWTForAccessToken(jwt);

return {
serviceAccount: serviceKey as GoogleServiceAccount,
accessToken,
};
}

/**
* Creates a JWT token manually
*/
async function createJWT(serviceKey: GoogleServiceAccount): Promise<string> {
const crypto = await import('crypto');

const header = {
alg: 'RS256',
typ: 'JWT',
};

const now = Math.floor(Date.now() / 1000);
const payload = {
iss: serviceKey.client_email,
scope: 'https://www.googleapis.com/auth/cloud-platform',
aud: 'https://oauth2.googleapis.com/token',
exp: now + 3600,
iat: now,
};

const encodedHeader = Buffer.from(JSON.stringify(header)).toString('base64url');
const encodedPayload = Buffer.from(JSON.stringify(payload)).toString('base64url');

const signatureInput = `${encodedHeader}.${encodedPayload}`;

const sign = crypto.createSign('RSA-SHA256');
sign.update(signatureInput);
sign.end();

const signature = sign.sign(serviceKey.private_key!, 'base64url');

return `${signatureInput}.${signature}`;
}

/**
* Exchanges JWT for access token
*/
async function exchangeJWTForAccessToken(jwt: string): Promise<string> {
const response = await axios.post(
'https://oauth2.googleapis.com/token',
new URLSearchParams({
grant_type: 'urn:ietf:params:oauth:grant-type:jwt-bearer',
assertion: jwt,
}),
{
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
},
},
);

if (!response.data?.access_token) {
throw new Error('No access token in response');
}

return response.data.access_token;
}

/**
* Performs OCR using Google Vertex AI
*/
async function performGoogleVertexOCR({
url,
accessToken,
projectId,
model,
documentType = 'document_url',
}: {
url: string;
accessToken: string;
projectId: string;
model: string;
documentType?: 'document_url' | 'image_url';
}): Promise<OCRResult> {
const location = process.env.GOOGLE_LOC || 'us-central1';
const modelId = model || 'mistral-ocr-2505';

let baseURL: string;
if (location === 'global') {
baseURL = `https://aiplatform.googleapis.com/v1/projects/${projectId}/locations/global/publishers/mistralai/models/${modelId}:rawPredict`;
} else {
baseURL = `https://${location}-aiplatform.googleapis.com/v1/projects/${projectId}/locations/${location}/publishers/mistralai/models/${modelId}:rawPredict`;
}

const documentKey = documentType === 'image_url' ? 'image_url' : 'document_url';

const requestBody = {
model: modelId,
document: {
type: documentType,
[documentKey]: url,
},
include_image_base64: true,
};

logger.debug('Sending request to Google Vertex AI:', {
url: baseURL,
body: {
...requestBody,
document: { ...requestBody.document, [documentKey]: 'base64_data_hidden' },
},
});

return axios
.post(baseURL, requestBody, {
headers: {
'Content-Type': 'application/json',
Authorization: `Bearer ${accessToken}`,
Accept: 'application/json',
},
})
.then((res) => {
logger.debug('Google Vertex AI response received');
return res.data;
})
.catch((error) => {
if (error.response?.data) {
logger.error('Vertex AI error response: ' + JSON.stringify(error.response.data, null, 2));
}
throw new Error(
logAxiosError({
error: error as AxiosError,
message: 'Error calling Google Vertex AI Mistral OCR',
}),
);
});
}

/**
* Use Google Vertex AI Mistral OCR API to process the OCR result.
*
* @param params - The params object.
* @param params.req - The request object from Express. It should have a `user` property with an `id`
* representing the user
* @param params.file - The file object, which is part of the request. The file object should
* have a `mimetype` property that tells us the file type
* @param params.loadAuthValues - Function to load authentication values
* @returns - The result object containing the processed `text` and `images` (not currently used),
* along with the `filename` and `bytes` properties.
*/
export const uploadGoogleVertexMistralOCR = async (
context: OCRContext,
): Promise<MistralOCRUploadResult> => {
try {
const { serviceAccount, accessToken } = await loadGoogleAuthConfig();
const model = getModelConfig(context.req.app.locals?.ocr);

const buffer = fs.readFileSync(context.file.path);
const base64 = buffer.toString('base64');
const base64Prefix = `data:${context.file.mimetype || 'application/pdf'};base64,`;

const documentType = getDocumentType(context.file);
const ocrResult = await performGoogleVertexOCR({
url: `${base64Prefix}${base64}`,
accessToken,
projectId: serviceAccount.project_id!,
model,
documentType,
});

if (!ocrResult || !ocrResult.pages || ocrResult.pages.length === 0) {
throw new Error(
'No OCR result returned from service, may be down or the file is not supported.',
);
}

const { text, images } = processOCRResult(ocrResult);

return {
filename: context.file.originalname,
bytes: text.length * 4,
filepath: FileSources.vertexai_mistral_ocr as string,
text,
images,
};
} catch (error) {
throw createOCRError(error, 'Error uploading document to Google Vertex AI Mistral OCR:');
}
};
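Note on the new Vertex AI OCR path above: it authenticates with a two-step service-account flow — sign a JWT with the key's private_key (RS256), then exchange it at the OAuth token endpoint using the JWT-bearer grant — before calling the rawPredict endpoint. A standalone Node sketch of just that exchange (Node 18+ assumed; inputs are illustrative):

// Standalone sketch of the JWT-bearer exchange used by loadGoogleAuthConfig above.
import crypto from 'crypto';

function signServiceAccountJWT(clientEmail: string, privateKey: string): string {
  const b64url = (o: object) => Buffer.from(JSON.stringify(o)).toString('base64url');
  const now = Math.floor(Date.now() / 1000);
  const unsigned = `${b64url({ alg: 'RS256', typ: 'JWT' })}.${b64url({
    iss: clientEmail,
    scope: 'https://www.googleapis.com/auth/cloud-platform',
    aud: 'https://oauth2.googleapis.com/token',
    iat: now,
    exp: now + 3600,
  })}`;
  const signature = crypto.createSign('RSA-SHA256').update(unsigned).sign(privateKey, 'base64url');
  return `${unsigned}.${signature}`;
}

async function fetchAccessToken(jwt: string): Promise<string> {
  const res = await fetch('https://oauth2.googleapis.com/token', {
    method: 'POST',
    headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
    body: new URLSearchParams({
      grant_type: 'urn:ietf:params:oauth:grant-type:jwt-bearer',
      assertion: jwt,
    }),
  });
  const data = (await res.json()) as { access_token?: string };
  if (!data.access_token) {
    throw new Error('No access token in response');
  }
  return data.access_token;
}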
@@ -11,6 +11,8 @@ export * from './oauth';
export * from './crypto';
/* Flow */
export * from './flow/manager';
/* Middleware */
export * from './middleware';
/* Agents */
export * from './agents';
/* Endpoints */
packages/api/src/middleware/access.ts (new file, 141 lines)
@@ -0,0 +1,141 @@
import { logger } from '@librechat/data-schemas';
import {
Permissions,
EndpointURLs,
EModelEndpoint,
PermissionTypes,
isAgentsEndpoint,
} from 'librechat-data-provider';
import type { NextFunction, Request as ServerRequest, Response as ServerResponse } from 'express';
import type { IRole, IUser } from '@librechat/data-schemas';

export function skipAgentCheck(req?: ServerRequest): boolean {
if (!req || !req?.body?.endpoint) {
return false;
}

if (req.method !== 'POST') {
return false;
}

if (!req.originalUrl?.includes(EndpointURLs[EModelEndpoint.agents])) {
return false;
}
return !isAgentsEndpoint(req.body.endpoint);
}

/**
* Core function to check if a user has one or more required permissions
* @param user - The user object
* @param permissionType - The type of permission to check
* @param permissions - The list of specific permissions to check
* @param bodyProps - An optional object where keys are permissions and values are arrays of properties to check
* @param checkObject - The object to check properties against
* @param skipCheck - An optional function that takes the checkObject and returns true to skip permission checking
* @returns Whether the user has the required permissions
*/
export const checkAccess = async ({
req,
user,
permissionType,
permissions,
getRoleByName,
bodyProps = {} as Record<Permissions, string[]>,
checkObject = {},
skipCheck,
}: {
user: IUser;
req?: ServerRequest;
permissionType: PermissionTypes;
permissions: Permissions[];
bodyProps?: Record<Permissions, string[]>;
checkObject?: object;
/** If skipCheck function is provided and returns true, skip permission checking */
skipCheck?: (req?: ServerRequest) => boolean;
getRoleByName: (roleName: string, fieldsToSelect?: string | string[]) => Promise<IRole | null>;
}): Promise<boolean> => {
if (skipCheck && skipCheck(req)) {
return true;
}

if (!user || !user.role) {
return false;
}

const role = await getRoleByName(user.role);
if (role && role.permissions && role.permissions[permissionType]) {
const hasAnyPermission = permissions.some((permission) => {
if (
role.permissions?.[permissionType as keyof typeof role.permissions]?.[
permission as keyof (typeof role.permissions)[typeof permissionType]
]
) {
return true;
}

if (bodyProps[permission] && checkObject) {
return bodyProps[permission].some((prop) =>
Object.prototype.hasOwnProperty.call(checkObject, prop),
);
}

return false;
});

return hasAnyPermission;
}

return false;
};

/**
* Middleware to check if a user has one or more required permissions, optionally based on `req.body` properties.
* @param permissionType - The type of permission to check.
* @param permissions - The list of specific permissions to check.
* @param bodyProps - An optional object where keys are permissions and values are arrays of `req.body` properties to check.
* @param skipCheck - An optional function that takes req.body and returns true to skip permission checking.
* @param getRoleByName - A function to get the role by name.
* @returns Express middleware function.
*/
export const generateCheckAccess = ({
permissionType,
permissions,
bodyProps = {} as Record<Permissions, string[]>,
skipCheck,
getRoleByName,
}: {
permissionType: PermissionTypes;
permissions: Permissions[];
bodyProps?: Record<Permissions, string[]>;
skipCheck?: (req?: ServerRequest) => boolean;
getRoleByName: (roleName: string, fieldsToSelect?: string | string[]) => Promise<IRole | null>;
}): ((req: ServerRequest, res: ServerResponse, next: NextFunction) => Promise<unknown>) => {
return async (req, res, next) => {
try {
const hasAccess = await checkAccess({
req,
user: req.user as IUser,
permissionType,
permissions,
bodyProps,
checkObject: req.body,
skipCheck,
getRoleByName,
});

if (hasAccess) {
return next();
}

logger.warn(
`[${permissionType}] Forbidden: "${req.originalUrl}" - Insufficient permissions for User ${req.user?.id}: ${permissions.join(', ')}`,
);
return res.status(403).json({ message: 'Forbidden: Insufficient permissions' });
} catch (error) {
logger.error(error);
return res.status(500).json({
message: `Server error: ${error instanceof Error ? error.message : 'Unknown error'}`,
});
}
};
};
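For context, a minimal usage sketch of the new middleware factory: wiring generateCheckAccess into an Express route with a role lookup. The import path assumes the package re-exports shown in this diff; the route, the chosen permission constants, and the findRoleByName helper are illustrative, not taken from the repository.

// Illustrative wiring only; the route and role lookup are assumptions.
import express from 'express';
import { PermissionTypes, Permissions } from 'librechat-data-provider';
import { generateCheckAccess } from '@librechat/api';
import type { IRole } from '@librechat/data-schemas';

// Hypothetical role lookup; a real app would query its roles collection here.
const findRoleByName = async (roleName: string): Promise<IRole | null> => {
  return null; // placeholder: no role found means access is denied
};

const checkPromptAccess = generateCheckAccess({
  permissionType: PermissionTypes.PROMPTS,
  permissions: [Permissions.USE, Permissions.CREATE],
  getRoleByName: findRoleByName,
});

const app = express();
app.use(express.json());
app.post('/api/prompts', checkPromptAccess, (req, res) => res.json({ ok: true }));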
packages/api/src/middleware/index.ts (new file, 1 line)
@@ -0,0 +1 @@
export * from './access';
@@ -1,6 +1,7 @@
import { z } from 'zod';
import { openAISchema, EModelEndpoint } from 'librechat-data-provider';
import type { TEndpointOption, TAzureConfig, TEndpoint } from 'librechat-data-provider';
import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
import type { OpenAIClientOptions } from '@librechat/agents';
import type { AzureOptions } from './azure';

@@ -33,6 +34,7 @@ export type ClientOptions = OpenAIClientOptions & {
export interface LLMConfigResult {
llmConfig: ClientOptions;
configOptions: OpenAIConfiguration;
tools?: BindToolsInput[];
}

/**
@@ -1,4 +1,4 @@
import type { Providers } from '@librechat/agents';
import type { Providers, ClientOptions } from '@librechat/agents';
import type { AgentModelParameters } from 'librechat-data-provider';
import type { OpenAIConfiguration } from './openai';

@@ -8,4 +8,5 @@ export type RunLLMConfig = {
streamUsage: boolean;
usage?: boolean;
configuration?: OpenAIConfiguration;
} & AgentModelParameters;
} & AgentModelParameters &
ClientOptions;
@@ -5,6 +5,7 @@ export * from './env';
export * from './events';
export * from './files';
export * from './generators';
export * from './key';
export * from './llm';
export * from './math';
export * from './openid';
packages/api/src/utils/key.ts (new file, 70 lines)
@@ -0,0 +1,70 @@
import fs from 'fs';
import path from 'path';
import axios from 'axios';
import { logger } from '@librechat/data-schemas';

export interface GoogleServiceKey {
type?: string;
project_id?: string;
private_key_id?: string;
private_key?: string;
client_email?: string;
client_id?: string;
auth_uri?: string;
token_uri?: string;
auth_provider_x509_cert_url?: string;
client_x509_cert_url?: string;
[key: string]: unknown;
}

/**
* Load Google service key from file path or URL
* @param keyPath - The path or URL to the service key file
* @returns The parsed service key object or null if failed
*/
export async function loadServiceKey(keyPath: string): Promise<GoogleServiceKey | null> {
if (!keyPath) {
return null;
}

let serviceKey: unknown;

// Check if it's a URL
if (/^https?:\/\//.test(keyPath)) {
try {
const response = await axios.get(keyPath);
serviceKey = response.data;
} catch (error) {
logger.error(`Failed to fetch the service key from URL: ${keyPath}`, error);
return null;
}
} else {
// It's a file path
try {
const absolutePath = path.isAbsolute(keyPath) ? keyPath : path.resolve(keyPath);
const fileContent = fs.readFileSync(absolutePath, 'utf8');
serviceKey = JSON.parse(fileContent);
} catch (error) {
logger.error(`Failed to load service key from file: ${keyPath}`, error);
return null;
}
}

// If the response is a string (e.g., from a URL that returns JSON as text), parse it
if (typeof serviceKey === 'string') {
try {
serviceKey = JSON.parse(serviceKey);
} catch (parseError) {
logger.error(`Failed to parse service key JSON from ${keyPath}`, parseError);
return null;
}
}

// Validate the service key has required fields
if (!serviceKey || typeof serviceKey !== 'object') {
logger.error(`Invalid service key format from ${keyPath}`);
return null;
}

return serviceKey as GoogleServiceKey;
}
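For context, a short usage sketch of the new loadServiceKey helper: it accepts either a local file path or an http(s) URL and returns the parsed key or null. The import path assumes the './key' export added to the utils index above is surfaced by @librechat/api; the paths are illustrative.

// Illustrative usage; the key path and import surface are assumptions.
import { loadServiceKey } from '@librechat/api';

async function main(): Promise<void> {
  const keyPath = process.env.GOOGLE_SERVICE_KEY_FILE_PATH ?? './api/data/auth.json';
  const key = await loadServiceKey(keyPath);
  if (!key?.client_email || !key?.private_key || !key?.project_id) {
    throw new Error(`Service key at ${keyPath} is missing required fields`);
  }
  console.log(`Loaded service key for project ${key.project_id}`);
}

main().catch((err) => {
  console.error(err);
  process.exit(1);
});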
@@ -1,6 +1,6 @@
{
"name": "librechat-data-provider",
"version": "0.7.88",
"version": "0.7.899",
"description": "data services for librechat apps",
"main": "dist/index.js",
"module": "dist/index.es.js",
@@ -1,4 +1,3 @@
/* eslint-disable jest/no-conditional-expect */
import { ZodError, z } from 'zod';
import { generateDynamicSchema, validateSettingDefinitions, OptionTypes } from '../src/generate';
import type { SettingsConfiguration } from '../src/generate';
@@ -97,6 +96,37 @@ describe('generateDynamicSchema', () => {
expect(result['data']).toEqual({ testEnum: 'option2' });
});

it('should generate a schema for enum settings with empty string option', () => {
const settings: SettingsConfiguration = [
{
key: 'testEnumWithEmpty',
description: 'A test enum setting with empty string',
type: 'enum',
default: '',
options: ['', 'option1', 'option2'],
enumMappings: {
'': 'None',
option1: 'First Option',
option2: 'Second Option',
},
component: 'slider',
columnSpan: 2,
label: 'Test Enum with Empty String',
},
];

const schema = generateDynamicSchema(settings);
const result = schema.safeParse({ testEnumWithEmpty: '' });

expect(result.success).toBeTruthy();
expect(result['data']).toEqual({ testEnumWithEmpty: '' });

// Test with non-empty option
const result2 = schema.safeParse({ testEnumWithEmpty: 'option1' });
expect(result2.success).toBeTruthy();
expect(result2['data']).toEqual({ testEnumWithEmpty: 'option1' });
});

it('should fail for incorrect enum value', () => {
const settings: SettingsConfiguration = [
{
@@ -481,6 +511,47 @@ describe('validateSettingDefinitions', () => {

expect(() => validateSettingDefinitions(settingsExceedingMaxTags)).toThrow(ZodError);
});

// Test for incomplete enumMappings
test('should throw error for incomplete enumMappings', () => {
const settingsWithIncompleteEnumMappings: SettingsConfiguration = [
{
key: 'displayMode',
type: 'enum',
component: 'dropdown',
options: ['light', 'dark', 'auto'],
enumMappings: {
light: 'Light Mode',
dark: 'Dark Mode',
// Missing mapping for 'auto'
},
optionType: OptionTypes.Custom,
},
];

expect(() => validateSettingDefinitions(settingsWithIncompleteEnumMappings)).toThrow(ZodError);
});

// Test for complete enumMappings including empty string
test('should not throw error for complete enumMappings including empty string', () => {
const settingsWithCompleteEnumMappings: SettingsConfiguration = [
{
key: 'selectionMode',
type: 'enum',
component: 'slider',
options: ['', 'single', 'multiple'],
enumMappings: {
'': 'None',
single: 'Single Selection',
multiple: 'Multiple Selection',
},
default: '',
optionType: OptionTypes.Custom,
},
];

expect(() => validateSettingDefinitions(settingsWithCompleteEnumMappings)).not.toThrow();
});
});

const settingsConfiguration: SettingsConfiguration = [
@@ -515,7 +586,7 @@ const settingsConfiguration: SettingsConfiguration = [
{
key: 'presence_penalty',
description:
'Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics.',
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
type: 'number',
default: 0,
range: {
@@ -529,7 +600,7 @@ const settingsConfiguration: SettingsConfiguration = [
{
key: 'frequency_penalty',
description:
'Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim.',
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
type: 'number',
default: 0,
range: {
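The tests above pin down the new rule: every enum option, including an empty string, must have an entry in enumMappings, or validateSettingDefinitions throws a ZodError. A minimal sketch of a conforming definition follows; the top-level import surface is an assumption (the tests import from ../src/generate inside librechat-data-provider).

// Sketch of a settings definition that satisfies the enumMappings rule.
// Import surface is an assumption based on the test file above.
import { validateSettingDefinitions, OptionTypes } from 'librechat-data-provider';
import type { SettingsConfiguration } from 'librechat-data-provider';

const settings: SettingsConfiguration = [
  {
    key: 'selectionMode',
    type: 'enum',
    component: 'dropdown',
    options: ['', 'single', 'multiple'],
    // One display label per option, including the empty string.
    enumMappings: { '': 'None', single: 'Single Selection', multiple: 'Multiple Selection' },
    default: '',
    optionType: OptionTypes.Custom,
  },
];

validateSettingDefinitions(settings); // throws a ZodError if any option lacks a mapping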
Some files were not shown because too many files have changed in this diff.