Compare commits

...

17 Commits

Author SHA1 Message Date
Ruben Talstra
40e59bc55c Merge branch 'main' into feat/E2EE 2025-03-05 10:50:49 +01:00
github-actions[bot]
287699331c 🌍 i18n: Update translation.json with latest translations (#6159)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-03-03 19:11:33 -05:00
Danny Avila
ceb0da874b 🧠 feat: Bedrock Anthropic Reasoning & Update Endpoint Handling (#6163)
* feat: Add thinking and thinkingBudget parameters for Bedrock Anthropic models

* chore: Update @librechat/agents to version 2.1.8

* refactor: change region order in params

* refactor: Add maxTokens parameter to conversation preset schema

* refactor: Update agent client to use bedrockInputSchema and improve error handling for model parameters

* refactor: streamline/optimize llmConfig initialization and saving for bedrock

* fix: ensure config titleModel is used for all endpoints

* refactor: enhance OpenAIClient and agent initialization to support endpoint checks for OpenRouter

* chore: bump @google/generative-ai
2025-03-03 19:09:22 -05:00
github-actions[bot]
3accf91094 🌍 i18n: Update translation.json with latest translations (#6132)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-03-03 12:44:59 -05:00
Danny Avila
2e63e32382 🐼 feat: Add Flux Image Generation Tool (#6147)
* 🔧 fix: Log warning for aborted operations in AgentClient

* ci: Remove unused saveMessageToDatabase mock in FakeClient initialization

* ci: test actual implementation of saveMessageToDatabase

* refactor: Change log level from warning to error for aborted operations in AgentClient

* refactor: Add className prop to Image component for customizable styling, use theme selectors

* feat: FLUX Image Generation tool
2025-03-02 13:19:53 -05:00
Danny Avila
7f6b32ff04 🖼️ refactor: Enhance Env Extraction & Agent Image Handling (#6131)
* refactor: use new image output format for agents using DALL-E tools

* refactor: Enhance image fetching with proxy support and adjust logging placement in DALL-E 3 integration

* refactor: Enhance StableDiffusionAPI to support agent-specific return values and display message for generated images

* refactor: Add unit test execution for librechat-mcp in backend review workflow

* refactor: Update environment variable extraction logic, export from separate module to avoid circular refs, and remove deprecated tests

* refactor: Add unit tests for environment variable extraction and enhance StdioOptionsSchema to process env variables
2025-03-01 07:51:12 -05:00
Danny Avila
2293cd667e 🚀 feat: GPT-4.5, Anthropic Tool Header, and OpenAPI Ref Resolution (#6118)
* 🔧 refactor: Update settings to use 'as const' for improved type safety and make gpt-4o-mini default model (cheapest)

* 📖 docs: Update README to reflect support for GPT-4.5 in image analysis feature

* 🔧 refactor: Update model handling to use default settings and improve encoding logic

* 🔧 refactor: Enhance model version extraction logic for improved compatibility with future GPT and omni models

* feat: GPT-4.5 tx/token update, vision support

* fix: $ref resolution logic in OpenAPI handling

* feat: add new 'anthropic-beta' header for Claude 3.7 to include token-efficient tools; ref: https://docs.anthropic.com/en/docs/build-with-claude/tool-use/token-efficient-tool-use
2025-02-28 12:19:21 -05:00
Danny Avila
9802629848 🚀 feat: Agent Cache Tokens & Anthropic Reasoning Support (#6098)
* fix: handling of top_k and top_p parameters for Claude-3.7 models (allowed without reasoning)

* feat: bump @librechat/agents for Anthropic Reasoning support

* fix: update reasoning handling for OpenRouter integration

* fix: enhance agent token spending logic to include cache creation and read details

* fix: update logic for thinking status in ContentParts component

* refactor: improve agent title handling

* chore: bump @librechat/agents to version 2.1.7 for parallel tool calling for Google models
2025-02-27 12:59:51 -05:00
Ruben Talstra
94f0d1cb41 refactor: decrypting the encrypted private key to decrypt the messages. 2025-02-16 17:54:06 +01:00
Ruben Talstra
d37cc1cf4d refactor: works. now fixing to decrypt the text in the UI. 2025-02-16 16:58:59 +01:00
Ruben Talstra
7346d20224 refactor: request is encrypted. response from AI is still saved in plaintext but from the stream the final response is encrypted. 2025-02-16 11:56:40 +01:00
Ruben Talstra
0cc0e5d287 Merge branch 'main' into feat/E2EE 2025-02-16 10:23:22 +01:00
Ruben Talstra
d01674a4c6 refactor: removed unused file 2025-02-16 09:52:44 +01:00
Ruben Talstra
d4621c3ea8 Merge remote-tracking branch 'origin/feat/E2EE' into feat/E2EE
# Conflicts:
#	client/src/components/Nav/SettingsTabs/Chat/EncryptionPassphrase.tsx
#	client/src/hooks/SSE/useSSE.ts
#	packages/data-provider/src/types.ts
2025-02-16 09:48:56 +01:00
Ruben Talstra
94d32906f1 refactor: fully working E2EE
Small issue to fix: when the full response is received, it replaces the text with the text from the DB, and the decryption is not yet implemented.
2025-02-16 09:48:08 +01:00
Ruben Talstra
606fea044a refactor: creating a starting point for E2EE 2025-02-15 23:04:26 +01:00
Ruben Talstra
18d019d8b3 feat: started with proper E2EE ;) 2025-02-15 21:26:40 +01:00
79 changed files with 4864 additions and 1090 deletions

View File

@@ -175,7 +175,7 @@ GOOGLE_KEY=user_provided
#============#
OPENAI_API_KEY=user_provided
# OPENAI_MODELS=o1,o1-mini,o1-preview,gpt-4o,chatgpt-4o-latest,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
# OPENAI_MODELS=o1,o1-mini,o1-preview,gpt-4o,gpt-4.5-preview,chatgpt-4o-latest,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
DEBUG_OPENAI=false
@@ -248,6 +248,13 @@ AZURE_AI_SEARCH_SEARCH_OPTION_SELECT=
# DALLE3_AZURE_API_VERSION=
# DALLE2_AZURE_API_VERSION=
# Flux
#-----------------
FLUX_API_BASE_URL=https://api.us1.bfl.ai
# FLUX_API_BASE_URL = 'https://api.bfl.ml';
# Get your API key at https://api.us1.bfl.ai/auth/profile
# FLUX_API_KEY=
# Google
#-----------------

View File

@@ -61,4 +61,7 @@ jobs:
run: cd api && npm run test:ci
- name: Run librechat-data-provider unit tests
run: cd packages/data-provider && npm run test:ci
- name: Run librechat-mcp unit tests
run: cd packages/mcp && npm run test:ci

View File

@@ -81,7 +81,7 @@
- [Fork Messages & Conversations](https://www.librechat.ai/docs/features/fork) for Advanced Context control
- 💬 **Multimodal & File Interactions**:
- Upload and analyze images with Claude 3, GPT-4o, o1, Llama-Vision, and Gemini 📸
- Upload and analyze images with Claude 3, GPT-4.5, GPT-4o, o1, Llama-Vision, and Gemini 📸
- Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, AWS Bedrock, & Google 🗃️
- 🌎 **Multilingual UI**:

View File

@@ -746,15 +746,6 @@ class AnthropicClient extends BaseClient {
metadata,
};
if (!/claude-3[-.]7/.test(model)) {
if (top_p !== undefined) {
requestOptions.top_p = top_p;
}
if (top_k !== undefined) {
requestOptions.top_k = top_k;
}
}
if (this.useMessages) {
requestOptions.messages = payload;
requestOptions.max_tokens =
@@ -769,6 +760,14 @@ class AnthropicClient extends BaseClient {
thinkingBudget: this.options.thinkingBudget,
});
if (!/claude-3[-.]7/.test(model)) {
requestOptions.top_p = top_p;
requestOptions.top_k = top_k;
} else if (requestOptions.thinking == null) {
requestOptions.topP = top_p;
requestOptions.topK = top_k;
}
if (this.systemMessage && this.supportsCacheControl === true) {
requestOptions.system = [
{

View File

@@ -1,4 +1,3 @@
const crypto = require('crypto');
const fetch = require('node-fetch');
const {
supportsBalanceCheck,
@@ -9,7 +8,7 @@ const {
ErrorTypes,
Constants,
} = require('librechat-data-provider');
const { getMessages, saveMessage, updateMessage, saveConvo, getConvo } = require('~/models');
const { getMessages, saveMessage, updateMessage, saveConvo, getConvo, getUserById } = require('~/models');
const { addSpaceIfNeeded, isEnabled } = require('~/server/utils');
const { truncateToolCallOutputs } = require('./prompts');
const checkBalance = require('~/models/checkBalance');
@@ -17,6 +16,48 @@ const { getFiles } = require('~/models/File');
const TextStream = require('./TextStream');
const { logger } = require('~/config');
let crypto;
try {
crypto = require('crypto');
} catch (err) {
logger.error('[AskController] crypto support is disabled!', err);
}
/**
* Helper function to encrypt plaintext using AES-256-GCM and then RSA-encrypt the AES key.
* @param {string} plainText - The plaintext to encrypt.
* @param {string} pemPublicKey - The RSA public key in PEM format.
* @returns {Object} An object containing the ciphertext, iv, authTag, and encryptedKey.
*/
function encryptText(plainText, pemPublicKey) {
// Generate a random 256-bit AES key and a 12-byte IV.
const aesKey = crypto.randomBytes(32);
const iv = crypto.randomBytes(12);
// Encrypt the plaintext using AES-256-GCM.
const cipher = crypto.createCipheriv('aes-256-gcm', aesKey, iv);
let ciphertext = cipher.update(plainText, 'utf8', 'base64');
ciphertext += cipher.final('base64');
const authTag = cipher.getAuthTag().toString('base64');
// Encrypt the AES key using the user's RSA public key.
const encryptedKey = crypto.publicEncrypt(
{
key: pemPublicKey,
padding: crypto.constants.RSA_PKCS1_OAEP_PADDING,
oaepHash: 'sha256',
},
aesKey,
).toString('base64');
return {
ciphertext,
iv: iv.toString('base64'),
authTag,
encryptedKey,
};
}
class BaseClient {
constructor(apiKey, options = {}) {
this.apiKey = apiKey;
@@ -849,18 +890,64 @@ class BaseClient {
* @param {string | null} user
*/
async saveMessageToDatabase(message, endpointOptions, user = null) {
if (this.user && user !== this.user) {
// Normalize the user information:
// If "user" is an object, use it; otherwise, if a string is passed use req.user (if available)
const currentUser =
user && typeof user === 'object'
? user
: (this.options.req && this.options.req.user
? this.options.req.user
: { id: user });
const currentUserId = currentUser.id || currentUser;
// Check if the clients stored user matches the current user.
// (this.user might have been set earlier in setMessageOptions)
const storedUserId =
this.user && typeof this.user === 'object' ? this.user.id : this.user;
if (storedUserId && currentUserId && storedUserId !== currentUserId) {
throw new Error('User mismatch.');
}
// console.log('User ID:', currentUserId);
const dbUser = await getUserById(currentUserId, 'encryptionPublicKey');
// --- NEW ENCRYPTION BLOCK: Encrypt AI response if encryptionPublicKey exists ---
if (dbUser.encryptionPublicKey && message && message.text) {
try {
// Rebuild the PEM format if necessary.
const pemPublicKey = `-----BEGIN PUBLIC KEY-----\n${dbUser.encryptionPublicKey
.match(/.{1,64}/g)
.join('\n')}\n-----END PUBLIC KEY-----`;
const { ciphertext, iv, authTag, encryptedKey } = encryptText(
message.text,
pemPublicKey,
);
message.text = ciphertext;
message.iv = iv;
message.authTag = authTag;
message.encryptedKey = encryptedKey;
logger.debug('[BaseClient.saveMessageToDatabase] Encrypted message text');
} catch (err) {
logger.error('[BaseClient.saveMessageToDatabase] Error encrypting message text', err);
}
}
// --- End Encryption Block ---
// Build update parameters including encryption fields.
const updateParams = {
...message,
endpoint: this.options.endpoint,
unfinished: false,
user: currentUserId, // store the user id (ensured to be a string)
iv: message.iv ?? null,
authTag: message.authTag ?? null,
encryptedKey: message.encryptedKey ?? null,
};
const savedMessage = await saveMessage(
this.options.req,
{
...message,
endpoint: this.options.endpoint,
unfinished: false,
user,
},
updateParams,
{ context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveMessage' },
);
@@ -1149,4 +1236,4 @@ class BaseClient {
}
}
module.exports = BaseClient;
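
For reference, a minimal sketch of the inverse operation, which is not part of this changeset: decrypting a stored message given the user's RSA private key in PEM form. The field names (ciphertext, iv, authTag, encryptedKey) match the encryption block above; everything else is illustrative.

const crypto = require('crypto');

/**
 * Illustrative inverse of encryptText above (not part of this diff):
 * RSA-OAEP-decrypt the AES key, then AES-256-GCM-decrypt the ciphertext.
 */
function decryptText({ ciphertext, iv, authTag, encryptedKey }, pemPrivateKey) {
  // Unwrap the AES key with the user's RSA private key.
  const aesKey = crypto.privateDecrypt(
    {
      key: pemPrivateKey,
      padding: crypto.constants.RSA_PKCS1_OAEP_PADDING,
      oaepHash: 'sha256',
    },
    Buffer.from(encryptedKey, 'base64'),
  );
  // Decrypt the message text with AES-256-GCM, verifying the auth tag.
  const decipher = crypto.createDecipheriv('aes-256-gcm', aesKey, Buffer.from(iv, 'base64'));
  decipher.setAuthTag(Buffer.from(authTag, 'base64'));
  let plainText = decipher.update(ciphertext, 'base64', 'utf8');
  plainText += decipher.final('utf8');
  return plainText;
}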

View File

@@ -827,7 +827,8 @@ class GoogleClient extends BaseClient {
let reply = '';
const { abortController } = options;
const model = this.modelOptions.modelName ?? this.modelOptions.model ?? '';
const model =
this.options.titleModel ?? this.modelOptions.modelName ?? this.modelOptions.model ?? '';
const safetySettings = getSafetySettings(model);
if (!EXCLUDED_GENAI_MODELS.test(model) && !this.project_id) {
logger.debug('Identified titling model as GenAI version');

View File

@@ -112,7 +112,12 @@ class OpenAIClient extends BaseClient {
const { OPENAI_FORCE_PROMPT } = process.env ?? {};
const { reverseProxyUrl: reverseProxy } = this.options;
if (!this.useOpenRouter && reverseProxy && reverseProxy.includes(KnownEndpoints.openrouter)) {
if (
!this.useOpenRouter &&
((reverseProxy && reverseProxy.includes(KnownEndpoints.openrouter)) ||
(this.options.endpoint &&
this.options.endpoint.toLowerCase().includes(KnownEndpoints.openrouter)))
) {
this.useOpenRouter = true;
}
@@ -298,7 +303,9 @@ class OpenAIClient extends BaseClient {
}
getEncoding() {
return this.model?.includes('gpt-4o') ? 'o200k_base' : 'cl100k_base';
return this.modelOptions?.model && /gpt-4[^-\s]/.test(this.modelOptions.model)
? 'o200k_base'
: 'cl100k_base';
}
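
A quick illustration of the new encoding check; this is plain regex behavior, so the mapping follows directly from the code above, and the model ids are only examples:

// Which models get the 'o200k_base' encoding under the updated check.
const isO200k = (model) => /gpt-4[^-\s]/.test(model);
console.log(isO200k('gpt-4o'));          // true  -> 'o200k_base'
console.log(isO200k('gpt-4.5-preview')); // true  -> 'o200k_base'
console.log(isO200k('gpt-4-turbo'));     // false -> 'cl100k_base'
console.log(isO200k('gpt-3.5-turbo'));   // false -> 'cl100k_base'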
/**
@@ -605,7 +612,7 @@ class OpenAIClient extends BaseClient {
}
initializeLLM({
model = 'gpt-4o-mini',
model = openAISettings.model.default,
modelName,
temperature = 0.2,
max_tokens,
@@ -706,7 +713,7 @@ class OpenAIClient extends BaseClient {
const { OPENAI_TITLE_MODEL } = process.env ?? {};
let model = this.options.titleModel ?? OPENAI_TITLE_MODEL ?? 'gpt-4o-mini';
let model = this.options.titleModel ?? OPENAI_TITLE_MODEL ?? openAISettings.model.default;
if (model === Constants.CURRENT_MODEL) {
model = this.modelOptions.model;
}
@@ -899,7 +906,7 @@ ${convo}
let prompt;
// TODO: remove the gpt fallback and make it specific to endpoint
const { OPENAI_SUMMARY_MODEL = 'gpt-4o-mini' } = process.env ?? {};
const { OPENAI_SUMMARY_MODEL = openAISettings.model.default } = process.env ?? {};
let model = this.options.summaryModel ?? OPENAI_SUMMARY_MODEL;
if (model === Constants.CURRENT_MODEL) {
model = this.modelOptions.model;
@@ -1309,6 +1316,12 @@ ${convo}
modelOptions.include_reasoning = true;
reasoningKey = 'reasoning';
}
if (this.useOpenRouter && modelOptions.reasoning_effort != null) {
modelOptions.reasoning = {
effort: modelOptions.reasoning_effort,
};
delete modelOptions.reasoning_effort;
}
this.streamHandler = new SplitStreamHandler({
reasoningKey,

View File

@@ -680,4 +680,53 @@ describe('AnthropicClient', () => {
expect(capturedOptions).not.toHaveProperty('top_p');
});
});
it('should include top_k and top_p parameters for Claude-3.7 models when thinking is explicitly disabled', async () => {
const client = new AnthropicClient('test-api-key', {
modelOptions: {
model: 'claude-3-7-sonnet',
temperature: 0.7,
topK: 10,
topP: 0.9,
},
thinking: false,
});
async function* mockAsyncGenerator() {
yield { type: 'message_start', message: { usage: {} } };
yield { delta: { text: 'Test response' } };
yield { type: 'message_delta', usage: {} };
}
jest.spyOn(client, 'createResponse').mockImplementation(() => {
return mockAsyncGenerator();
});
let capturedOptions = null;
jest.spyOn(client, 'getClient').mockImplementation((options) => {
capturedOptions = options;
return {};
});
const payload = [{ role: 'user', content: 'Test message' }];
await client.sendCompletion(payload, {});
expect(capturedOptions).toHaveProperty('topK', 10);
expect(capturedOptions).toHaveProperty('topP', 0.9);
client.setOptions({
modelOptions: {
model: 'claude-3.7-sonnet',
temperature: 0.7,
topK: 10,
topP: 0.9,
},
thinking: false,
});
await client.sendCompletion(payload, {});
expect(capturedOptions).toHaveProperty('topK', 10);
expect(capturedOptions).toHaveProperty('topP', 0.9);
});
});

View File

@@ -30,6 +30,8 @@ jest.mock('~/models', () => ({
updateFileUsage: jest.fn(),
}));
const { getConvo, saveConvo } = require('~/models');
jest.mock('@langchain/openai', () => {
return {
ChatOpenAI: jest.fn().mockImplementation(() => {
@@ -540,10 +542,11 @@ describe('BaseClient', () => {
test('saveMessageToDatabase is called with the correct arguments', async () => {
const saveOptions = TestClient.getSaveOptions();
const user = {}; // Mock user
const user = {};
const opts = { user };
const saveSpy = jest.spyOn(TestClient, 'saveMessageToDatabase');
await TestClient.sendMessage('Hello, world!', opts);
expect(TestClient.saveMessageToDatabase).toHaveBeenCalledWith(
expect(saveSpy).toHaveBeenCalledWith(
expect.objectContaining({
sender: expect.any(String),
text: expect.any(String),
@@ -557,6 +560,157 @@ describe('BaseClient', () => {
);
});
test('should handle existing conversation when getConvo retrieves one', async () => {
const existingConvo = {
conversationId: 'existing-convo-id',
endpoint: 'openai',
endpointType: 'openai',
model: 'gpt-3.5-turbo',
messages: [
{ role: 'user', content: 'Existing message 1' },
{ role: 'assistant', content: 'Existing response 1' },
],
temperature: 1,
};
const { temperature: _temp, ...newConvo } = existingConvo;
const user = {
id: 'user-id',
};
getConvo.mockResolvedValue(existingConvo);
saveConvo.mockResolvedValue(newConvo);
TestClient = initializeFakeClient(
apiKey,
{
...options,
req: {
user,
},
},
[],
);
const saveSpy = jest.spyOn(TestClient, 'saveMessageToDatabase');
const newMessage = 'New message in existing conversation';
const response = await TestClient.sendMessage(newMessage, {
user,
conversationId: existingConvo.conversationId,
});
expect(getConvo).toHaveBeenCalledWith(user.id, existingConvo.conversationId);
expect(TestClient.conversationId).toBe(existingConvo.conversationId);
expect(response.conversationId).toBe(existingConvo.conversationId);
expect(TestClient.fetchedConvo).toBe(true);
expect(saveSpy).toHaveBeenCalledWith(
expect.objectContaining({
conversationId: existingConvo.conversationId,
text: newMessage,
}),
expect.any(Object),
expect.any(Object),
);
expect(saveConvo).toHaveBeenCalledTimes(2);
expect(saveConvo).toHaveBeenCalledWith(
expect.any(Object),
expect.objectContaining({
conversationId: existingConvo.conversationId,
}),
expect.objectContaining({
context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo',
unsetFields: {
temperature: 1,
},
}),
);
await TestClient.sendMessage('Another message', {
conversationId: existingConvo.conversationId,
});
expect(getConvo).toHaveBeenCalledTimes(1);
});
test('should correctly handle existing conversation and unset fields appropriately', async () => {
const existingConvo = {
conversationId: 'existing-convo-id',
endpoint: 'openai',
endpointType: 'openai',
model: 'gpt-3.5-turbo',
messages: [
{ role: 'user', content: 'Existing message 1' },
{ role: 'assistant', content: 'Existing response 1' },
],
title: 'Existing Conversation',
someExistingField: 'existingValue',
anotherExistingField: 'anotherValue',
temperature: 0.7,
modelLabel: 'GPT-3.5',
};
getConvo.mockResolvedValue(existingConvo);
saveConvo.mockResolvedValue(existingConvo);
TestClient = initializeFakeClient(
apiKey,
{
...options,
modelOptions: {
model: 'gpt-4',
temperature: 0.5,
},
},
[],
);
const newMessage = 'New message in existing conversation';
await TestClient.sendMessage(newMessage, {
conversationId: existingConvo.conversationId,
});
expect(saveConvo).toHaveBeenCalledTimes(2);
const saveConvoCall = saveConvo.mock.calls[0];
const [, savedFields, saveOptions] = saveConvoCall;
// Instead of checking all excludedKeys, we'll just check specific fields
// that we know should be excluded
expect(savedFields).not.toHaveProperty('messages');
expect(savedFields).not.toHaveProperty('title');
// Only check that someExistingField is in unsetFields
expect(saveOptions.unsetFields).toHaveProperty('someExistingField', 1);
// Mock saveConvo to return the expected fields
saveConvo.mockImplementation((req, fields) => {
return Promise.resolve({
...fields,
endpoint: 'openai',
endpointType: 'openai',
model: 'gpt-4',
temperature: 0.5,
});
});
// Only check the conversationId since that's the only field we can be sure about
expect(savedFields).toHaveProperty('conversationId', 'existing-convo-id');
expect(TestClient.fetchedConvo).toBe(true);
await TestClient.sendMessage('Another message', {
conversationId: existingConvo.conversationId,
});
expect(getConvo).toHaveBeenCalledTimes(1);
const secondSaveConvoCall = saveConvo.mock.calls[1];
expect(secondSaveConvoCall[2]).toHaveProperty('unsetFields', {});
});
test('sendCompletion is called with the correct arguments', async () => {
const payload = {}; // Mock payload
TestClient.buildMessages.mockReturnValue({ prompt: payload, tokenCountMap: null });

View File

@@ -56,7 +56,6 @@ const initializeFakeClient = (apiKey, options, fakeMessages) => {
let TestClient = new FakeClient(apiKey);
TestClient.options = options;
TestClient.abortController = { abort: jest.fn() };
TestClient.saveMessageToDatabase = jest.fn();
TestClient.loadHistory = jest
.fn()
.mockImplementation((conversationId, parentMessageId = null) => {
@@ -86,7 +85,6 @@ const initializeFakeClient = (apiKey, options, fakeMessages) => {
return 'Mock response text';
});
// eslint-disable-next-line no-unused-vars
TestClient.getCompletion = jest.fn().mockImplementation(async (..._args) => {
return {
choices: [

View File

@@ -2,9 +2,10 @@ const availableTools = require('./manifest.json');
// Structured Tools
const DALLE3 = require('./structured/DALLE3');
const FluxAPI = require('./structured/FluxAPI');
const OpenWeather = require('./structured/OpenWeather');
const createYouTubeTools = require('./structured/YouTube');
const StructuredWolfram = require('./structured/Wolfram');
const createYouTubeTools = require('./structured/YouTube');
const StructuredACS = require('./structured/AzureAISearch');
const StructuredSD = require('./structured/StableDiffusion');
const GoogleSearchAPI = require('./structured/GoogleSearch');
@@ -30,6 +31,7 @@ module.exports = {
manifestToolMap,
// Structured Tools
DALLE3,
FluxAPI,
OpenWeather,
StructuredSD,
StructuredACS,

View File

@@ -164,5 +164,19 @@
"description": "Sign up at <a href=\"https://home.openweathermap.org/users/sign_up\" target=\"_blank\">OpenWeather</a>, then get your key at <a href=\"https://home.openweathermap.org/api_keys\" target=\"_blank\">API keys</a>."
}
]
},
{
"name": "Flux",
"pluginKey": "flux",
"description": "Generate images using text with the Flux API.",
"icon": "https://blackforestlabs.ai/wp-content/uploads/2024/07/bfl_logo_retraced_blk.png",
"isAuthRequired": "true",
"authConfig": [
{
"authField": "FLUX_API_KEY",
"label": "Your Flux API Key",
"description": "Provide your Flux API key from your user profile."
}
]
}
]

View File

@@ -1,14 +1,17 @@
const { z } = require('zod');
const path = require('path');
const OpenAI = require('openai');
const fetch = require('node-fetch');
const { v4: uuidv4 } = require('uuid');
const { Tool } = require('@langchain/core/tools');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { FileContext } = require('librechat-data-provider');
const { FileContext, ContentTypes } = require('librechat-data-provider');
const { getImageBasename } = require('~/server/services/Files/images');
const extractBaseURL = require('~/utils/extractBaseURL');
const { logger } = require('~/config');
const displayMessage =
'DALL-E displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';
class DALLE3 extends Tool {
constructor(fields = {}) {
super();
@@ -114,10 +117,7 @@ class DALLE3 extends Tool {
if (this.isAgent === true && typeof value === 'string') {
return [value, {}];
} else if (this.isAgent === true && typeof value === 'object') {
return [
'DALL-E displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.',
value,
];
return [displayMessage, value];
}
return value;
@@ -160,6 +160,32 @@ Error Message: ${error.message}`);
);
}
if (this.isAgent) {
let fetchOptions = {};
if (process.env.PROXY) {
fetchOptions.agent = new HttpsProxyAgent(process.env.PROXY);
}
const imageResponse = await fetch(theImageUrl, fetchOptions);
const arrayBuffer = await imageResponse.arrayBuffer();
const base64 = Buffer.from(arrayBuffer).toString('base64');
const content = [
{
type: ContentTypes.IMAGE_URL,
image_url: {
url: `data:image/jpeg;base64,${base64}`,
},
},
];
const response = [
{
type: ContentTypes.TEXT,
text: displayMessage,
},
];
return [response, { content }];
}
const imageBasename = getImageBasename(theImageUrl);
const imageExt = path.extname(imageBasename);

View File

@@ -0,0 +1,554 @@
const { z } = require('zod');
const axios = require('axios');
const fetch = require('node-fetch');
const { v4: uuidv4 } = require('uuid');
const { Tool } = require('@langchain/core/tools');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { FileContext, ContentTypes } = require('librechat-data-provider');
const { logger } = require('~/config');
const displayMessage =
'Flux displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';
/**
* FluxAPI - A tool for generating high-quality images from text prompts using the Flux API.
* Each call generates one image. If multiple images are needed, make multiple consecutive calls with the same or varied prompts.
*/
class FluxAPI extends Tool {
// Pricing constants in USD per image
static PRICING = {
FLUX_PRO_1_1_ULTRA: -0.06, // /v1/flux-pro-1.1-ultra
FLUX_PRO_1_1: -0.04, // /v1/flux-pro-1.1
FLUX_PRO: -0.05, // /v1/flux-pro
FLUX_DEV: -0.025, // /v1/flux-dev
FLUX_PRO_FINETUNED: -0.06, // /v1/flux-pro-finetuned
FLUX_PRO_1_1_ULTRA_FINETUNED: -0.07, // /v1/flux-pro-1.1-ultra-finetuned
};
constructor(fields = {}) {
super();
/** @type {boolean} Used to initialize the Tool without necessary variables. */
this.override = fields.override ?? false;
this.userId = fields.userId;
this.fileStrategy = fields.fileStrategy;
/** @type {boolean} **/
this.isAgent = fields.isAgent;
this.returnMetadata = fields.returnMetadata ?? false;
if (fields.processFileURL) {
/** @type {processFileURL} Necessary for output to contain all image metadata. */
this.processFileURL = fields.processFileURL.bind(this);
}
this.apiKey = fields.FLUX_API_KEY || this.getApiKey();
this.name = 'flux';
this.description =
'Use Flux to generate images from text descriptions. This tool can generate images and list available finetunes. Each generate call creates one image. For multiple images, make multiple consecutive calls.';
this.description_for_model = `// Transform any image description into a detailed, high-quality prompt. Never submit a prompt under 3 sentences. Follow these core rules:
// 1. ALWAYS enhance basic prompts into 5-10 detailed sentences (e.g., "a cat" becomes: "A close-up photo of a sleek Siamese cat with piercing blue eyes. The cat sits elegantly on a vintage leather armchair, its tail curled gracefully around its paws. Warm afternoon sunlight streams through a nearby window, casting gentle shadows across its face and highlighting the subtle variations in its cream and chocolate-point fur. The background is softly blurred, creating a shallow depth of field that draws attention to the cat's expressive features. The overall composition has a peaceful, contemplative mood with a professional photography style.")
// 2. Each prompt MUST be 3-6 descriptive sentences minimum, focusing on visual elements: lighting, composition, mood, and style
// Use action: 'list_finetunes' to see available custom models. When using finetunes, use endpoint: '/v1/flux-pro-finetuned' (default) or '/v1/flux-pro-1.1-ultra-finetuned' for higher quality and aspect ratio.`;
// Add base URL from environment variable with fallback
this.baseUrl = process.env.FLUX_API_BASE_URL || 'https://api.us1.bfl.ai';
// Define the schema for structured input
this.schema = z.object({
action: z
.enum(['generate', 'list_finetunes', 'generate_finetuned'])
.default('generate')
.describe(
'Action to perform: "generate" for image generation, "generate_finetuned" for finetuned model generation, "list_finetunes" to get available custom models',
),
prompt: z
.string()
.optional()
.describe(
'Text prompt for image generation. Required when action is "generate". Not used for list_finetunes.',
),
width: z
.number()
.optional()
.describe(
'Width of the generated image in pixels. Must be a multiple of 32. Default is 1024.',
),
height: z
.number()
.optional()
.describe(
'Height of the generated image in pixels. Must be a multiple of 32. Default is 768.',
),
prompt_upsampling: z
.boolean()
.optional()
.default(false)
.describe('Whether to perform upsampling on the prompt.'),
steps: z
.number()
.int()
.optional()
.describe('Number of steps to run the model for, a number from 1 to 50. Default is 40.'),
seed: z.number().optional().describe('Optional seed for reproducibility.'),
safety_tolerance: z
.number()
.optional()
.default(6)
.describe(
'Tolerance level for input and output moderation. Between 0 and 6, 0 being most strict, 6 being least strict.',
),
endpoint: z
.enum([
'/v1/flux-pro-1.1',
'/v1/flux-pro',
'/v1/flux-dev',
'/v1/flux-pro-1.1-ultra',
'/v1/flux-pro-finetuned',
'/v1/flux-pro-1.1-ultra-finetuned',
])
.optional()
.default('/v1/flux-pro-1.1')
.describe('Endpoint to use for image generation.'),
raw: z
.boolean()
.optional()
.default(false)
.describe(
'Generate less processed, more natural-looking images. Only works for /v1/flux-pro-1.1-ultra.',
),
finetune_id: z.string().optional().describe('ID of the finetuned model to use'),
finetune_strength: z
.number()
.optional()
.default(1.1)
.describe('Strength of the finetuning effect (typically between 0.1 and 1.2)'),
guidance: z.number().optional().default(2.5).describe('Guidance scale for finetuned models'),
aspect_ratio: z
.string()
.optional()
.default('16:9')
.describe('Aspect ratio for ultra models (e.g., "16:9")'),
});
}
getAxiosConfig() {
const config = {};
if (process.env.PROXY) {
config.httpsAgent = new HttpsProxyAgent(process.env.PROXY);
}
return config;
}
/** @param {Object|string} value */
getDetails(value) {
if (typeof value === 'string') {
return value;
}
return JSON.stringify(value, null, 2);
}
getApiKey() {
const apiKey = process.env.FLUX_API_KEY || '';
if (!apiKey && !this.override) {
throw new Error('Missing FLUX_API_KEY environment variable.');
}
return apiKey;
}
wrapInMarkdown(imageUrl) {
const serverDomain = process.env.DOMAIN_SERVER || 'http://localhost:3080';
return `![generated image](${serverDomain}${imageUrl})`;
}
returnValue(value) {
if (this.isAgent === true && typeof value === 'string') {
return [value, {}];
} else if (this.isAgent === true && typeof value === 'object') {
if (Array.isArray(value)) {
return value;
}
return [displayMessage, value];
}
return value;
}
async _call(data) {
const { action = 'generate', ...imageData } = data;
// Use provided API key for this request if available, otherwise use default
const requestApiKey = this.apiKey || this.getApiKey();
// Handle list_finetunes action
if (action === 'list_finetunes') {
return this.getMyFinetunes(requestApiKey);
}
// Handle finetuned generation
if (action === 'generate_finetuned') {
return this.generateFinetunedImage(imageData, requestApiKey);
}
// For generate action, ensure prompt is provided
if (!imageData.prompt) {
throw new Error('Missing required field: prompt');
}
let payload = {
prompt: imageData.prompt,
prompt_upsampling: imageData.prompt_upsampling || false,
safety_tolerance: imageData.safety_tolerance || 6,
output_format: imageData.output_format || 'png',
};
// Add optional parameters if provided
if (imageData.width) {
payload.width = imageData.width;
}
if (imageData.height) {
payload.height = imageData.height;
}
if (imageData.steps) {
payload.steps = imageData.steps;
}
if (imageData.seed !== undefined) {
payload.seed = imageData.seed;
}
if (imageData.raw) {
payload.raw = imageData.raw;
}
const generateUrl = `${this.baseUrl}${imageData.endpoint || '/v1/flux-pro'}`;
const resultUrl = `${this.baseUrl}/v1/get_result`;
logger.debug('[FluxAPI] Generating image with payload:', payload);
logger.debug('[FluxAPI] Using endpoint:', generateUrl);
let taskResponse;
try {
taskResponse = await axios.post(generateUrl, payload, {
headers: {
'x-key': requestApiKey,
'Content-Type': 'application/json',
Accept: 'application/json',
},
...this.getAxiosConfig(),
});
} catch (error) {
const details = this.getDetails(error?.response?.data || error.message);
logger.error('[FluxAPI] Error while submitting task:', details);
return this.returnValue(
`Something went wrong when trying to generate the image. The Flux API may be unavailable:
Error Message: ${details}`,
);
}
const taskId = taskResponse.data.id;
// Polling for the result
let status = 'Pending';
let resultData = null;
while (status !== 'Ready' && status !== 'Error') {
try {
// Wait 2 seconds between polls
await new Promise((resolve) => setTimeout(resolve, 2000));
const resultResponse = await axios.get(resultUrl, {
headers: {
'x-key': requestApiKey,
Accept: 'application/json',
},
params: { id: taskId },
...this.getAxiosConfig(),
});
status = resultResponse.data.status;
if (status === 'Ready') {
resultData = resultResponse.data.result;
break;
} else if (status === 'Error') {
logger.error('[FluxAPI] Error in task:', resultResponse.data);
return this.returnValue('An error occurred during image generation.');
}
} catch (error) {
const details = this.getDetails(error?.response?.data || error.message);
logger.error('[FluxAPI] Error while getting result:', details);
return this.returnValue('An error occurred while retrieving the image.');
}
}
// If no result data
if (!resultData || !resultData.sample) {
logger.error('[FluxAPI] No image data received from API. Response:', resultData);
return this.returnValue('No image data received from Flux API.');
}
// Try saving the image locally
const imageUrl = resultData.sample;
const imageName = `img-${uuidv4()}.png`;
if (this.isAgent) {
try {
// Fetch the image and convert to base64
const fetchOptions = {};
if (process.env.PROXY) {
fetchOptions.agent = new HttpsProxyAgent(process.env.PROXY);
}
const imageResponse = await fetch(imageUrl, fetchOptions);
const arrayBuffer = await imageResponse.arrayBuffer();
const base64 = Buffer.from(arrayBuffer).toString('base64');
const content = [
{
type: ContentTypes.IMAGE_URL,
image_url: {
url: `data:image/png;base64,${base64}`,
},
},
];
const response = [
{
type: ContentTypes.TEXT,
text: displayMessage,
},
];
return [response, { content }];
} catch (error) {
logger.error('Error processing image for agent:', error);
return this.returnValue(`Failed to process the image. ${error.message}`);
}
}
try {
logger.debug('[FluxAPI] Saving image:', imageUrl);
const result = await this.processFileURL({
fileStrategy: this.fileStrategy,
userId: this.userId,
URL: imageUrl,
fileName: imageName,
basePath: 'images',
context: FileContext.image_generation,
});
logger.debug('[FluxAPI] Image saved to path:', result.filepath);
// Calculate cost based on endpoint
/**
* TODO: Cost handling
const endpoint = imageData.endpoint || '/v1/flux-pro';
const endpointKey = Object.entries(FluxAPI.PRICING).find(([key, _]) =>
endpoint.includes(key.toLowerCase().replace(/_/g, '-')),
)?.[0];
const cost = FluxAPI.PRICING[endpointKey] || 0;
*/
this.result = this.returnMetadata ? result : this.wrapInMarkdown(result.filepath);
return this.returnValue(this.result);
} catch (error) {
const details = this.getDetails(error?.message ?? 'No additional error details.');
logger.error('Error while saving the image:', details);
return this.returnValue(`Failed to save the image locally. ${details}`);
}
}
async getMyFinetunes(apiKey = null) {
const finetunesUrl = `${this.baseUrl}/v1/my_finetunes`;
const detailsUrl = `${this.baseUrl}/v1/finetune_details`;
try {
const headers = {
'x-key': apiKey || this.getApiKey(),
'Content-Type': 'application/json',
Accept: 'application/json',
};
// Get list of finetunes
const response = await axios.get(finetunesUrl, {
headers,
...this.getAxiosConfig(),
});
const finetunes = response.data.finetunes;
// Fetch details for each finetune
const finetuneDetails = await Promise.all(
finetunes.map(async (finetuneId) => {
try {
const detailResponse = await axios.get(`${detailsUrl}?finetune_id=${finetuneId}`, {
headers,
...this.getAxiosConfig(),
});
return {
id: finetuneId,
...detailResponse.data,
};
} catch (error) {
logger.error(`[FluxAPI] Error fetching details for finetune ${finetuneId}:`, error);
return {
id: finetuneId,
error: 'Failed to fetch details',
};
}
}),
);
if (this.isAgent) {
const formattedDetails = JSON.stringify(finetuneDetails, null, 2);
return [`Here are the available finetunes:\n${formattedDetails}`, null];
}
return JSON.stringify(finetuneDetails);
} catch (error) {
const details = this.getDetails(error?.response?.data || error.message);
logger.error('[FluxAPI] Error while getting finetunes:', details);
const errorMsg = `Failed to get finetunes: ${details}`;
return this.isAgent ? this.returnValue([errorMsg, {}]) : new Error(errorMsg);
}
}
async generateFinetunedImage(imageData, requestApiKey) {
if (!imageData.prompt) {
throw new Error('Missing required field: prompt');
}
if (!imageData.finetune_id) {
throw new Error(
'Missing required field: finetune_id for finetuned generation. Please supply a finetune_id!',
);
}
// Validate endpoint is appropriate for finetuned generation
const validFinetunedEndpoints = ['/v1/flux-pro-finetuned', '/v1/flux-pro-1.1-ultra-finetuned'];
const endpoint = imageData.endpoint || '/v1/flux-pro-finetuned';
if (!validFinetunedEndpoints.includes(endpoint)) {
throw new Error(
`Invalid endpoint for finetuned generation. Must be one of: ${validFinetunedEndpoints.join(', ')}`,
);
}
let payload = {
prompt: imageData.prompt,
prompt_upsampling: imageData.prompt_upsampling || false,
safety_tolerance: imageData.safety_tolerance || 6,
output_format: imageData.output_format || 'png',
finetune_id: imageData.finetune_id,
finetune_strength: imageData.finetune_strength || 1.0,
guidance: imageData.guidance || 2.5,
};
// Add optional parameters if provided
if (imageData.width) {
payload.width = imageData.width;
}
if (imageData.height) {
payload.height = imageData.height;
}
if (imageData.steps) {
payload.steps = imageData.steps;
}
if (imageData.seed !== undefined) {
payload.seed = imageData.seed;
}
if (imageData.raw) {
payload.raw = imageData.raw;
}
const generateUrl = `${this.baseUrl}${endpoint}`;
const resultUrl = `${this.baseUrl}/v1/get_result`;
logger.debug('[FluxAPI] Generating finetuned image with payload:', payload);
logger.debug('[FluxAPI] Using endpoint:', generateUrl);
let taskResponse;
try {
taskResponse = await axios.post(generateUrl, payload, {
headers: {
'x-key': requestApiKey,
'Content-Type': 'application/json',
Accept: 'application/json',
},
...this.getAxiosConfig(),
});
} catch (error) {
const details = this.getDetails(error?.response?.data || error.message);
logger.error('[FluxAPI] Error while submitting finetuned task:', details);
return this.returnValue(
`Something went wrong when trying to generate the finetuned image. The Flux API may be unavailable:
Error Message: ${details}`,
);
}
const taskId = taskResponse.data.id;
// Polling for the result
let status = 'Pending';
let resultData = null;
while (status !== 'Ready' && status !== 'Error') {
try {
// Wait 2 seconds between polls
await new Promise((resolve) => setTimeout(resolve, 2000));
const resultResponse = await axios.get(resultUrl, {
headers: {
'x-key': requestApiKey,
Accept: 'application/json',
},
params: { id: taskId },
...this.getAxiosConfig(),
});
status = resultResponse.data.status;
if (status === 'Ready') {
resultData = resultResponse.data.result;
break;
} else if (status === 'Error') {
logger.error('[FluxAPI] Error in finetuned task:', resultResponse.data);
return this.returnValue('An error occurred during finetuned image generation.');
}
} catch (error) {
const details = this.getDetails(error?.response?.data || error.message);
logger.error('[FluxAPI] Error while getting finetuned result:', details);
return this.returnValue('An error occurred while retrieving the finetuned image.');
}
}
// If no result data
if (!resultData || !resultData.sample) {
logger.error('[FluxAPI] No image data received from API. Response:', resultData);
return this.returnValue('No image data received from Flux API.');
}
// Try saving the image locally
const imageUrl = resultData.sample;
const imageName = `img-${uuidv4()}.png`;
try {
logger.debug('[FluxAPI] Saving finetuned image:', imageUrl);
const result = await this.processFileURL({
fileStrategy: this.fileStrategy,
userId: this.userId,
URL: imageUrl,
fileName: imageName,
basePath: 'images',
context: FileContext.image_generation,
});
logger.debug('[FluxAPI] Finetuned image saved to path:', result.filepath);
// Calculate cost based on endpoint
const endpointKey = endpoint.includes('ultra')
? 'FLUX_PRO_1_1_ULTRA_FINETUNED'
: 'FLUX_PRO_FINETUNED';
const cost = FluxAPI.PRICING[endpointKey] || 0;
// Return the result based on returnMetadata flag
this.result = this.returnMetadata ? result : this.wrapInMarkdown(result.filepath);
return this.returnValue(this.result);
} catch (error) {
const details = this.getDetails(error?.message ?? 'No additional error details.');
logger.error('Error while saving the finetuned image:', details);
return this.returnValue(`Failed to save the finetuned image locally. ${details}`);
}
}
}
module.exports = FluxAPI;
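
A minimal standalone sketch of how the tool above could be exercised outside LibreChat (not part of the PR). It assumes FLUX_API_KEY is set in the environment; the require path and the processFileURL stub are placeholders for LibreChat's real file service.

const FluxAPI = require('./FluxAPI'); // path is illustrative

async function demo() {
  const flux = new FluxAPI({
    isAgent: false,
    userId: 'demo-user',
    fileStrategy: 'local',
    // Stub: LibreChat normally injects processFileURL to store the generated image.
    processFileURL: async ({ fileName }) => ({ filepath: `/images/demo/${fileName}` }),
  });
  // `action` defaults to 'generate'; only `prompt` is required for that action.
  const markdown = await flux._call({
    prompt:
      'A close-up photo of a lighthouse at dawn, warm golden light on weathered stone, ' +
      'soft mist over calm water, shallow depth of field, professional photography style.',
    width: 1024,
    height: 768,
  });
  console.log(markdown); // e.g. ![generated image](http://localhost:3080/images/demo/img-<uuid>.png)
}

demo().catch(console.error);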

View File

@@ -6,10 +6,13 @@ const axios = require('axios');
const sharp = require('sharp');
const { v4: uuidv4 } = require('uuid');
const { Tool } = require('@langchain/core/tools');
const { FileContext } = require('librechat-data-provider');
const { FileContext, ContentTypes } = require('librechat-data-provider');
const paths = require('~/config/paths');
const { logger } = require('~/config');
const displayMessage =
'Stable Diffusion displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';
class StableDiffusionAPI extends Tool {
constructor(fields) {
super();
@@ -21,6 +24,8 @@ class StableDiffusionAPI extends Tool {
this.override = fields.override ?? false;
/** @type {boolean} Necessary for output to contain all image metadata. */
this.returnMetadata = fields.returnMetadata ?? false;
/** @type {boolean} */
this.isAgent = fields.isAgent;
if (fields.uploadImageBuffer) {
/** @type {uploadImageBuffer} Necessary for output to contain all image metadata. */
this.uploadImageBuffer = fields.uploadImageBuffer.bind(this);
@@ -66,6 +71,16 @@ class StableDiffusionAPI extends Tool {
return `![generated image](/${imageUrl})`;
}
returnValue(value) {
if (this.isAgent === true && typeof value === 'string') {
return [value, {}];
} else if (this.isAgent === true && typeof value === 'object') {
return [displayMessage, value];
}
return value;
}
getServerURL() {
const url = process.env.SD_WEBUI_URL || '';
if (!url && !this.override) {
@@ -113,6 +128,25 @@ class StableDiffusionAPI extends Tool {
}
try {
if (this.isAgent) {
const content = [
{
type: ContentTypes.IMAGE_URL,
image_url: {
url: `data:image/png;base64,${image}`,
},
},
];
const response = [
{
type: ContentTypes.TEXT,
text: displayMessage,
},
];
return [response, { content }];
}
const buffer = Buffer.from(image.split(',', 1)[0], 'base64');
if (this.returnMetadata && this.uploadImageBuffer && this.req) {
const file = await this.uploadImageBuffer({
@@ -154,7 +188,7 @@ class StableDiffusionAPI extends Tool {
logger.error('[StableDiffusion] Error while saving the image:', error);
}
return this.result;
return this.returnValue(this.result);
}
}

View File

@@ -10,6 +10,7 @@ const {
GoogleSearchAPI,
// Structured Tools
DALLE3,
FluxAPI,
OpenWeather,
StructuredSD,
StructuredACS,
@@ -182,6 +183,7 @@ const loadTools = async ({
returnMap = false,
}) => {
const toolConstructors = {
flux: FluxAPI,
calculator: Calculator,
google: GoogleSearchAPI,
open_weather: OpenWeather,
@@ -230,9 +232,10 @@ const loadTools = async ({
};
const toolOptions = {
serpapi: { location: 'Austin,Texas,United States', hl: 'en', gl: 'us' },
flux: imageGenOptions,
dalle: imageGenOptions,
'stable-diffusion': imageGenOptions,
serpapi: { location: 'Austin,Texas,United States', hl: 'en', gl: 'us' },
};
const toolContextMap = {};

View File

@@ -2,6 +2,7 @@ const { z } = require('zod');
const Message = require('./schema/messageSchema');
const { logger } = require('~/config');
// Validate conversation ID as a UUID (if your conversation IDs follow UUID format)
const idSchema = z.string().uuid();
/**
@@ -28,8 +29,11 @@ const idSchema = z.string().uuid();
* @param {string} [params.plugin] - Plugin associated with the message.
* @param {string[]} [params.plugins] - An array of plugins associated with the message.
* @param {string} [params.model] - The model used to generate the message.
* @param {Object} [metadata] - Additional metadata for this operation
* @param {string} [metadata.context] - The context of the operation
* @param {string} [params.iv] - (Optional) Base64-encoded initialization vector for encryption.
* @param {string} [params.authTag] - (Optional) Base64-encoded authentication tag from AES-GCM.
* @param {string} [params.encryptedKey] - (Optional) Base64-encoded AES key encrypted with RSA.
* @param {Object} [metadata] - Additional metadata for this operation.
* @param {string} [metadata.context] - The context of the operation.
* @returns {Promise<TMessage>} The updated or newly inserted message document.
* @throws {Error} If there is an error in saving the message.
*/
@@ -51,6 +55,9 @@ async function saveMessage(req, params, metadata) {
...params,
user: req.user.id,
messageId: params.newMessageId || params.messageId,
iv: params.iv ?? null,
authTag: params.authTag ?? null,
encryptedKey: params.encryptedKey ?? null,
};
if (req?.body?.isTemporary) {
@@ -90,7 +97,12 @@ async function bulkSaveMessages(messages, overrideTimestamp = false) {
const bulkOps = messages.map((message) => ({
updateOne: {
filter: { messageId: message.messageId },
update: message,
update: {
...message,
iv: message.iv ?? null,
authTag: message.authTag ?? null,
encryptedKey: message.encryptedKey ?? null,
},
timestamps: !overrideTimestamp,
upsert: true,
},
@@ -119,14 +131,7 @@ async function bulkSaveMessages(messages, overrideTimestamp = false) {
* @returns {Promise<Object>} The updated or newly inserted message document.
* @throws {Error} If there is an error in saving the message.
*/
async function recordMessage({
user,
endpoint,
messageId,
conversationId,
parentMessageId,
...rest
}) {
async function recordMessage({ user, endpoint, messageId, conversationId, parentMessageId, ...rest }) {
try {
// No parsing of convoId as may use threadId
const message = {
@@ -136,6 +141,9 @@ async function recordMessage({
conversationId,
parentMessageId,
...rest,
iv: rest.iv ?? null,
authTag: rest.authTag ?? null,
encryptedKey: rest.encryptedKey ?? null,
};
return await Message.findOneAndUpdate({ user, messageId }, message, {
@@ -190,12 +198,15 @@ async function updateMessageText(req, { messageId, text }) {
async function updateMessage(req, message, metadata) {
try {
const { messageId, ...update } = message;
// Ensure encryption fields are explicitly updated (if provided)
update.iv = update.iv ?? null;
update.authTag = update.authTag ?? null;
update.encryptedKey = update.encryptedKey ?? null;
const updatedMessage = await Message.findOneAndUpdate(
{ messageId, user: req.user.id },
update,
{
new: true,
},
{ new: true },
);
if (!updatedMessage) {
@@ -225,11 +236,11 @@ async function updateMessage(req, message, metadata) {
*
* @async
* @function deleteMessagesSince
* @param {Object} params - The parameters object.
* @param {Object} req - The request object.
* @param {Object} params - The parameters object.
* @param {string} params.messageId - The unique identifier for the message.
* @param {string} params.conversationId - The identifier of the conversation.
* @returns {Promise<Number>} The number of deleted messages.
* @returns {Promise<number>} The number of deleted messages.
* @throws {Error} If there is an error in deleting messages.
*/
async function deleteMessagesSince(req, { messageId, conversationId }) {
@@ -263,7 +274,6 @@ async function getMessages(filter, select) {
if (select) {
return await Message.find(filter).select(select).sort({ createdAt: 1 }).lean();
}
return await Message.find(filter).sort({ createdAt: 1 }).lean();
} catch (err) {
logger.error('Error getting messages:', err);
@@ -281,10 +291,7 @@ async function getMessages(filter, select) {
*/
async function getMessage({ user, messageId }) {
try {
return await Message.findOne({
user,
messageId,
}).lean();
return await Message.findOne({ user, messageId }).lean();
} catch (err) {
logger.error('Error getting message:', err);
throw err;

View File

@@ -56,6 +56,10 @@ const conversationPreset = {
type: Number,
required: false,
},
maxTokens: {
type: Number,
required: false,
},
presence_penalty: {
type: Number,
required: false,

View File

@@ -137,6 +137,18 @@ const messageSchema = mongoose.Schema(
expiredAt: {
type: Date,
},
iv: {
type: String,
default: null,
},
authTag: {
type: String,
default: null,
},
encryptedKey: {
type: String,
default: null,
},
},
{ timestamps: true },
);

View File

@@ -27,6 +27,10 @@ const { SystemRoles } = require('librechat-data-provider');
* @property {Array} [plugins=[]] - List of plugins used by the user
* @property {Array.<MongoSession>} [refreshToken] - List of sessions with refresh tokens
* @property {Date} [expiresAt] - Optional expiration date of the file
* @property {string} [encryptionPublicKey] - The user's encryption public key
* @property {string} [encryptedPrivateKey] - The user's encrypted private key
* @property {string} [encryptionSalt] - The salt used for key derivation (e.g., PBKDF2)
* @property {string} [encryptionIV] - The IV used for encrypting the private key
* @property {Date} [createdAt] - Date when the user was created (added by timestamps)
* @property {Date} [updatedAt] - Date when the user was last updated (added by timestamps)
*/
@@ -143,6 +147,22 @@ const userSchema = mongoose.Schema(
type: Boolean,
default: false,
},
encryptionPublicKey: {
type: String,
default: null,
},
encryptedPrivateKey: {
type: String,
default: null,
},
encryptionSalt: {
type: String,
default: null,
},
encryptionIV: {
type: String,
default: null,
},
},
{ timestamps: true },
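
The four new user fields imply passphrase-based key management: the RSA private key is stored encrypted, together with the salt and IV needed to re-derive the wrapping key. Below is a sketch of how a client might recover the PEM private key; the KDF parameters, cipher, and ciphertext layout are assumptions for illustration only, since the client-side code (EncryptionPassphrase.tsx) is not shown in this diff.

const crypto = require('crypto');

// Hypothetical recovery of the PEM private key from the stored user fields.
// Assumes PBKDF2-SHA256 (per the schema comment) and AES-256-GCM with the
// auth tag appended to the ciphertext; the real parameters may differ.
function recoverPrivateKey({ encryptedPrivateKey, encryptionSalt, encryptionIV }, passphrase) {
  const key = crypto.pbkdf2Sync(passphrase, Buffer.from(encryptionSalt, 'base64'), 100000, 32, 'sha256');
  const data = Buffer.from(encryptedPrivateKey, 'base64');
  const authTag = data.subarray(data.length - 16);
  const ciphertext = data.subarray(0, data.length - 16);
  const decipher = crypto.createDecipheriv('aes-256-gcm', key, Buffer.from(encryptionIV, 'base64'));
  decipher.setAuthTag(authTag);
  return Buffer.concat([decipher.update(ciphertext), decipher.final()]).toString('utf8'); // PEM string
}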

View File

@@ -79,6 +79,7 @@ const tokenValues = Object.assign(
'o1-mini': { prompt: 1.1, completion: 4.4 },
'o1-preview': { prompt: 15, completion: 60 },
o1: { prompt: 15, completion: 60 },
'gpt-4.5': { prompt: 75, completion: 150 },
'gpt-4o-mini': { prompt: 0.15, completion: 0.6 },
'gpt-4o': { prompt: 2.5, completion: 10 },
'gpt-4o-2024-05-13': { prompt: 5, completion: 15 },
@@ -167,6 +168,8 @@ const getValueKey = (model, endpoint) => {
return 'o1-mini';
} else if (modelName.includes('o1')) {
return 'o1';
} else if (modelName.includes('gpt-4.5')) {
return 'gpt-4.5';
} else if (modelName.includes('gpt-4o-2024-05-13')) {
return 'gpt-4o-2024-05-13';
} else if (modelName.includes('gpt-4o-mini')) {
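
Assuming these rates are USD per one million tokens (consistent with the existing gpt-4o entry), the new gpt-4.5 row prices a request roughly as follows:

// Hypothetical cost check for the new 'gpt-4.5' entry (rates assumed to be USD per 1M tokens).
const rate = { prompt: 75, completion: 150 };
const costUSD = (1000 * rate.prompt + 500 * rate.completion) / 1e6;
console.log(costUSD); // 0.15 -> $0.15 for 1,000 prompt tokens + 500 completion tokens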

View File

@@ -50,6 +50,16 @@ describe('getValueKey', () => {
expect(getValueKey('gpt-4-0125')).toBe('gpt-4-1106');
});
it('should return "gpt-4.5" for model type of "gpt-4.5"', () => {
expect(getValueKey('gpt-4.5-preview')).toBe('gpt-4.5');
expect(getValueKey('gpt-4.5-2024-08-06')).toBe('gpt-4.5');
expect(getValueKey('gpt-4.5-2024-08-06-0718')).toBe('gpt-4.5');
expect(getValueKey('openai/gpt-4.5')).toBe('gpt-4.5');
expect(getValueKey('openai/gpt-4.5-2024-08-06')).toBe('gpt-4.5');
expect(getValueKey('gpt-4.5-turbo')).toBe('gpt-4.5');
expect(getValueKey('gpt-4.5-0125')).toBe('gpt-4.5');
});
it('should return "gpt-4o" for model type of "gpt-4o"', () => {
expect(getValueKey('gpt-4o-2024-08-06')).toBe('gpt-4o');
expect(getValueKey('gpt-4o-2024-08-06-0718')).toBe('gpt-4o');

View File

@@ -36,7 +36,7 @@
"dependencies": {
"@anthropic-ai/sdk": "^0.37.0",
"@azure/search-documents": "^12.0.0",
"@google/generative-ai": "^0.21.0",
"@google/generative-ai": "^0.23.0",
"@googleapis/youtube": "^20.0.0",
"@keyv/mongo": "^2.1.8",
"@keyv/redis": "^2.8.1",
@@ -45,7 +45,7 @@
"@langchain/google-genai": "^0.1.9",
"@langchain/google-vertexai": "^0.2.0",
"@langchain/textsplitters": "^0.1.0",
"@librechat/agents": "^2.1.3",
"@librechat/agents": "^2.1.8",
"@waylaidwanderer/fetch-event-source": "^3.0.1",
"axios": "1.7.8",
"bcryptjs": "^2.4.3",

View File

@@ -1,9 +1,59 @@
const { getResponseSender, Constants } = require('librechat-data-provider');
const { createAbortController, handleAbortError } = require('~/server/middleware');
const { sendMessage, createOnProgress } = require('~/server/utils');
const { saveMessage } = require('~/models');
const { saveMessage, getUserById } = require('~/models');
const { logger } = require('~/config');
let crypto;
try {
crypto = require('crypto');
} catch (err) {
logger.error('[AskController] crypto support is disabled!', err);
}
/**
* Helper function to encrypt plaintext using AES-256-GCM and then RSA-encrypt the AES key.
* @param {string} plainText - The plaintext to encrypt.
* @param {string} pemPublicKey - The RSA public key in PEM format.
* @returns {Object} An object containing the ciphertext, iv, authTag, and encryptedKey.
*/
function encryptText(plainText, pemPublicKey) {
// Generate a random 256-bit AES key and a 12-byte IV.
const aesKey = crypto.randomBytes(32);
const iv = crypto.randomBytes(12);
// Encrypt the plaintext using AES-256-GCM.
const cipher = crypto.createCipheriv('aes-256-gcm', aesKey, iv);
let ciphertext = cipher.update(plainText, 'utf8', 'base64');
ciphertext += cipher.final('base64');
const authTag = cipher.getAuthTag().toString('base64');
// Encrypt the AES key using the user's RSA public key.
const encryptedKey = crypto.publicEncrypt(
{
key: pemPublicKey,
padding: crypto.constants.RSA_PKCS1_OAEP_PADDING,
oaepHash: 'sha256',
},
aesKey,
).toString('base64');
return {
ciphertext,
iv: iv.toString('base64'),
authTag,
encryptedKey,
};
}
/**
* AskController
* - Initializes the client.
* - Obtains the response from the language model.
* - Retrieves the full user record (to get encryption parameters).
* - If the user has encryption enabled (i.e. encryptionPublicKey is provided),
* encrypts both the request (userMessage) and the response before saving.
*/
const AskController = async (req, res, next, initializeClient, addTitle) => {
let {
text,
@@ -32,7 +82,22 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
modelDisplayLabel,
});
const newConvo = !conversationId;
const user = req.user.id;
const userId = req.user.id; // User ID from authentication
// Retrieve full user record from DB (including encryption parameters)
const dbUser = await getUserById(userId, 'encryptionPublicKey encryptedPrivateKey encryptionSalt encryptionIV');
// Build clientOptions including the encryptionPublicKey (if available)
const clientOptions = {
encryptionPublicKey: dbUser?.encryptionPublicKey,
};
// Rebuild PEM format if encryptionPublicKey is available
let pemPublicKey = null;
if (clientOptions.encryptionPublicKey && clientOptions.encryptionPublicKey.trim() !== '') {
const pubKeyBase64 = clientOptions.encryptionPublicKey;
pemPublicKey = `-----BEGIN PUBLIC KEY-----\n${pubKeyBase64.match(/.{1,64}/g).join('\n')}\n-----END PUBLIC KEY-----`;
}
const getReqData = (data = {}) => {
for (let key in data) {
@@ -52,11 +117,10 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
};
let getText;
try {
const { client } = await initializeClient({ req, res, endpointOption });
// Pass clientOptions (which includes encryptionPublicKey) along with other parameters to initializeClient
const { client } = await initializeClient({ req, res, endpointOption, ...clientOptions });
const { onProgress: progressCallback, getPartialText } = createOnProgress();
getText = client.getStreamText != null ? client.getStreamText.bind(client) : getPartialText;
const getAbortData = () => ({
@@ -74,20 +138,14 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
res.on('close', () => {
logger.debug('[AskController] Request closed');
if (!abortController) {
return;
} else if (abortController.signal.aborted) {
return;
} else if (abortController.requestCompleted) {
return;
}
if (!abortController) { return; }
if (abortController.signal.aborted || abortController.requestCompleted) { return; }
abortController.abort();
logger.debug('[AskController] Request aborted on close');
});
const messageOptions = {
user,
user: userId,
parentMessageId,
conversationId,
overrideParentMessageId,
@@ -95,16 +153,14 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
onStart,
abortController,
progressCallback,
progressOptions: {
res,
// parentMessageId: overrideParentMessageId || userMessageId,
},
progressOptions: { res },
};
/** @type {TMessage} */
// Get the response from the language model client.
let response = await client.sendMessage(text, messageOptions);
response.endpoint = endpointOption.endpoint;
// Ensure the conversation has a title.
const { conversation = {} } = await client.responsePromise;
conversation.title =
conversation && !conversation.title ? null : conversation?.title || 'New Chat';
@@ -115,6 +171,35 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
delete userMessage.image_urls;
}
// --- Encrypt the user message if encryption is enabled ---
if (pemPublicKey && userMessage && userMessage.text) {
try {
const { ciphertext, iv, authTag, encryptedKey } = encryptText(userMessage.text, pemPublicKey);
userMessage.text = ciphertext;
userMessage.iv = iv;
userMessage.authTag = authTag;
userMessage.encryptedKey = encryptedKey;
logger.debug('[AskController] User message encrypted.');
} catch (encError) {
logger.error('[AskController] Error encrypting user message:', encError);
}
}
// --- Encrypt the AI response if encryption is enabled ---
if (pemPublicKey && response.text) {
try {
const { ciphertext, iv, authTag, encryptedKey } = encryptText(response.text, pemPublicKey);
response.text = ciphertext;
response.iv = iv;
response.authTag = authTag;
response.encryptedKey = encryptedKey;
logger.debug('[AskController] Response message encrypted.');
} catch (encError) {
logger.error('[AskController] Error encrypting response message:', encError);
}
}
// --- End Encryption Branch ---
if (!abortController.signal.aborted) {
sendMessage(res, {
final: true,
@@ -128,15 +213,15 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
if (!client.savedMessageIds.has(response.messageId)) {
await saveMessage(
req,
{ ...response, user },
{ context: 'api/server/controllers/AskController.js - response end' },
{ ...response, user: userId },
{ context: 'AskController - response end' },
);
}
}
if (!client.skipSaveUserMessage) {
await saveMessage(req, userMessage, {
context: 'api/server/controllers/AskController.js - don\'t skip saving user message',
context: 'AskController - save user message',
});
}
@@ -156,9 +241,9 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
messageId: responseMessageId,
parentMessageId: overrideParentMessageId ?? userMessageId ?? parentMessageId,
}).catch((err) => {
logger.error('[AskController] Error in `handleAbortError`', err);
logger.error('[AskController] Error in handleAbortError', err);
});
}
};
module.exports = AskController;
module.exports = AskController;

View File

@@ -7,6 +7,7 @@ const {
deleteMessages,
deleteUserById,
deleteAllUserSessions,
updateUser,
} = require('~/models');
const User = require('~/models/User');
const { updateUserPluginAuth, deleteUserPluginAuth } = require('~/server/services/PluginService');
@@ -164,6 +165,37 @@ const resendVerificationController = async (req, res) => {
}
};
const updateUserEncryptionController = async (req, res) => {
try {
const { encryptionPublicKey, encryptedPrivateKey, encryptionSalt, encryptionIV } = req.body;
// Allow disabling encryption by passing null for all fields.
const allNull = encryptionPublicKey === null && encryptedPrivateKey === null && encryptionSalt === null && encryptionIV === null;
const allPresent = encryptionPublicKey && encryptedPrivateKey && encryptionSalt && encryptionIV;
if (!allNull && !allPresent) {
return res.status(400).json({ message: 'Missing encryption parameters.' });
}
// Update the user record with the provided encryption parameters (or null to disable)
const updatedUser = await updateUser(req.user.id, {
encryptionPublicKey: encryptionPublicKey || null,
encryptedPrivateKey: encryptedPrivateKey || null,
encryptionSalt: encryptionSalt || null,
encryptionIV: encryptionIV || null,
});
if (!updatedUser) {
return res.status(404).json({ message: 'User not found.' });
}
res.status(200).json({ success: true });
} catch (error) {
logger.error('[updateUserEncryptionController]', error);
res.status(500).json({ message: 'Something went wrong updating encryption keys.' });
}
};
module.exports = {
getUserController,
getTermsStatusController,
@@ -172,4 +204,5 @@ module.exports = {
verifyEmailController,
updateUserPluginsController,
resendVerificationController,
updateUserEncryptionController,
};
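For reference, a minimal sketch of the client call this controller handles (hedged: the router change below adds PUT /encryption behind requireJwtAuth; the /api/user mount prefix is an assumption, authentication headers/cookies are omitted, and the four variables correspond to the values produced by the settings component added later in this diff):

// Enable encryption: all four fields must be present (see the allPresent check above).
await fetch('/api/user/encryption', {
  method: 'PUT',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ encryptionPublicKey, encryptedPrivateKey, encryptionSalt, encryptionIV }),
});

// Disable encryption: all four fields must be explicitly null (see the allNull check above).
await fetch('/api/user/encryption', {
  method: 'PUT',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    encryptionPublicKey: null,
    encryptedPrivateKey: null,
    encryptionSalt: null,
    encryptionIV: null,
  }),
});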

View File

@@ -1,4 +1,5 @@
const { Tools, StepTypes, imageGenTools, FileContext } = require('librechat-data-provider');
const { nanoid } = require('nanoid');
const { Tools, StepTypes, FileContext } = require('librechat-data-provider');
const {
EnvVar,
Providers,
@@ -242,32 +243,6 @@ function createToolEndCallback({ req, res, artifactPromises }) {
return;
}
if (imageGenTools.has(output.name)) {
artifactPromises.push(
(async () => {
const fileMetadata = Object.assign(output.artifact, {
messageId: metadata.run_id,
toolCallId: output.tool_call_id,
conversationId: metadata.thread_id,
});
if (!res.headersSent) {
return fileMetadata;
}
if (!fileMetadata) {
return null;
}
res.write(`event: attachment\ndata: ${JSON.stringify(fileMetadata)}\n\n`);
return fileMetadata;
})().catch((error) => {
logger.error('Error processing code output:', error);
return null;
}),
);
return;
}
if (output.artifact.content) {
/** @type {FormattedContent[]} */
const content = output.artifact.content;
@@ -278,7 +253,7 @@ function createToolEndCallback({ req, res, artifactPromises }) {
const { url } = part.image_url;
artifactPromises.push(
(async () => {
const filename = `${output.tool_call_id}-image-${new Date().getTime()}`;
const filename = `${output.name}_${output.tool_call_id}_img_${nanoid()}`;
const file = await saveBase64Image(url, {
req,
filename,

View File

@@ -17,7 +17,7 @@ const {
KnownEndpoints,
anthropicSchema,
isAgentsEndpoint,
bedrockOutputParser,
bedrockInputSchema,
removeNullishValues,
} = require('librechat-data-provider');
const {
@@ -27,10 +27,11 @@ const {
formatContentStrings,
createContextHandlers,
} = require('~/app/clients/prompts');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
const { getBufferString, HumanMessage } = require('@langchain/core/messages');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { getCustomEndpointConfig } = require('~/server/services/Config');
const Tokenizer = require('~/server/services/Tokenizer');
const { spendTokens } = require('~/models/spendTokens');
const BaseClient = require('~/app/clients/BaseClient');
const { createRun } = require('./run');
const { logger } = require('~/config');
@@ -39,10 +40,10 @@ const { logger } = require('~/config');
/** @typedef {import('@langchain/core/runnables').RunnableConfig} RunnableConfig */
const providerParsers = {
[EModelEndpoint.openAI]: openAISchema,
[EModelEndpoint.azureOpenAI]: openAISchema,
[EModelEndpoint.anthropic]: anthropicSchema,
[EModelEndpoint.bedrock]: bedrockOutputParser,
[EModelEndpoint.openAI]: openAISchema.parse,
[EModelEndpoint.azureOpenAI]: openAISchema.parse,
[EModelEndpoint.anthropic]: anthropicSchema.parse,
[EModelEndpoint.bedrock]: bedrockInputSchema.parse,
};
const legacyContentEndpoints = new Set([KnownEndpoints.groq, KnownEndpoints.deepseek]);
@@ -187,7 +188,14 @@ class AgentClient extends BaseClient {
: {};
if (parseOptions) {
runOptions = parseOptions(this.options.agent.model_parameters);
try {
runOptions = parseOptions(this.options.agent.model_parameters);
} catch (error) {
logger.error(
'[api/server/controllers/agents/client.js #getSaveOptions] Error parsing options',
error,
);
}
}
return removeNullishValues(
@@ -380,15 +388,34 @@ class AgentClient extends BaseClient {
if (!collectedUsage || !collectedUsage.length) {
return;
}
const input_tokens = collectedUsage[0]?.input_tokens || 0;
const input_tokens =
(collectedUsage[0]?.input_tokens || 0) +
(Number(collectedUsage[0]?.input_token_details?.cache_creation) || 0) +
(Number(collectedUsage[0]?.input_token_details?.cache_read) || 0);
let output_tokens = 0;
let previousTokens = input_tokens; // Start with original input
for (let i = 0; i < collectedUsage.length; i++) {
const usage = collectedUsage[i];
if (!usage) {
continue;
}
const cache_creation = Number(usage.input_token_details?.cache_creation) || 0;
const cache_read = Number(usage.input_token_details?.cache_read) || 0;
const txMetadata = {
context,
conversationId: this.conversationId,
user: this.user ?? this.options.req.user?.id,
endpointTokenConfig: this.options.endpointTokenConfig,
model: usage.model ?? model ?? this.model ?? this.options.agent.model_parameters.model,
};
if (i > 0) {
// Count new tokens generated (input_tokens minus previous accumulated tokens)
output_tokens += (Number(usage.input_tokens) || 0) - previousTokens;
output_tokens +=
(Number(usage.input_tokens) || 0) + cache_creation + cache_read - previousTokens;
}
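// Worked example (hypothetical numbers, for illustration):
//   collectedUsage[0] = { input_tokens: 1000, output_tokens: 50, input_token_details: { cache_creation: 200, cache_read: 0 } }
//   collectedUsage[1] = { input_tokens: 1300, output_tokens: 30, input_token_details: { cache_creation: 0, cache_read: 100 } }
// input_tokens = 1000 + 200 + 0 = 1200, so previousTokens starts at 1200.
// i = 0: output_tokens += 50 (this message's output) -> 50; previousTokens -> 1250.
// i = 1: output_tokens += (1300 + 0 + 100) - 1250 = 150 (new prompt tokens added since the last call),
//        then += 30 for this message's output -> 230 total completion tokens.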
// Add this message's output tokens
@@ -396,16 +423,26 @@ class AgentClient extends BaseClient {
// Update previousTokens to include this message's output
previousTokens += Number(usage.output_tokens) || 0;
spendTokens(
{
context,
conversationId: this.conversationId,
user: this.user ?? this.options.req.user?.id,
endpointTokenConfig: this.options.endpointTokenConfig,
model: usage.model ?? model ?? this.model ?? this.options.agent.model_parameters.model,
},
{ promptTokens: usage.input_tokens, completionTokens: usage.output_tokens },
).catch((err) => {
if (cache_creation > 0 || cache_read > 0) {
spendStructuredTokens(txMetadata, {
promptTokens: {
input: usage.input_tokens,
write: cache_creation,
read: cache_read,
},
completionTokens: usage.output_tokens,
}).catch((err) => {
logger.error(
'[api/server/controllers/agents/client.js #recordCollectedUsage] Error spending structured tokens',
err,
);
});
}
spendTokens(txMetadata, {
promptTokens: usage.input_tokens,
completionTokens: usage.output_tokens,
}).catch((err) => {
logger.error(
'[api/server/controllers/agents/client.js #recordCollectedUsage] Error spending tokens',
err,
@@ -766,6 +803,10 @@ class AgentClient extends BaseClient {
);
}
} catch (err) {
logger.error(
'[api/server/controllers/agents/client.js #sendCompletion] Operation aborted',
err,
);
if (!abortController.signal.aborted) {
logger.error(
'[api/server/controllers/agents/client.js #sendCompletion] Unhandled error type',
@@ -773,11 +814,6 @@ class AgentClient extends BaseClient {
);
throw err;
}
logger.warn(
'[api/server/controllers/agents/client.js #sendCompletion] Operation aborted',
err,
);
}
}
@@ -792,14 +828,20 @@ class AgentClient extends BaseClient {
throw new Error('Run not initialized');
}
const { handleLLMEnd, collected: collectedMetadata } = createMetadataAggregator();
const clientOptions = {};
const providerConfig = this.options.req.app.locals[this.options.agent.provider];
/** @type {import('@librechat/agents').ClientOptions} */
const clientOptions = {
maxTokens: 75,
};
let endpointConfig = this.options.req.app.locals[this.options.agent.endpoint];
if (!endpointConfig) {
endpointConfig = await getCustomEndpointConfig(this.options.agent.endpoint);
}
if (
providerConfig &&
providerConfig.titleModel &&
providerConfig.titleModel !== Constants.CURRENT_MODEL
endpointConfig &&
endpointConfig.titleModel &&
endpointConfig.titleModel !== Constants.CURRENT_MODEL
) {
clientOptions.model = providerConfig.titleModel;
clientOptions.model = endpointConfig.titleModel;
}
try {
const titleResult = await this.run.generateTitle({

View File

@@ -45,7 +45,10 @@ async function createRun({
/** @type {'reasoning_content' | 'reasoning'} */
let reasoningKey;
if (llmConfig.configuration?.baseURL?.includes(KnownEndpoints.openrouter)) {
if (
llmConfig.configuration?.baseURL?.includes(KnownEndpoints.openrouter) ||
(agent.endpoint && agent.endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
) {
reasoningKey = 'reasoning';
}
if (/o1(?!-(?:mini|preview)).*$/.test(llmConfig.model)) {

View File

@@ -8,12 +8,14 @@ const {
resendVerificationController,
getTermsStatusController,
acceptTermsController,
updateUserEncryptionController,
} = require('~/server/controllers/UserController');
const router = express.Router();
router.get('/', requireJwtAuth, getUserController);
router.get('/terms', requireJwtAuth, getTermsStatusController);
router.put('/encryption', requireJwtAuth, updateUserEncryptionController);
router.post('/terms/accept', requireJwtAuth, acceptTermsController);
router.post('/plugins', requireJwtAuth, updateUserPluginsController);
router.delete('/delete', requireJwtAuth, canDeleteAccount, deleteUserController);

View File

@@ -101,6 +101,7 @@ const initializeAgentOptions = async ({
});
const provider = agent.provider;
agent.endpoint = provider;
let getOptions = providerConfigMap[provider];
if (!getOptions && providerConfigMap[provider.toLowerCase()] != null) {
agent.provider = provider.toLowerCase();
@@ -112,9 +113,7 @@ const initializeAgentOptions = async ({
}
getOptions = initCustom;
agent.provider = Providers.OPENAI;
agent.endpoint = provider.toLowerCase();
}
const model_parameters = Object.assign(
{},
agent.model_parameters ?? { model: agent.model },

View File

@@ -20,10 +20,19 @@ const addTitle = async (req, { text, response, client }) => {
const titleCache = getLogStores(CacheKeys.GEN_TITLE);
const key = `${req.user.id}-${response.conversationId}`;
const responseText =
response?.content && Array.isArray(response?.content)
? response.content.reduce((acc, block) => {
if (block?.type === 'text') {
return acc + block.text;
}
return acc;
}, '')
: (response?.content ?? response?.text ?? '');
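// Illustration (hypothetical content array):
//   response.content = [{ type: 'text', text: 'Hello' }, { type: 'tool_call' }, { type: 'text', text: ' world' }]
// yields responseText === 'Hello world' (non-text blocks are skipped);
// when content is not an array, it falls back to response.content ?? response.text ?? ''.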
const title = await client.titleConvo({
text,
responseText: response?.text ?? '',
responseText,
conversationId: response.conversationId,
});
await titleCache.set(key, title, 120000);

View File

@@ -48,7 +48,8 @@ function getClaudeHeaders(model, supportsCacheControl) {
};
} else if (/claude-3[-.]7/.test(model)) {
return {
'anthropic-beta': 'output-128k-2025-02-19,prompt-caching-2024-07-31',
'anthropic-beta':
'token-efficient-tools-2025-02-19,output-128k-2025-02-19,prompt-caching-2024-07-31',
};
} else {
return {

View File

@@ -27,6 +27,7 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
if (anthropicConfig) {
clientOptions.streamRate = anthropicConfig.streamRate;
clientOptions.titleModel = anthropicConfig.titleModel;
}
/** @type {undefined | TBaseEndpoint} */

View File

@@ -1,6 +1,6 @@
const { HttpsProxyAgent } = require('https-proxy-agent');
const { anthropicSettings, removeNullishValues } = require('librechat-data-provider');
const { checkPromptCacheSupport, getClaudeHeaders } = require('./helpers');
const { checkPromptCacheSupport, getClaudeHeaders, configureReasoning } = require('./helpers');
/**
* Generates configuration options for creating an Anthropic language model (LLM) instance.
@@ -49,13 +49,14 @@ function getLLMConfig(apiKey, options = {}) {
clientOptions: {},
};
requestOptions = configureReasoning(requestOptions, systemOptions);
if (!/claude-3[-.]7/.test(mergedOptions.model)) {
if (mergedOptions.topP !== undefined) {
requestOptions.topP = mergedOptions.topP;
}
if (mergedOptions.topK !== undefined) {
requestOptions.topK = mergedOptions.topK;
}
requestOptions.topP = mergedOptions.topP;
requestOptions.topK = mergedOptions.topK;
} else if (requestOptions.thinking == null) {
requestOptions.topP = mergedOptions.topP;
requestOptions.topK = mergedOptions.topK;
}
const supportsCacheControl =

View File

@@ -109,4 +109,45 @@ describe('getLLMConfig', () => {
// Just verifying that the promptCache setting is processed
expect(result.llmConfig).toBeDefined();
});
it('should include topK and topP for Claude-3.7 models when thinking is not enabled', () => {
// Test with thinking explicitly set to null/undefined
const result = getLLMConfig('test-api-key', {
modelOptions: {
model: 'claude-3-7-sonnet',
topK: 10,
topP: 0.9,
thinking: false,
},
});
expect(result.llmConfig).toHaveProperty('topK', 10);
expect(result.llmConfig).toHaveProperty('topP', 0.9);
// Test with thinking explicitly set to false
const result2 = getLLMConfig('test-api-key', {
modelOptions: {
model: 'claude-3-7-sonnet',
topK: 10,
topP: 0.9,
thinking: false,
},
});
expect(result2.llmConfig).toHaveProperty('topK', 10);
expect(result2.llmConfig).toHaveProperty('topP', 0.9);
// Test with decimal notation as well
const result3 = getLLMConfig('test-api-key', {
modelOptions: {
model: 'claude-3.7-sonnet',
topK: 10,
topP: 0.9,
thinking: false,
},
});
expect(result3.llmConfig).toHaveProperty('topK', 10);
expect(result3.llmConfig).toHaveProperty('topP', 0.9);
});
});

View File

@@ -1,6 +1,5 @@
const { removeNullishValues, bedrockInputParser } = require('librechat-data-provider');
const { removeNullishValues } = require('librechat-data-provider');
const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
const { logger } = require('~/config');
const buildOptions = (endpoint, parsedBody) => {
const {
@@ -15,12 +14,6 @@ const buildOptions = (endpoint, parsedBody) => {
artifacts,
...model_parameters
} = parsedBody;
let parsedParams = model_parameters;
try {
parsedParams = bedrockInputParser.parse(model_parameters);
} catch (error) {
logger.warn('Failed to parse bedrock input', error);
}
const endpointOption = removeNullishValues({
endpoint,
name,
@@ -31,7 +24,7 @@ const buildOptions = (endpoint, parsedBody) => {
spec,
promptPrefix,
maxContextTokens,
model_parameters: parsedParams,
model_parameters,
});
if (typeof artifacts === 'string') {

View File

@@ -1,14 +1,16 @@
const { HttpsProxyAgent } = require('https-proxy-agent');
const {
EModelEndpoint,
Constants,
AuthType,
Constants,
EModelEndpoint,
bedrockInputParser,
bedrockOutputParser,
removeNullishValues,
} = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const { sleep } = require('~/server/utils');
const getOptions = async ({ req, endpointOption }) => {
const getOptions = async ({ req, overrideModel, endpointOption }) => {
const {
BEDROCK_AWS_SECRET_ACCESS_KEY,
BEDROCK_AWS_ACCESS_KEY_ID,
@@ -62,39 +64,44 @@ const getOptions = async ({ req, endpointOption }) => {
/** @type {BedrockClientOptions} */
const requestOptions = {
model: endpointOption.model,
model: overrideModel ?? endpointOption.model,
region: BEDROCK_AWS_DEFAULT_REGION,
streaming: true,
streamUsage: true,
callbacks: [
{
handleLLMNewToken: async () => {
if (!streamRate) {
return;
}
await sleep(streamRate);
},
},
],
};
if (credentials) {
requestOptions.credentials = credentials;
}
if (BEDROCK_REVERSE_PROXY) {
requestOptions.endpointHost = BEDROCK_REVERSE_PROXY;
}
const configOptions = {};
if (PROXY) {
/** NOTE: NOT SUPPORTED BY BEDROCK */
configOptions.httpAgent = new HttpsProxyAgent(PROXY);
}
const llmConfig = bedrockOutputParser(
bedrockInputParser.parse(
removeNullishValues(Object.assign(requestOptions, endpointOption.model_parameters)),
),
);
if (credentials) {
llmConfig.credentials = credentials;
}
if (BEDROCK_REVERSE_PROXY) {
llmConfig.endpointHost = BEDROCK_REVERSE_PROXY;
}
llmConfig.callbacks = [
{
handleLLMNewToken: async () => {
if (!streamRate) {
return;
}
await sleep(streamRate);
},
},
];
return {
/** @type {BedrockClientOptions} */
llmConfig: removeNullishValues(Object.assign(requestOptions, endpointOption.model_parameters)),
llmConfig,
configOptions,
};
};

View File

@@ -141,7 +141,7 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrid
},
clientOptions,
);
const options = getLLMConfig(apiKey, clientOptions);
const options = getLLMConfig(apiKey, clientOptions, endpoint);
if (!customOptions.streamRate) {
return options;
}

View File

@@ -5,12 +5,7 @@ const { isEnabled } = require('~/server/utils');
const { GoogleClient } = require('~/app');
const initializeClient = async ({ req, res, endpointOption, overrideModel, optionsOnly }) => {
const {
GOOGLE_KEY,
GOOGLE_REVERSE_PROXY,
GOOGLE_AUTH_HEADER,
PROXY,
} = process.env;
const { GOOGLE_KEY, GOOGLE_REVERSE_PROXY, GOOGLE_AUTH_HEADER, PROXY } = process.env;
const isUserProvided = GOOGLE_KEY === 'user_provided';
const { key: expiresAt } = req.body;
@@ -43,6 +38,7 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
if (googleConfig) {
clientOptions.streamRate = googleConfig.streamRate;
clientOptions.titleModel = googleConfig.titleModel;
}
if (allConfig) {

View File

@@ -113,6 +113,7 @@ const initializeClient = async ({
if (!isAzureOpenAI && openAIConfig) {
clientOptions.streamRate = openAIConfig.streamRate;
clientOptions.titleModel = openAIConfig.titleModel;
}
/** @type {undefined | TBaseEndpoint} */

View File

@@ -23,13 +23,13 @@ const { isEnabled } = require('~/server/utils');
* @param {boolean} [options.streaming] - Whether to use streaming mode.
* @param {Object} [options.addParams] - Additional parameters to add to the model options.
* @param {string[]} [options.dropParams] - Parameters to remove from the model options.
 * @param {string|null} [endpoint=null] - The endpoint name.
* @returns {Object} Configuration options for creating an LLM instance.
*/
function getLLMConfig(apiKey, options = {}) {
function getLLMConfig(apiKey, options = {}, endpoint = null) {
const {
modelOptions = {},
reverseProxyUrl,
useOpenRouter,
defaultQuery,
headers,
proxy,
@@ -56,9 +56,14 @@ function getLLMConfig(apiKey, options = {}) {
});
}
let useOpenRouter;
/** @type {OpenAIClientOptions['configuration']} */
const configOptions = {};
if (useOpenRouter || (reverseProxyUrl && reverseProxyUrl.includes(KnownEndpoints.openrouter))) {
if (
(reverseProxyUrl && reverseProxyUrl.includes(KnownEndpoints.openrouter)) ||
(endpoint && endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
) {
useOpenRouter = true;
llmConfig.include_reasoning = true;
configOptions.baseURL = reverseProxyUrl;
configOptions.defaultHeaders = Object.assign(
@@ -118,6 +123,13 @@ function getLLMConfig(apiKey, options = {}) {
llmConfig.organization = process.env.OPENAI_ORGANIZATION;
}
if (useOpenRouter && llmConfig.reasoning_effort != null) {
llmConfig.reasoning = {
effort: llmConfig.reasoning_effort,
};
delete llmConfig.reasoning_effort;
}
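// Illustration (hypothetical value): with an OpenRouter endpoint and reasoning_effort === 'high',
// the block above produces llmConfig.reasoning = { effort: 'high' } and deletes the flat
// reasoning_effort field (include_reasoning was already set when useOpenRouter was detected).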
return {
/** @type {OpenAIClientOptions} */
llmConfig,

View File

@@ -13,6 +13,7 @@ const openAIModels = {
'gpt-4-32k-0613': 32758, // -10 from max
'gpt-4-1106': 127500, // -500 from max
'gpt-4-0125': 127500, // -500 from max
'gpt-4.5': 127500, // -500 from max
'gpt-4o': 127500, // -500 from max
'gpt-4o-mini': 127500, // -500 from max
'gpt-4o-2024-05-13': 127500, // -500 from max

View File

@@ -103,6 +103,16 @@ describe('getModelMaxTokens', () => {
);
});
test('should return correct tokens for gpt-4.5 matches', () => {
expect(getModelMaxTokens('gpt-4.5')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-4.5']);
expect(getModelMaxTokens('gpt-4.5-preview')).toBe(
maxTokensMap[EModelEndpoint.openAI]['gpt-4.5'],
);
expect(getModelMaxTokens('openai/gpt-4.5-preview')).toBe(
maxTokensMap[EModelEndpoint.openAI]['gpt-4.5'],
);
});
test('should return correct tokens for Anthropic models', () => {
const models = [
'claude-2.1',

View File

@@ -109,7 +109,9 @@ const ContentParts = memo(
return val;
})
}
label={isSubmitting ? localize('com_ui_thinking') : localize('com_ui_thoughts')}
label={
isSubmitting && isLast ? localize('com_ui_thinking') : localize('com_ui_thoughts')
}
/>
</div>
)}

View File

@@ -29,6 +29,7 @@ const Image = ({
height,
width,
placeholderDimensions,
className,
}: {
imagePath: string;
altText: string;
@@ -38,6 +39,7 @@ const Image = ({
height?: string;
width?: string;
};
className?: string;
}) => {
const [isLoaded, setIsLoaded] = useState(false);
const containerRef = useRef<HTMLDivElement>(null);
@@ -57,7 +59,12 @@ const Image = ({
return (
<Dialog.Root>
<div ref={containerRef}>
<div className="relative mt-1 flex h-auto w-full max-w-lg items-center justify-center overflow-hidden bg-gray-200 text-gray-500 dark:bg-gray-700 dark:text-gray-400">
<div
className={cn(
'relative mt-1 flex h-auto w-full max-w-lg items-center justify-center overflow-hidden bg-surface-active-alt text-text-secondary-alt',
className,
)}
>
<Dialog.Trigger asChild>
<button type="button" aria-haspopup="dialog" aria-expanded="false">
<LazyLoadImage

View File

@@ -1,4 +1,4 @@
import { memo, Suspense, useMemo } from 'react';
import React, { memo, Suspense, useMemo, useEffect, useState } from 'react';
import { useRecoilValue } from 'recoil';
import type { TMessage } from 'librechat-data-provider';
import type { TMessageContentProps, TDisplayProps } from '~/common';
@@ -13,6 +13,77 @@ import Container from './Container';
import Markdown from './Markdown';
import { cn } from '~/utils';
import store from '~/store';
import { useAuthContext } from '~/hooks/AuthContext';
/**
* Helper: Converts a base64 string to an ArrayBuffer.
*/
const base64ToArrayBuffer = (base64: string): ArrayBuffer => {
const binaryStr = window.atob(base64);
const len = binaryStr.length;
const bytes = new Uint8Array(len);
for (let i = 0; i < len; i++) {
bytes[i] = binaryStr.charCodeAt(i);
}
return bytes.buffer;
};
/**
* Helper: Decrypts an encrypted chat message using the provided RSA private key.
* Expects the message object to have: text (ciphertext), iv, authTag, and encryptedKey.
*/
async function decryptChatMessage(
msg: { text: string; iv: string; authTag: string; encryptedKey: string },
privateKey: CryptoKey
): Promise<string> {
// Convert base64 values to ArrayBuffers.
const ciphertextBuffer = base64ToArrayBuffer(msg.text);
const ivBuffer = new Uint8Array(base64ToArrayBuffer(msg.iv));
const authTagBuffer = new Uint8Array(base64ToArrayBuffer(msg.authTag));
const encryptedKeyBuffer = base64ToArrayBuffer(msg.encryptedKey);
// Decrypt the AES key using RSA-OAEP.
let aesKeyRaw: ArrayBuffer;
try {
aesKeyRaw = await window.crypto.subtle.decrypt(
{ name: 'RSA-OAEP' },
privateKey,
encryptedKeyBuffer
);
} catch (err) {
console.error('Failed to decrypt AES key:', err);
throw err;
}
// Import the AES key.
const aesKey = await window.crypto.subtle.importKey(
'raw',
aesKeyRaw,
{ name: 'AES-GCM' },
false,
['decrypt']
);
// Combine ciphertext and auth tag (Web Crypto expects them appended).
const ciphertextBytes = new Uint8Array(ciphertextBuffer);
const combined = new Uint8Array(ciphertextBytes.length + authTagBuffer.length);
combined.set(ciphertextBytes);
combined.set(authTagBuffer, ciphertextBytes.length);
// Decrypt the message using AES-GCM.
let decryptedBuffer: ArrayBuffer;
try {
decryptedBuffer = await window.crypto.subtle.decrypt(
{ name: 'AES-GCM', iv: ivBuffer },
aesKey,
combined.buffer
);
} catch (err) {
console.error('Failed to decrypt message:', err);
throw err;
}
return new TextDecoder().decode(decryptedBuffer);
}
export const ErrorMessage = ({
text,
@@ -40,12 +111,7 @@ export const ErrorMessage = ({
>
<DelayedRender delay={5500}>
<Container message={message}>
<div
className={cn(
'rounded-md border border-red-500 bg-red-500/10 px-3 py-2 text-sm text-gray-600 dark:text-gray-200',
className,
)}
>
<div className={cn('rounded-md border border-red-500 bg-red-500/10 px-3 py-2 text-sm text-gray-600 dark:text-gray-200', className)}>
{localize('com_ui_error_connection')}
</div>
</Container>
@@ -58,10 +124,7 @@ export const ErrorMessage = ({
<div
role="alert"
aria-live="assertive"
className={cn(
'rounded-xl border border-red-500/20 bg-red-500/5 px-3 py-2 text-sm text-gray-600 dark:text-gray-200',
className,
)}
className={cn('rounded-xl border border-red-500/20 bg-red-500/5 px-3 py-2 text-sm text-gray-600 dark:text-gray-200', className)}
>
<Error text={text} />
</div>
@@ -69,41 +132,65 @@ export const ErrorMessage = ({
);
};
const DisplayMessage = ({ text, isCreatedByUser, message, showCursor }: TDisplayProps) => {
const DisplayMessage = ({ text, isCreatedByUser, message, showCursor, className = '' }: TDisplayProps) => {
const { isSubmitting, latestMessage } = useChatContext();
const { user } = useAuthContext();
const enableUserMsgMarkdown = useRecoilValue(store.enableUserMsgMarkdown);
const showCursorState = useMemo(
() => showCursor === true && isSubmitting,
[showCursor, isSubmitting],
);
const isLatestMessage = useMemo(
() => message.messageId === latestMessage?.messageId,
[message.messageId, latestMessage?.messageId],
);
const showCursorState = useMemo(() => showCursor === true && isSubmitting, [showCursor, isSubmitting]);
const isLatestMessage = useMemo(() => message.messageId === latestMessage?.messageId, [message.messageId, latestMessage?.messageId]);
// State to hold the final text to display (decrypted if needed)
const [displayText, setDisplayText] = useState<string>(text);
const [decryptionError, setDecryptionError] = useState<string | null>(null);
useEffect(() => {
if (message.encryptedKey && user?.decryptedPrivateKey) {
// Attempt to decrypt the message using our helper.
decryptChatMessage(
{
text: message.text,
iv: message.iv,
authTag: message.authTag,
encryptedKey: message.encryptedKey,
},
user.decryptedPrivateKey
)
.then((plainText) => {
setDisplayText(plainText);
setDecryptionError(null);
})
.catch((err) => {
console.error('Error decrypting message:', err);
setDecryptionError('Decryption error');
setDisplayText('');
});
} else {
// If no encryption metadata or no private key, display plain text.
setDisplayText(text);
setDecryptionError(null);
}
}, [text, message, user]);
let content: React.ReactElement;
if (!isCreatedByUser) {
content = (
<Markdown content={text} showCursor={showCursorState} isLatestMessage={isLatestMessage} />
);
content = <Markdown content={displayText} showCursor={showCursorState} isLatestMessage={isLatestMessage} />;
} else if (enableUserMsgMarkdown) {
content = <MarkdownLite content={text} />;
content = <MarkdownLite content={displayText} />;
} else {
content = <>{text}</>;
content = <>{displayText}</>;
}
return (
<Container message={message}>
<div
className={cn(
isSubmitting ? 'submitting' : '',
showCursorState && !!text.length ? 'result-streaming' : '',
'markdown prose message-content dark:prose-invert light w-full break-words',
isCreatedByUser && !enableUserMsgMarkdown && 'whitespace-pre-wrap',
isCreatedByUser ? 'dark:text-gray-20' : 'dark:text-gray-100',
)}
>
{content}
<div className={cn(
isSubmitting ? 'submitting' : '',
showCursorState && !!displayText.length ? 'result-streaming' : '',
'markdown prose message-content dark:prose-invert light w-full break-words',
isCreatedByUser && !enableUserMsgMarkdown && 'whitespace-pre-wrap',
isCreatedByUser ? 'dark:text-gray-20' : 'dark:text-gray-100',
className
)}>
{decryptionError ? <span className="text-red-500">{decryptionError}</span> : content}
</div>
</Container>
);
@@ -162,15 +249,10 @@ const MessageContent = ({
{thinkingContent.length > 0 && (
<Thinking key={`thinking-${messageId}`}>{thinkingContent}</Thinking>
)}
<DisplayMessage
key={`display-${messageId}`}
showCursor={showRegularCursor}
text={regularContent}
{...props}
/>
<DisplayMessage key={`display-${messageId}`} showCursor={showRegularCursor} text={regularContent} {...props} />
{unfinishedMessage}
</>
);
};
export default memo(MessageContent);
export default memo(MessageContent);

View File

@@ -12,7 +12,13 @@ export default function Attachment({ attachment }: { attachment?: TAttachment })
if (isImage) {
return (
<Image altText={attachment.filename} imagePath={filepath} height={height} width={width} />
<Image
altText={attachment.filename}
imagePath={filepath}
height={height}
width={width}
className="mb-4"
/>
);
}
return null;

View File

@@ -1,4 +1,5 @@
import { memo } from 'react';
import EncryptionPassphrase from './EncryptionPassphrase';
import MaximizeChatSpace from './MaximizeChatSpace';
import FontSizeSelector from './FontSizeSelector';
import SendMessageKeyEnter from './EnterToSend';
@@ -35,6 +36,9 @@ function Chat() {
<div className="pb-3">
<ScrollButton />
</div>
<div className="pb-3">
<EncryptionPassphrase />
</div>
<ForkSettings />
<div className="pb-3">
<ModularChat />

View File

@@ -0,0 +1,313 @@
import React, { useState, ChangeEvent, FC } from 'react';
import {
Button,
OGDialog,
OGDialogContent,
OGDialogHeader,
OGDialogTitle,
Input,
} from '~/components';
import { Lock, Key } from 'lucide-react';
import { useAuthContext, useLocalize } from '~/hooks';
import { useSetRecoilState } from 'recoil';
import store from '~/store';
import type { TUser } from 'librechat-data-provider';
import { useToastContext } from '~/Providers';
import { useSetUserEncryptionMutation } from '~/data-provider';
/**
* Helper: Convert a Uint8Array to a hex string (for debugging).
*/
const uint8ArrayToHex = (array: Uint8Array): string =>
Array.from(array)
.map((b) => b.toString(16).padStart(2, '0'))
.join('');
/**
* Derive an AES-GCM key from the passphrase using PBKDF2.
*/
const deriveKey = async (passphrase: string, salt: Uint8Array): Promise<CryptoKey> => {
const encoder = new TextEncoder();
const keyMaterial = await window.crypto.subtle.importKey(
'raw',
encoder.encode(passphrase),
'PBKDF2',
false,
['deriveKey']
);
const derivedKey = await window.crypto.subtle.deriveKey(
{
name: 'PBKDF2',
salt,
iterations: 100000,
hash: 'SHA-256',
},
keyMaterial,
{ name: 'AES-GCM', length: 256 },
true,
['encrypt', 'decrypt']
);
// Debug: export the derived key and log it.
const rawKey = await window.crypto.subtle.exportKey('raw', derivedKey);
console.debug('Derived key (hex):', uint8ArrayToHex(new Uint8Array(rawKey)));
return derivedKey;
};
/**
* Decrypts the user's encrypted private key using the provided passphrase.
*/
async function decryptUserPrivateKey(
encryptedPrivateKeyBase64: string,
saltBase64: string,
ivBase64: string,
passphrase: string
): Promise<CryptoKey> {
// Convert salt and IV to Uint8Array.
const salt = new Uint8Array(window.atob(saltBase64).split('').map(c => c.charCodeAt(0)));
const iv = new Uint8Array(window.atob(ivBase64).split('').map(c => c.charCodeAt(0)));
// Derive symmetric key from passphrase.
const encoder = new TextEncoder();
const keyMaterial = await window.crypto.subtle.importKey(
'raw',
encoder.encode(passphrase),
'PBKDF2',
false,
['deriveKey']
);
const symmetricKey = await window.crypto.subtle.deriveKey(
{
name: 'PBKDF2',
salt,
iterations: 100000,
hash: 'SHA-256',
},
keyMaterial,
{ name: 'AES-GCM', length: 256 },
true,
['decrypt']
);
// Decrypt the encrypted private key.
const encryptedPrivateKeyBuffer = new Uint8Array(
window.atob(encryptedPrivateKeyBase64)
.split('')
.map(c => c.charCodeAt(0))
);
const decryptedBuffer = await window.crypto.subtle.decrypt(
{ name: 'AES-GCM', iv },
symmetricKey,
encryptedPrivateKeyBuffer
);
// Import the decrypted key as a CryptoKey.
return await window.crypto.subtle.importKey(
'pkcs8',
decryptedBuffer,
{ name: 'RSA-OAEP', hash: 'SHA-256' },
true,
['decrypt']
);
}
const UserKeysSettings: FC = () => {
const localize = useLocalize();
const { user } = useAuthContext();
const setUser = useSetRecoilState(store.user);
const setDecryptedPrivateKey = useSetRecoilState(store.decryptedPrivateKey);
const { showToast } = useToastContext();
const [dialogOpen, setDialogOpen] = useState<boolean>(false);
const [passphrase, setPassphrase] = useState<string>('');
// Mutation hook for updating user encryption keys.
const { mutateAsync: setEncryption } = useSetUserEncryptionMutation({
onError: (error) => {
console.error('Error updating encryption keys:', error);
showToast({ message: localize('com_ui_upload_error'), status: 'error' });
},
});
const activateEncryption = async (): Promise<{
encryptionPublicKey: string;
encryptedPrivateKey: string;
encryptionSalt: string;
encryptionIV: string;
} | void> => {
if (!passphrase) {
console.error('Passphrase is empty.');
return;
}
if (!user) {
console.error('User object is missing.');
return;
}
try {
console.debug('[Debug] Activating E2EE encryption...');
// Generate a new RSA-OAEP key pair.
const keyPair = await window.crypto.subtle.generateKey(
{
name: 'RSA-OAEP',
modulusLength: 2048,
publicExponent: new Uint8Array([1, 0, 1]),
hash: 'SHA-256',
},
true,
['encrypt', 'decrypt']
);
// Export the public and private keys.
const publicKeyBuffer = await window.crypto.subtle.exportKey('spki', keyPair.publicKey);
const privateKeyBuffer = await window.crypto.subtle.exportKey('pkcs8', keyPair.privateKey);
const publicKeyBase64 = window.btoa(String.fromCharCode(...new Uint8Array(publicKeyBuffer)));
const privateKeyBase64 = window.btoa(String.fromCharCode(...new Uint8Array(privateKeyBuffer)));
console.debug('New public key:', publicKeyBase64);
console.debug('New private key (plaintext):', privateKeyBase64);
// Generate a salt (16 bytes) and IV (12 bytes) for AES-GCM.
const salt = window.crypto.getRandomValues(new Uint8Array(16));
const iv = window.crypto.getRandomValues(new Uint8Array(12));
// Derive a symmetric key from the passphrase using PBKDF2.
const derivedKey = await deriveKey(passphrase, salt);
// Encrypt the private key using AES-GCM.
const encoder = new TextEncoder();
const privateKeyBytes = encoder.encode(privateKeyBase64);
const encryptedPrivateKeyBuffer = await window.crypto.subtle.encrypt(
{ name: 'AES-GCM', iv },
derivedKey,
privateKeyBytes
);
const encryptedPrivateKeyBase64 = window.btoa(String.fromCharCode(...new Uint8Array(encryptedPrivateKeyBuffer)));
// Convert salt and IV to Base64 strings.
const saltBase64 = window.btoa(String.fromCharCode(...salt));
const ivBase64 = window.btoa(String.fromCharCode(...iv));
console.debug('Activation complete:');
console.debug('Encrypted private key:', encryptedPrivateKeyBase64);
console.debug('Salt (base64):', saltBase64);
console.debug('IV (base64):', ivBase64);
return {
encryptionPublicKey: publicKeyBase64,
encryptedPrivateKey: encryptedPrivateKeyBase64,
encryptionSalt: saltBase64,
encryptionIV: ivBase64,
};
} catch (error) {
console.error('Error during activation:', error);
}
};
const disableEncryption = async (): Promise<void> => {
try {
await setEncryption({
encryptionPublicKey: null,
encryptedPrivateKey: null,
encryptionSalt: null,
encryptionIV: null,
});
showToast({ message: localize('com_ui_upload_success') });
setUser((prev) => ({
...prev,
encryptionPublicKey: null,
encryptedPrivateKey: null,
encryptionSalt: null,
encryptionIV: null,
}) as TUser);
setDecryptedPrivateKey(null);
} catch (error) {
console.error('Error disabling encryption:', error);
}
};
const handleSubmit = async (): Promise<void> => {
const newEncryption = await activateEncryption();
if (newEncryption) {
try {
await setEncryption(newEncryption);
showToast({ message: localize('com_ui_upload_success') });
setUser((prev) => ({
...prev,
...newEncryption,
}) as TUser);
// Decrypt the private key and store it in the atom.
const decryptedKey = await decryptUserPrivateKey(
newEncryption.encryptedPrivateKey,
newEncryption.encryptionSalt,
newEncryption.encryptionIV,
passphrase
);
setDecryptedPrivateKey(decryptedKey);
} catch (error) {
console.error('Mutation error:', error);
}
}
setDialogOpen(false);
setPassphrase('');
};
const handleInputChange = (e: ChangeEvent<HTMLInputElement>): void => {
setPassphrase(e.target.value);
};
return (
<>
<div className="flex items-center justify-between">
<div className="flex items-center space-x-2">
<Key className="flex w-[20px] h-[20px]" />
<span id="user-keys-label">{localize('com_nav_chat_encryption_settings')}</span>
</div>
<div className="flex space-x-2">
<Button
variant="outline"
aria-label="Set/Change encryption keys"
onClick={() => setDialogOpen(true)}
data-testid="userKeysSettings"
>
<Lock className="mr-2 flex w-[22px] items-center stroke-1" />
<span>{localize('com_nav_chat_change_passphrase')}</span>
</Button>
{user?.encryptionPublicKey && (
<Button
variant="outline"
aria-label="Disable encryption"
onClick={disableEncryption}
data-testid="disableEncryption"
>
<span>Disable Encryption</span>
</Button>
)}
</div>
</div>
{user?.encryptionPublicKey && (
<div className="pt-2 text-xs text-gray-500">
{localize('com_nav_chat_current_public_key')}: {user.encryptionPublicKey.slice(0, 30)}...
</div>
)}
<OGDialog open={dialogOpen} onOpenChange={setDialogOpen}>
<OGDialogContent className="w-11/12 max-w-sm" style={{ borderRadius: '12px' }}>
<OGDialogHeader>
<OGDialogTitle>{localize('com_nav_chat_enter_your_passphrase')}</OGDialogTitle>
</OGDialogHeader>
<div className="p-4 flex flex-col gap-4">
<Input
type="password"
value={passphrase}
onChange={handleInputChange}
placeholder={localize('com_nav_chat_passphrase_placeholder')}
aria-label={localize('com_nav_chat_enter_your_passphrase')}
/>
<Button variant="outline" onClick={handleSubmit}>
{localize('com_ui_submit')}
</Button>
</div>
</OGDialogContent>
</OGDialog>
</>
);
};
export default UserKeysSettings;

View File

@@ -553,8 +553,10 @@ const bedrockAnthropic: SettingsConfiguration = [
bedrock.topP,
bedrock.topK,
baseDefinitions.stop,
bedrock.region,
librechat.resendFiles,
bedrock.region,
anthropic.thinking,
anthropic.thinkingBudget,
];
const bedrockMistral: SettingsConfiguration = [
@@ -564,8 +566,8 @@ const bedrockMistral: SettingsConfiguration = [
bedrock.maxTokens,
mistral.temperature,
mistral.topP,
bedrock.region,
librechat.resendFiles,
bedrock.region,
];
const bedrockCohere: SettingsConfiguration = [
@@ -575,8 +577,8 @@ const bedrockCohere: SettingsConfiguration = [
bedrock.maxTokens,
cohere.temperature,
cohere.topP,
bedrock.region,
librechat.resendFiles,
bedrock.region,
];
const bedrockGeneral: SettingsConfiguration = [
@@ -585,8 +587,8 @@ const bedrockGeneral: SettingsConfiguration = [
librechat.maxContextTokens,
meta.temperature,
meta.topP,
bedrock.region,
librechat.resendFiles,
bedrock.region,
];
const bedrockAnthropicCol1: SettingsConfiguration = [
@@ -602,8 +604,10 @@ const bedrockAnthropicCol2: SettingsConfiguration = [
bedrock.temperature,
bedrock.topP,
bedrock.topK,
bedrock.region,
librechat.resendFiles,
bedrock.region,
anthropic.thinking,
anthropic.thinkingBudget,
];
const bedrockMistralCol1: SettingsConfiguration = [
@@ -617,8 +621,8 @@ const bedrockMistralCol2: SettingsConfiguration = [
bedrock.maxTokens,
mistral.temperature,
mistral.topP,
bedrock.region,
librechat.resendFiles,
bedrock.region,
];
const bedrockCohereCol1: SettingsConfiguration = [
@@ -632,8 +636,8 @@ const bedrockCohereCol2: SettingsConfiguration = [
bedrock.maxTokens,
cohere.temperature,
cohere.topP,
bedrock.region,
librechat.resendFiles,
bedrock.region,
];
const bedrockGeneralCol1: SettingsConfiguration = [
@@ -646,8 +650,8 @@ const bedrockGeneralCol2: SettingsConfiguration = [
librechat.maxContextTokens,
meta.temperature,
meta.topP,
bedrock.region,
librechat.resendFiles,
bedrock.region,
];
export const settings: Record<string, SettingsConfiguration | undefined> = {

View File

@@ -897,7 +897,7 @@ export const useUploadAssistantAvatarMutation = (
unknown // context
> => {
return useMutation([MutationKeys.assistantAvatarUpload], {
// eslint-disable-next-line @typescript-eslint/no-unused-vars
mutationFn: ({ postCreation, ...variables }: t.AssistantAvatarVariables) =>
dataService.uploadAssistantAvatar(variables),
...(options || {}),
@@ -1068,3 +1068,24 @@ export const useAcceptTermsMutation = (
onMutate: options?.onMutate,
});
};
export const useSetUserEncryptionMutation = (
options?: {
onSuccess?: (
data: t.UpdateUserEncryptionResponse,
variables: t.UpdateUserEncryptionRequest,
context?: unknown
) => void;
onError?: (
error: unknown,
variables: t.UpdateUserEncryptionRequest,
context?: unknown
) => void;
}
): UseMutationResult<t.UpdateUserEncryptionResponse, unknown, t.UpdateUserEncryptionRequest, unknown> => {
return useMutation([MutationKeys.updateUserEncryption], {
mutationFn: (variables: t.UpdateUserEncryptionRequest) =>
dataService.updateUserEncryption(variables),
...(options || {}),
});
};

View File

@@ -234,6 +234,6 @@ export default function useSSE(
sse.dispatchEvent(e);
}
};
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [submission]);
}

View File

@@ -139,6 +139,8 @@
"com_endpoint_anthropic_maxoutputtokens": "Maximale Anzahl von Token, die in der Antwort erzeugt werden können. Gib einen niedrigeren Wert für kürzere Antworten und einen höheren Wert für längere Antworten an. Hinweis: Die Modelle können auch vor Erreichen dieses Maximums stoppen.",
"com_endpoint_anthropic_prompt_cache": "Prompt-Caching ermöglicht die Wiederverwendung von umfangreichen Kontexten oder Anweisungen über mehrere API-Aufrufe hinweg, wodurch Kosten und Latenzzeiten reduziert werden",
"com_endpoint_anthropic_temp": "Reicht von 0 bis 1. Verwende Temperaturen näher an 0 für analytische / Multiple-Choice-Aufgaben und näher an 1 für kreative und generative Aufgaben. Wir empfehlen, entweder dies oder Top P zu ändern, aber nicht beides.",
"com_endpoint_anthropic_thinking": "Aktiviert internes logisches Denken für unterstützte Claude-Modelle (3.7 Sonnet). Hinweis: Erfordert, dass \"Denkbudget\" festgelegt und niedriger als \"Max. Ausgabe-Token\" ist",
"com_endpoint_anthropic_thinking_budget": "Bestimmt die maximale Anzahl an Token, die Claude für seinen internen Denkprozess verwenden darf. Ein höheres Budget kann die Antwortqualität verbessern, indem es eine gründlichere Analyse bei komplexen Problemen ermöglicht. Claude nutzt jedoch möglicherweise nicht das gesamte zugewiesene Budget, insbesondere bei Werten über 32.000. Diese Einstellung muss niedriger sein als \"Max. Ausgabe-Token\".",
"com_endpoint_anthropic_topk": "Top-k ändert, wie das Modell Token für die Ausgabe auswählt. Ein Top-k von 1 bedeutet, dass das ausgewählte Token das wahrscheinlichste unter allen Token im Vokabular des Modells ist (auch \"Greedy Decoding\" genannt), während ein Top-k von 3 bedeutet, dass das nächste Token aus den 3 wahrscheinlichsten Token ausgewählt wird (unter Verwendung der Temperatur).",
"com_endpoint_anthropic_topp": "Top-p ändert, wie das Modell Token für die Ausgabe auswählt. Token werden von den wahrscheinlichsten K (siehe topK-Parameter) bis zu den am wenigsten wahrscheinlichen ausgewählt, bis die Summe ihrer Wahrscheinlichkeiten dem Top-p-Wert entspricht.",
"com_endpoint_assistant": "Assistent",
@@ -243,6 +245,8 @@
"com_endpoint_stop": "Stop-Sequenzen",
"com_endpoint_stop_placeholder": "Trenne Stoppwörter durch Drücken der `Enter`-Taste",
"com_endpoint_temperature": "Temperatur",
"com_endpoint_thinking": "Denken",
"com_endpoint_thinking_budget": "Denkbudget",
"com_endpoint_top_k": "Top K",
"com_endpoint_top_p": "Top P",
"com_endpoint_use_active_assistant": "Aktiven Assistenten verwenden",
@@ -434,7 +438,7 @@
"com_sidepanel_parameters": "KI-Einstellungen",
"com_sidepanel_select_agent": "Wähle einen Agenten",
"com_sidepanel_select_assistant": "Assistenten auswählen",
"com_ui_2fa_account_security": "Die Zwei-Faktor-Authentifizierung bietet deinem Konto eine zusätzliche Sicherheitsebene.",
"com_ui_2fa_account_security": "Die Zwei-Faktor-Authentifizierung bietet Ihrem Konto eine zusätzliche Sicherheitsebene.",
"com_ui_2fa_disable": "2FA deaktivieren",
"com_ui_2fa_disable_error": "Beim Deaktivieren der Zwei-Faktor-Authentifizierung ist ein Fehler aufgetreten.",
"com_ui_2fa_disabled": "2FA wurde deaktiviert.",
@@ -525,6 +529,7 @@
"com_ui_chat_history": "Chatverlauf",
"com_ui_clear": "Löschen",
"com_ui_clear_all": "Auswahl löschen",
"com_ui_client_id": "Client-ID",
"com_ui_client_secret": "Client Secret",
"com_ui_close": "Schließen",
"com_ui_close_menu": "Menü schließen",
@@ -590,7 +595,7 @@
"com_ui_download": "Herunterladen",
"com_ui_download_artifact": "Artefakt herunterladen",
"com_ui_download_backup": "Backup-Codes herunterladen",
"com_ui_download_backup_tooltip": "Bevor du fortfährst, lade bitte deine Backup-Codes herunter. Du benötigst sie, um den Zugang wiederherzustellen, falls du dein Authentifizierungsgerät verlierst.",
"com_ui_download_backup_tooltip": "Bevor Sie fortfahren, laden Sie bitte Ihre Backup-Codes herunter. Sie benötigen sie, um den Zugang wiederherzustellen, falls Sie Ihr Authentifizierungsgerät verlieren.",
"com_ui_download_error": "Fehler beim Herunterladen der Datei. Die Datei wurde möglicherweise gelöscht.",
"com_ui_dropdown_variables": "Dropdown-Variablen:",
"com_ui_dropdown_variables_info": "Erstellen Sie benutzerdefinierte Dropdown-Menüs für Ihre Eingabeaufforderungen: `{{variable_name:option1|option2|option3}}`",
@@ -677,6 +682,7 @@
"com_ui_more_info": "Mehr Infos",
"com_ui_my_prompts": "Meine Prompts",
"com_ui_name": "Name",
"com_ui_new": "Neu",
"com_ui_new_chat": "Neuer Chat",
"com_ui_next": "Weiter",
"com_ui_no": "Nein",

View File

@@ -551,6 +551,11 @@
"com_ui_controls": "Controls",
"com_ui_copied": "Copied!",
"com_ui_copied_to_clipboard": "Copied to clipboard",
"com_nav_chat_encryption_settings": "Encryption Settings",
"com_nav_chat_change_passphrase": "Change Passphrase",
"com_nav_chat_enter_your_passphrase": "Enter your passphrase",
"com_nav_chat_passphrase_placeholder": "Type your encryption passphrase here...",
"com_nav_chat_current_public_key": "Current Public Key",
"com_ui_copy_code": "Copy code",
"com_ui_copy_link": "Copy link",
"com_ui_copy_to_clipboard": "Copy to clipboard",

View File

@@ -1,4 +1,6 @@
{
"chat_direction_left_to_right": "algo debería ir aquí pero está vacío",
"chat_direction_right_to_left": "algo debería ir aquí pero está vacío",
"com_a11y_ai_composing": "La IA está componiendo la respuesta",
"com_a11y_end": "La IA ha finalizado su respuesta",
"com_a11y_start": "La IA ha comenzado su respuesta",
@@ -18,13 +20,16 @@
"com_agents_not_available": "Agente no disponible",
"com_agents_search_name": "Buscar agentes por nombre",
"com_agents_update_error": "Hubo un error al actualizar su agente.",
"com_assistants_action_attempt": "El asistente quiere hablar con {{0}}",
"com_assistants_actions": "Acciones",
"com_assistants_actions_disabled": "Necesita crear un asistente antes de añadir acciones.",
"com_assistants_actions_info": "Permita que su Asistente recupere información o realice acciones a través de API's",
"com_assistants_add_actions": "Añadir Acciones",
"com_assistants_add_tools": "Añadir Herramientas",
"com_assistants_allow_sites_you_trust": "Solo permite sitios en los que confíes.",
"com_assistants_append_date": "Añadir Fecha y Hora Actual",
"com_assistants_append_date_tooltip": "Cuando está habilitado, la fecha y hora actual del cliente se adjuntarán a las instrucciones del sistema del asistente.",
"com_assistants_attempt_info": "El asistente quiere enviar lo siguiente:",
"com_assistants_available_actions": "Acciones Disponibles",
"com_assistants_capabilities": "Capacidades",
"com_assistants_code_interpreter": "Intérprete de Código",
@@ -59,6 +64,7 @@
"com_assistants_update_error": "Hubo un error al actualizar su asistente.",
"com_assistants_update_success": "Actualizado con éxito",
"com_auth_already_have_account": "¿Ya tiene una cuenta?",
"com_auth_apple_login": "Inicia con Apple",
"com_auth_back_to_login": "Volver al inicio de sesión",
"com_auth_click": "Haga clic",
"com_auth_click_here": "Haz clic aquí",
@@ -117,9 +123,11 @@
"com_auth_submit_registration": "Enviar registro",
"com_auth_to_reset_your_password": "para restablecer su contraseña.",
"com_auth_to_try_again": "para intentar de nuevo.",
"com_auth_two_factor": "Revisa tu aplicación preferida de OTP para obtener el código",
"com_auth_username": "Nombre de usuario (opcional)",
"com_auth_username_max_length": "El nombre de usuario debe tener menos de 20 caracteres",
"com_auth_username_min_length": "El nombre de usuario debe tener al menos 2 caracteres",
"com_auth_verify_your_identity": "Verifica Tu Identidad",
"com_auth_welcome_back": "Bienvenido de nuevo",
"com_click_to_download": "(haga clic aquí para descargar)",
"com_download_expired": "Descarga expirada",
@@ -420,6 +428,8 @@
"com_sidepanel_parameters": "Parámetros",
"com_sidepanel_select_agent": "Seleccione un Agente",
"com_sidepanel_select_assistant": "Seleccionar un Asistente",
"com_ui_2fa_enable": "Activa 2FA",
"com_ui_2fa_enabled": "2FA ha sido activada",
"com_ui_accept": "Acepto",
"com_ui_add": "Agregar",
"com_ui_add_model_preset": "Agregar un modelo o configuración preestablecida para una respuesta adicional",

View File

@@ -87,6 +87,7 @@
"com_auth_email_verification_redirecting": "Suunatakse ümber {{0}} sekundi pärast...",
"com_auth_email_verification_resend_prompt": "Kas sa ei saanud e-kirja?",
"com_auth_email_verification_success": "E-post kinnitatud",
"com_auth_email_verifying_ellipsis": "Kontrollimine...",
"com_auth_error_create": "Konto registreerimisel tekkis viga. Proovige uuesti.",
"com_auth_error_invalid_reset_token": "See parooli lähtestamise tunnus pole enam kehtiv.",
"com_auth_error_login": "Sisselogimine esitatud teabega ei õnnestu. Palun kontrolli oma andmeid ja proovi uuesti.",
@@ -123,9 +124,11 @@
"com_auth_submit_registration": "Saada registreerimine",
"com_auth_to_reset_your_password": "parooli lähtestamiseks.",
"com_auth_to_try_again": "uuesti proovimiseks.",
"com_auth_two_factor": "Kontrolli oma eelistatud ühekordse parooli rakendust koodi saamiseks",
"com_auth_username": "Kasutajanimi (valikuline)",
"com_auth_username_max_length": "Kasutajanimi peab olema vähem kui 20 tähemärki",
"com_auth_username_min_length": "Kasutajanimi peab olema vähemalt 2 tähemärki",
"com_auth_verify_your_identity": "Kontrolli",
"com_auth_welcome_back": "Teretulemast tagasi",
"com_click_to_download": "(vajuta siia, et alla laadida)",
"com_download_expired": "(allalaadimine aegunud)",
@@ -265,6 +268,7 @@
"com_files_table": "Failide tabel",
"com_generated_files": "Genereeritud failid:",
"com_hide_examples": "Peida näited",
"com_nav_2fa": "Kaheastmeline autentimine (2FA)",
"com_nav_account_settings": "Konto seaded",
"com_nav_always_make_prod": "Tee uued versioonid alati toodangusse",
"com_nav_archive_created_at": "Arhiveerimise kuupäev",
@@ -434,6 +438,16 @@
"com_sidepanel_parameters": "Parameetrid",
"com_sidepanel_select_agent": "Vali agent",
"com_sidepanel_select_assistant": "Vali assistent",
"com_ui_2fa_account_security": "Kaheastmeline autentimine lisab teie kontole täiendava turvalisuse kihi",
"com_ui_2fa_disable": "Lülita 2FA välja",
"com_ui_2fa_disable_error": "Tekkis viga kaheastmelise autentimise väljalülitamisel",
"com_ui_2fa_disabled": "2FA on välja lülitatud",
"com_ui_2fa_enable": "Aktiveeri 2FA",
"com_ui_2fa_enabled": "2FA on aktiveeritud",
"com_ui_2fa_generate_error": "Kaheastmelise autentimise seadete genereerimisel tekkis viga",
"com_ui_2fa_invalid": "Vale kaheastmeline autentimise kood",
"com_ui_2fa_setup": "Seadista 2FA",
"com_ui_2fa_verified": "Kaheastmeline autentimine õnnestus",
"com_ui_accept": "Nõustun",
"com_ui_add": "Lisa",
"com_ui_add_model_preset": "Lisa mudel või eelseadistus täiendava vastuse jaoks",
@@ -484,6 +498,9 @@
"com_ui_azure": "Azure",
"com_ui_back_to_chat": "Tagasi vestlusesse",
"com_ui_back_to_prompts": "Tagasi sisendite juurde",
"com_ui_backup_codes": "Varukoodid",
"com_ui_backup_codes_regenerate_error": "Varukoodide loomisel tekkis viga",
"com_ui_backup_codes_regenerated": "Varukoodide loomine oli edukas",
"com_ui_basic": "Põhiline",
"com_ui_basic_auth_header": "Põhiline autentimise päis",
"com_ui_bearer": "Bearer",
@@ -520,6 +537,7 @@
"com_ui_collapse_chat": "Ahenda vestlus",
"com_ui_command_placeholder": "Valikuline: sisesta sisendi jaoks käsk või kasutatakse nime",
"com_ui_command_usage_placeholder": "Vali sisend käsu või nime järgi",
"com_ui_complete_setup": "Valmis",
"com_ui_confirm_action": "Kinnita tegevus",
"com_ui_confirm_admin_use_change": "Selle seadistuse muutmine blokeerib juurdepääsu administraatoritele, sealhulgas sinule endale. Oled sa kindel, et sa soovid jätkata?",
"com_ui_confirm_change": "Kinnita muudatus",
@@ -573,8 +591,11 @@
"com_ui_descending": "Desc",
"com_ui_description": "Kirjeldus",
"com_ui_description_placeholder": "Valikuline: sisesta sisendi jaoks kuvatav kirjeldus",
"com_ui_disabling": "Välja lülitamine...",
"com_ui_download": "Laadi alla",
"com_ui_download_artifact": "Laadi artefakt alla",
"com_ui_download_backup": "Laadi alla varukoodid",
"com_ui_download_backup_tooltip": "Enne jätkamist laadi alla oma varukoodid. Vajad neid ligipääsu taastamiseks, kui peaksid oma autentimisseadme kaotama.",
"com_ui_download_error": "Viga faili allalaadimisel. Fail võib olla kustutatud.",
"com_ui_drag_drop": "Lohistage",
"com_ui_dropdown_variables": "Rippmenüü muutujad:",
@@ -623,6 +644,9 @@
"com_ui_fork_split_target_setting": "Alusta vaikimisi sihtsõnumist hargnemist",
"com_ui_fork_success": "Vestluse hargnemine õnnestus",
"com_ui_fork_visible": "Ainult nähtavad sõnumid",
"com_ui_generate_backup": "Loo varukoodid",
"com_ui_generate_qrcode": "Loo QR-kood",
"com_ui_generating": "Loomine...",
"com_ui_global_group": "Ülene grupp",
"com_ui_go_back": "Mine tagasi",
"com_ui_go_to_conversation": "Mine vestlusesse",
@@ -631,6 +655,7 @@
"com_ui_host": "Host",
"com_ui_idea": "Ideed",
"com_ui_image_gen": "Pildi genereerimine",
"com_ui_import": "Impordi",
"com_ui_import_conversation_error": "Vestluste importimisel tekkis viga",
"com_ui_import_conversation_file_type_error": "Toetamatu imporditüüp",
"com_ui_import_conversation_info": "Impordi vestlused JSON-failist",
@@ -663,6 +688,7 @@
"com_ui_new_chat": "Uus vestlus",
"com_ui_next": "Järgmine",
"com_ui_no": "Ei",
"com_ui_no_backup_codes": "Varukoodid puuduvad. Palun loo uued",
"com_ui_no_bookmarks": "Tundub, et sul pole veel järjehoidjaid. Klõpsa vestlusele ja lisa uus",
"com_ui_no_category": "Kategooriat pole",
"com_ui_no_changes": "Uuendamiseks pole muudatusi",
@@ -671,6 +697,7 @@
"com_ui_no_valid_items": "Sobivad üksused puuduvad!",
"com_ui_none": "Puudub",
"com_ui_none_selected": "Ühtegi pole valitud",
"com_ui_not_used": "Kasutamata",
"com_ui_nothing_found": "Midagi ei leitud",
"com_ui_oauth": "OAuth",
"com_ui_of": "kohta",
@@ -698,6 +725,8 @@
"com_ui_read_aloud": "Loe valjusti",
"com_ui_refresh_link": "Värskenda linki",
"com_ui_regenerate": "Genereeri uuesti",
"com_ui_regenerate_backup": "Loo varukoodid uuesti",
"com_ui_regenerating": "Uuesti loomine...",
"com_ui_region": "Piirkond",
"com_ui_rename": "Nimeta ümber",
"com_ui_rename_prompt": "Nimeta sisend ümber",
@@ -720,6 +749,7 @@
"com_ui_schema": "Skeem",
"com_ui_scope": "Ulatus",
"com_ui_search": "Otsi",
"com_ui_secret_key": "Salavõti",
"com_ui_select": "Vali",
"com_ui_select_file": "Vali fail",
"com_ui_select_model": "Vali mudel",
@@ -744,6 +774,7 @@
"com_ui_shared_link_not_found": "Jagatud linki ei leitud",
"com_ui_shared_prompts": "Jagatud sisendid",
"com_ui_shop": "Ostlemine",
"com_ui_show": "Kuva",
"com_ui_show_all": "Näita kõiki",
"com_ui_show_qr": "Näita QR-koodi",
"com_ui_sign_in_to_domain": "Logi sisse {{0}}",
@@ -781,10 +812,14 @@
"com_ui_upload_invalid_var": "Fail on üleslaadimiseks vigane. Peab olema pilt, mis ei ületa {{0}} MB",
"com_ui_upload_success": "Faili üleslaadimine õnnestus",
"com_ui_upload_type": "Vali üleslaadimise tüüp",
"com_ui_use_2fa_code": "Kasuta hoopis 2FA koodi",
"com_ui_use_backup_code": "Kasuta hoopis varukoodi",
"com_ui_use_micrphone": "Kasuta mikrofoni",
"com_ui_use_prompt": "Kasuta sisendit",
"com_ui_used": "Kasutatud",
"com_ui_variables": "Muutujad",
"com_ui_variables_info": "Kasuta oma tekstis topelt sulgusid, et luua muutujaid, nt `{{näidismuutuja}}`, et hiljem sisendi kasutamisel täita.",
"com_ui_verify": "Kontrolli",
"com_ui_version_var": "Versioon {{0}}",
"com_ui_versions": "Versioonid",
"com_ui_view_source": "Vaata algset vestlust",

View File

@@ -20,7 +20,7 @@
"com_agents_not_available": "הסוכן לא זמין",
"com_agents_search_name": "חפש סוכן לפי שם",
"com_agents_update_error": "אירעה שגיאה בעדכון הסוכן שלך.",
"com_assistants_action_attempt": "הסייען מעוניין לתקשר עם {{0}}",
"com_assistants_action_attempt": "הסוכן מעוניין לתקשר עם {{0}}",
"com_assistants_actions": "פעולות",
"com_assistants_actions_disabled": "עליך ליצור סייען לפני הוספת פעולות.",
"com_assistants_actions_info": "אפשר לסייען לאחזר מידע או לבצע פעולות באמצעות API",
@@ -37,7 +37,7 @@
"com_assistants_code_interpreter_info": "מתורגמן קוד מאפשר לסייען לכתוב ולהריץ קוד. כלי זה יכול לעבד קבצים עם נתונים ועיצוב מגוונים, וליצור קבצים כגון גרפים.",
"com_assistants_completed_action": "תקשר עם {{0}}",
"com_assistants_completed_function": "מריץ {{0}}",
"com_assistants_conversation_starters": "התחלת שיחות",
"com_assistants_conversation_starters": "התחלות שיחה",
"com_assistants_conversation_starters_placeholder": "הכנס פתיח לשיחה",
"com_assistants_create_error": "אירעה שגיאה ביצירת הסייען שלך.",
"com_assistants_create_success": "נוצר בהצלחה",
@@ -87,6 +87,7 @@
"com_auth_email_verification_redirecting": "מפנה מחדש בעוד {{0}} שניות...",
"com_auth_email_verification_resend_prompt": "לא קיבלת את הדוא\"ל?",
"com_auth_email_verification_success": "הדוא\"ל אומת בהצלחה",
"com_auth_email_verifying_ellipsis": "מאמת...",
"com_auth_error_create": "אירעה שגיאה בניסיון לרשום את החשבון שלך. בבקשה נסה שוב.",
"com_auth_error_invalid_reset_token": "אסימון איפוס הסיסמה הזה אינו תקף עוד.",
"com_auth_error_login": "לא ניתן להתחבר עם המידע שסופק. אנא בדוק את האישורים שלך ונסה שוב.",
@@ -123,9 +124,11 @@
"com_auth_submit_registration": "שלח רישום",
"com_auth_to_reset_your_password": "כדי לאפס את הסיסמה שלך.",
"com_auth_to_try_again": "כדי לנסות שוב.",
"com_auth_two_factor": "בדוק את יישום הסיסמה החד-פעמית שלך לקבלת קוד",
"com_auth_username": "שם משתמש (אופציונלי)",
"com_auth_username_max_length": "שם המשתמש חייב להיות פחות מ-20 תווים",
"com_auth_username_min_length": "שם משתמש חייב להיות לפחות 2 תווים",
"com_auth_verify_your_identity": "אמת את הזהות שלך",
"com_auth_welcome_back": "ברוכים הבאים",
"com_click_to_download": "(לחץ כאן להורדה)",
"com_download_expired": "(פג תוקף ההורדה)",
@@ -137,6 +140,8 @@
"com_endpoint_anthropic_maxoutputtokens": "מספר האסימונים המרבי שניתן להפיק בתגובה. ציין ערך נמוך יותר עבור תגובות קצרות יותר וערך גבוה יותר עבור תגובות ארוכות יותר.",
"com_endpoint_anthropic_prompt_cache": "שמירת מטמון מהירה מאפשרת שימוש חוזר בהקשר גדול או בהוראות בקריאות API, תוך הפחתת העלויות וההשהייה",
"com_endpoint_anthropic_temp": "נע בין 0 ל-1. השתמש בטמפ' הקרובה יותר ל-0 עבור בחירה אנליטית / מרובה, וקרוב יותר ל-1 עבור משימות יצירתיות ויצירתיות. אנו ממליצים לשנות את זה או את Top P אבל לא את שניהם.",
"com_endpoint_anthropic_thinking": "מאפשר חשיבה פנימית עבור דגמי Claude נתמכים (3.7 Sonnet). הערה: דורש שההגדרה של 'תקציב חשיבה' תהיה נמוכה מ'מקסימום טוקנים לפלט'",
"com_endpoint_anthropic_thinking_budget": "קובע את מספר הטוקנים המקסימלי שקלוד רשאי להשתמש בו עבור תהליך החשיבה הפנימי. תקציב גבוה יותר עשוי לשפר את איכות התשובה על ידי מתן אפשרות לניתוח מעמיק יותר של בעיות מורכבות, אם כי קלוד לא בהכרח ישתמש בכל התקציב שהוקצה, במיוחד בטווחים שמעל 32K. הגדרה זו חייבת להיות נמוכה מ'מקסימום טוקנים לפלט'.",
"com_endpoint_anthropic_topk": "Top-k משנה את האופן שבו המודל בוחר אסימונים לפלט. Top-k של 1 פירושו שהאסימון שנבחר הוא הסביר ביותר מבין כל האסימונים באוצר המילים של הדגם (נקרא גם פענוח חמדן), בעוד ש-top-k של 3 פירושו שהאסימון הבא נבחר מבין 3 הכי הרבה. אסימונים סבירים (באמצעות טמפרטורה).",
"com_endpoint_anthropic_topp": "Top-p משנה את האופן שבו המודל בוחר אסימונים לפלט. אסימונים נבחרים מבין רוב K (ראה פרמטר topK) הסביר לפחות עד שסכום ההסתברויות שלהם שווה לערך העליון-p.",
"com_endpoint_assistant": "סייען",
@@ -242,6 +247,8 @@
"com_endpoint_stop": "רצף לעצירה",
"com_endpoint_stop_placeholder": "הפרד ערכים על ידי לחיצה על 'Enter'",
"com_endpoint_temperature": "טמפרטורה",
"com_endpoint_thinking": "חשיבה",
"com_endpoint_thinking_budget": "תקציב חשיבה",
"com_endpoint_top_k": "Top K",
"com_endpoint_top_p": "Top P",
"com_endpoint_use_active_assistant": "השתמש ב-סייען פעיל",
@@ -264,6 +271,7 @@
"com_files_table": "השדה חייב להכיל תוכן, הוא אינו יכול להישאר ריק",
"com_generated_files": "קבצים שנוצרו:",
"com_hide_examples": "הסתר דוגמאות",
"com_nav_2fa": "אימות דו-שלבי (2FA)",
"com_nav_account_settings": "הגדרות חשבון",
"com_nav_always_make_prod": "ייצר תמיד גרסאות חדשות",
"com_nav_archive_created_at": "תאריך ייצור",
@@ -272,10 +280,10 @@
"com_nav_archived_chats_empty": "אין שיחות מארכיון.",
"com_nav_at_command": "@-פקודה",
"com_nav_at_command_description": "הפקודה \"@\" משמשת כמנגנון הפעלה/החלפה של נקודות קצה, מודלים, הגדרות קבועות מראש וכו'.",
"com_nav_audio_play_error": "שגיאה בהפעלת האודיו: {{0}}",
"com_nav_audio_play_error": "שגיאה בהפעלת אודיו: {{0}}",
"com_nav_audio_process_error": "שגיאה בעיבוד האודיו: {{0}}",
"com_nav_auto_scroll": "Auto-s גלול אל הכי חדש בפתיחה",
"com_nav_auto_send_prompts": "הנחיות (פרומפטים) לשליחה אוטומטית",
"com_nav_auto_send_prompts": "שליחת הנחיות (פרומפטים) אוטומטית",
"com_nav_auto_send_text": "טקסט לשליחה אוטומטית",
"com_nav_auto_send_text_disabled": "הגדר -1 כדי להשבית",
"com_nav_auto_transcribe_audio": "תמלול אוטומטי של אודיו",
@@ -294,10 +302,10 @@
"com_nav_close_sidebar": "סגור סרגל צד",
"com_nav_commands": "פקודות",
"com_nav_confirm_clear": "אשר ניקוי",
"com_nav_conversation_mode": "ביקורות בהמתנה",
"com_nav_conversation_mode": "מצב שיחה",
"com_nav_convo_menu_options": "אפשרויות מצב שיחה",
"com_nav_db_sensitivity": "רגישות דציבלים",
"com_nav_delete_account": "מחק חשבון",
"com_nav_delete_account": "מחיקת החשבון",
"com_nav_delete_account_button": "מחק את החשבון שלי לצמיתות",
"com_nav_delete_account_confirm": "מחק חשבון - אתה בטוח?",
"com_nav_delete_account_email_placeholder": "אנא הזן את כתובת הדוא\"ל של החשבון שלך",
@@ -321,13 +329,15 @@
"com_nav_export_type": "סוג",
"com_nav_external": "חיצוני",
"com_nav_font_size": "גודל גופן",
"com_nav_font_size_base": "בינוני",
"com_nav_font_size_base": "מדיום",
"com_nav_font_size_lg": "גדול",
"com_nav_font_size_sm": "קטן",
"com_nav_font_size_xl": "גדול מאוד",
"com_nav_font_size_xs": "קטן מאוד",
"com_nav_font_size_xs": "קט מאוד",
"com_nav_help_faq": "עזרה ושאלות נפוצות",
"com_nav_hide_panel": "הסתר לוח הצד הימני ביותר",
"com_nav_info_code_artifacts": "אפשר הצגה של רכיבי תצוגת קוד ניסיוניים לצד הצ'אט",
"com_nav_info_code_artifacts_agent": "אפשר שימוש ברכיבי תצוגת קוד עבור סוכן זה כברירת מחדל, מתווספות הוראות נוספות ספציפיות לשימוש ברכיבי התצוגה אלא אם \"מצב הנחיה מותאם אישית\" מופעל.",
"com_nav_info_custom_prompt_mode": "כאשר אפשרות זו מופעלת, הנחיית ברירת המחדל של מערכת רכיבי תצוגה לא תיכלל. כל ההוראות ליצירת רכיבי תצוגה יהיו חייבות להינתן באופן ידני במצב זה.",
"com_nav_info_enter_to_send": "כאשר מופעל, לחיצה על \"ENTER\" תשלח את ההודעה שלך, כאשר מושבת לחיצה על \"Enter\" תוסיף שורה חדשה, ותצטרך ללחוץ על \"CTRL + ENTER\" כדי לשלוח את ההודעה.",
"com_nav_info_fork_change_default": "'הודעות ישירות בלבד' כולל רק את הנתיב הישיר להודעה שנבחרה. 'כלול הסתעפויות קשורות' מוסיף את כל ההסתעפויות הקשורות לאורך הנתיב. 'כלול הכל עד כאן/מכאן' כולל את כל ההודעות וההסתעפויות המחוברות.",
@@ -338,7 +348,7 @@
"com_nav_info_show_thinking": "כאשר אפשרות זו מופעלת, תיבות תצוגה שמציגות את תהליך החשיבה של הבינה המלאכותית יופיעו פתוחות כברירת מחדל, כך שתוכל לראות את תהליך הניתוח בזמן אמת. כאשר האפשרות מושבתת, תיבות הבחירה יישארו סגורות כברירת מחדל, מה שיוצר ממשק נקי וזורם יותר.",
"com_nav_info_user_name_display": "כאשר אפשרות זו מופעלת, שם המשתמש של השולח יוצג מעל כל הודעה שאתה שולח. כאשר האפשרות מושבתת, יוצג רק הכיתוב \"אתה\" מעל ההודעות שלך.",
"com_nav_lang_arabic": "ערבית (العربية)",
"com_nav_lang_auto": "זיהוי אוטומטי",
"com_nav_lang_auto": "זיהוי באופן אוטומטי",
"com_nav_lang_brazilian_portuguese": "פורטוגזית ברזילאית (Português Brasileiro)",
"com_nav_lang_chinese": "סינית (中文)",
"com_nav_lang_dutch": "הולנדית (Nederlands)",
@@ -370,13 +380,13 @@
"com_nav_no_search_results": "לא נמצאו תוצאות בחיפוש",
"com_nav_not_supported": "לא נתמך",
"com_nav_open_sidebar": "פתח סרגל צד",
"com_nav_playback_rate": "קצב השמעת אודיו",
"com_nav_playback_rate": "קצב השמעת האודיו",
"com_nav_plugin_auth_error": "אירעה שגיאה בניסיון לאמת את הפלאגין הזה. בבקשה נסה שוב.",
"com_nav_plugin_install": "התקן",
"com_nav_plugin_search": "תוספי חיפוש",
"com_nav_plugin_store": "חנות פלאגין",
"com_nav_plugin_uninstall": "הסר התקנה",
"com_nav_plus_command": "פקודת+-",
"com_nav_plus_command": "פקודות+-",
"com_nav_plus_command_description": "הפעל או בטל את הפקודה '+' כדי להוסיף הגדרת תגובות מרובות",
"com_nav_profile_picture": "תמונת פרופיל",
"com_nav_save_drafts": "שמיר את האפצה באותו מחשב",
@@ -431,6 +441,16 @@
"com_sidepanel_parameters": "פרמטרים",
"com_sidepanel_select_agent": "בחר סוכן",
"com_sidepanel_select_assistant": "בחר סייען",
"com_ui_2fa_account_security": "אימות דו-שלבי מוסיף שכבת אבטחה נוספת לחשבון שלך",
"com_ui_2fa_disable": "השבת אימות דו-שלבי (2FA)",
"com_ui_2fa_disable_error": "התרחשה שגיאה בעת ביטול האימות הדו-שלבי",
"com_ui_2fa_disabled": "האימות הדו-שלבי הושבת (2FA)",
"com_ui_2fa_enable": "אפשר אימות דו-שלבי (2FA)",
"com_ui_2fa_enabled": "האימות הדו-שלבי (2FA) הופעל",
"com_ui_2fa_generate_error": "תרחשה שגיאה בעת יצירת הגדרות האימות הדו-שלבי (2FA)",
"com_ui_2fa_invalid": "קוד האימות הדו-שלבי שגוי",
"com_ui_2fa_setup": "הגדר אימות דו-שלבי (2FA)",
"com_ui_2fa_verified": "האימות הדו-שלבי אומת בהצלחה",
"com_ui_accept": "אני מקבל",
"com_ui_add": "הוסף",
"com_ui_add_model_preset": "הוספת מודל או הגדרה קבועה לתגובה נוספת",
@@ -461,10 +481,11 @@
"com_ui_artifacts": "רכיבי תצוגה",
"com_ui_artifacts_toggle": "הפעל/כבה רכיבי תצוגה",
"com_ui_artifacts_toggle_agent": "אפשר רכיבי תצוגה",
"com_ui_ascending": "סדר עולה",
"com_ui_assistant": "סייען",
"com_ui_assistant_delete_error": "אירעה שגיאה בעת מחיקת הסייען",
"com_ui_assistant_deleted": "הסייען נמחק בהצלחה",
"com_ui_assistants": "סייענים",
"com_ui_assistants": "סייען",
"com_ui_assistants_output": "פלט סייענים",
"com_ui_attach_error": "לא ניתן לצרף קובץ. צור או בחר שיחה, או נסה לרענן את הדף.",
"com_ui_attach_error_openai": "לא ניתן לצרף את קבצי הסייען לנקודות קצה אחרות",
@@ -473,7 +494,7 @@
"com_ui_attach_warn_endpoint": "עשוי להתעלם מקבצים שאינם של הסייען שאין להם כלי תואם",
"com_ui_attachment": "קובץ מצורף",
"com_ui_auth_type": "סוג אישור",
"com_ui_auth_url": "כתובת URL לאימות",
"com_ui_auth_url": "כתובת URL לאימות הרשאה",
"com_ui_authentication": "אימות",
"com_ui_authentication_type": "סוג אימות",
"com_ui_avatar": "אווטאר",
@@ -573,7 +594,7 @@
"com_ui_description": "תיאור",
"com_ui_description_placeholder": "אופציונלי: הזן תיאור שיוצג עבור ההנחיה (פרומפט)",
"com_ui_disabling": "מבטל הפעלה...",
"com_ui_download": "הורדה",
"com_ui_download": "הורדות",
"com_ui_download_artifact": "רכיב תצוגת הורדות",
"com_ui_download_backup": "הורד קודי גיבוי",
"com_ui_download_backup_tooltip": "לפני שתמשיך, הורד את קודי הגיבוי שלך. תזדקק להם כדי לשחזר גישה במקרה שתאבד את מכשיר האימות שלך",
@@ -610,50 +631,126 @@
"com_ui_fork_default": "השתמש בהגדרות הסתעפויות ברירת מחדל",
"com_ui_fork_error": "אירעה שגיאה בעת פיצול השיחה",
"com_ui_fork_from_message": "בחר הגדרת הסתעפויות",
"com_ui_fork_info_1": "השתמש בהגדרה זו כדי ליצור הסתעפות של הודעות עם ההתנהגות הרצויה.",
"com_ui_fork_info_2": "\"הסתעפות\" מתייחסת ליצירת שיחה חדשה המתחילה/מסתיימת מהודעות ספציפיות בשיחה הנוכחית, תוך יצירת העתק בהתאם לאפשרויות שנבחרו.",
"com_ui_fork_info_3": "\"הודעת היעד\" מתייחסת להודעה שממנה נפתחה חלונית זו, או, אם סימנת \"{{0}}\", להודעה האחרונה בשיחה.",
"com_ui_fork_info_branches": "אפשרות זו מפצלת את ההודעות הגלויות, יחד עם ההסתעפויות הקשורות; במילים אחרות, המסלול הישיר להודעת היעד, כולל את ההסתעפויות לאורך המסלול.",
"com_ui_fork_info_remember": "סמן כדי לזכור את האפשרויות שבחרת לשימושים הבאים, כך שתוכל ליצור הסתעפויות בשיחות מהר יותר לפי העדפתך.",
"com_ui_fork_info_start": "כאשר מסומן, ההסתעפות תחל מההודעה זו ותימשך עד להודעה האחרונה בשיחה, על פי ההתנהגות שנבחרה לעיל.",
"com_ui_fork_info_target": "אפשרות זו תיצור הסתעפות שתכלול את כל ההודעות המובילות להודעת היעד, כולל ההודעות הסמוכות; במילים אחרות, כל ההסתעפויות של ההודעות יכללו, בין אם הם גלויות או לא, ובין אם הם נמצאות באותו מסלול או לא.",
"com_ui_fork_info_visible": "אפשרות זו תיצור הסתעפות רק של ההודעות הגלויות; במילים אחרות, רק את המסלול הישיר להודעת היעד, ללא הסתעפויות נוספות.",
"com_ui_fork_processing": "יוצר הסתעפות בשיחה...",
"com_ui_fork_remember": "זכור",
"com_ui_fork_remember_checked": "הבחירה שלך תישמר אחרי השימוש. תוכל לשנות זאת בכל זמן בהגדרות.",
"com_ui_fork_split_target": "התחל הסתעפות כאן",
"com_ui_fork_split_target_setting": "התחל הסתעפות מהודעת היעד כברירת מחדל",
"com_ui_fork_success": "יצירת ההסתעפות בשיחה הסתיימה בהצלחה",
"com_ui_fork_visible": "הודעות גלויות בלבד",
"com_ui_generate_backup": "צור קודי גיבוי",
"com_ui_generate_qrcode": "צור קוד QR",
"com_ui_generating": "יוצר...",
"com_ui_global_group": "שדה זה לא יכול להישאר ריק",
"com_ui_go_back": "חזור",
"com_ui_go_to_conversation": "חזור לצ'אט",
"com_ui_happy_birthday": "זה יום ההולדת הראשון שלי!",
"com_ui_hide_qr": "הסתר קוד QR",
"com_ui_host": "מארח",
"com_ui_idea": "רעיונות",
"com_ui_image_gen": "מחולל תמונות",
"com_ui_import": "ייבוא",
"com_ui_import_conversation_error": "אירעה שגיאה בעת ייבוא השיחות שלך",
"com_ui_import_conversation_file_type_error": "סוג ייבוא לא נתמך",
"com_ui_import_conversation_info": "ייבא שיחות מקובץ JSON",
"com_ui_import_conversation_success": "השיחות יובאו בהצלחה",
"com_ui_include_shadcnui": "יש לכלול הוראות לשימוש ברכיבי ממשק המשתמש של shadcn/ui",
"com_ui_include_shadcnui_agent": "יש לכלול הוראות שימוש ב-shadcn/ui",
"com_ui_input": "קלט",
"com_ui_instructions": "הוראות",
"com_ui_latest_footer": "גישה לכל הבינות המלאכותיות (AI) לכולם",
"com_ui_latest_production_version": "גרסת הפיתוח העדכנית ביותר",
"com_ui_latest_version": "גרסה אחרונה",
"com_ui_librechat_code_api_key": "קבל את מפתח ה-API של מפענח הקוד LibreChat",
"com_ui_librechat_code_api_subtitle": "אבטחה ללא פשרות. תמיכה במגוון שפות תכנות. יכולת עבודה מלאה עם קבצים.",
"com_ui_librechat_code_api_title": "הרץ קוד AI",
"com_ui_llm_menu": "תפריט מודל שפה גדול (LLM)",
"com_ui_llms_available": "מודל שפה גדול (LLM)",
"com_ui_loading": "טוען...",
"com_ui_locked": "נעול",
"com_ui_logo": "\"לוגו {{0}}\"",
"com_ui_manage": "נהל",
"com_ui_max_tags": "המספר המקסימלי המותר על פי הערכים העדכניים הוא {{0}}.",
"com_ui_mention": "ציין נקודת קצה, סייען, או הנחייה (פרופמט) כדי לעבור אליה במהירות",
"com_ui_min_tags": "לא ניתן למחוק ערכים נוספים, יש צורך במינימום {{0}} ערכים.",
"com_ui_misc": "כללי",
"com_ui_model": "דגם",
"com_ui_model_parameters": "הגדרות המודל",
"com_ui_more_info": "מידע נוסף",
"com_ui_my_prompts": "ההנחיות (פרומפטים) שלי",
"com_ui_name": "שם",
"com_ui_new_chat": "שיחה חדשה",
"com_ui_next": "הבא",
"com_ui_no": "לא",
"com_ui_no_backup_codes": "אין קודי גיבוי זמינים. אנא צור קודים חדשים",
"com_ui_no_bookmarks": "עדיין אין לך סימניות. בחר שיחה והוסף סימניה חדשה",
"com_ui_no_category": "אין קטגוריה",
"com_ui_no_changes": "אין שינויים לעדכן",
"com_ui_no_data": "השדה חייב להכיל תוכן, הוא לא יכול להישאר ריק",
"com_ui_no_terms_content": "אין תוכן תנאים והגבלות להצגה",
"com_ui_no_valid_items": "השדה חייב להכיל תוכן, הוא לא יכול להישאר ריק",
"com_ui_none": "אף אחד",
"com_ui_none_selected": "לא ",
"com_ui_not_used": "לא בשימוש",
"com_ui_nothing_found": "לא נמצא",
"com_ui_oauth": "פרוטוקול אימות פתוח (OAuth)",
"com_ui_of": "של",
"com_ui_off": "של",
"com_ui_on": "פעיל",
"com_ui_page": "עמוד",
"com_ui_prev": "הקודם",
"com_ui_preview": "תצוגה מקדימה",
"com_ui_privacy_policy": "מדיניות פרטיות",
"com_ui_privacy_policy_url": "קישור למדיניות הפרטיות",
"com_ui_prompt": "הנחיה (פרומפט)",
"com_ui_prompt_already_shared_to_all": "ההנחיה הזו כבר משותפת עם כל המשתמשים",
"com_ui_prompt_name": "שם הנחיה (פרומפט)",
"com_ui_prompt_name_required": "נדרש שם הנחיה (פרומפט)",
"com_ui_prompt_preview_not_shared": "היוצר לא אפשר שיתוף פעולה להנחיה זו",
"com_ui_prompt_text": "טקסט",
"com_ui_prompt_text_required": "נדרש טקסט",
"com_ui_prompt_update_error": "אירעה שגיאה בעדכון ההנחיה (פרומפט)",
"com_ui_prompts": "הנחיות (פרומפטים)",
"com_ui_prompts_allow_create": "אפשר יצירת הנחיות",
"com_ui_prompts_allow_share_global": "אפשר שיתוף הנחיות (פרומפטים) עם כל המשתמשים",
"com_ui_prompts_allow_use": "אפשר שימוש בהנחיות (פרומפטים)",
"com_ui_provider": "ספק",
"com_ui_read_aloud": "הקראה",
"com_ui_refresh_link": "רענון קישור",
"com_ui_regenerate": "לחדש",
"com_ui_regenerate_backup": "צור קודי גיבוי מחדש",
"com_ui_regenerating": "יוצר מחדש...",
"com_ui_region": "איזור",
"com_ui_rename": "שנה שם",
"com_ui_rename_prompt": "שנה שם הנחיה (פרומפט)",
"com_ui_requires_auth": "נדרש אימות",
"com_ui_reset_var": "איפוס {{0}}",
"com_ui_result": "תוצאה",
"com_ui_revoke": "בטל",
"com_ui_revoke_info": "בטל את כל האישורים שסופקו על ידי המשתמש",
"com_ui_revoke_key_confirm": "האם אתה בטוח שברצונך לבטל את המפתח הזה?",
"com_ui_revoke_key_endpoint": "ביטול מפתח עבור {{0}}",
"com_ui_revoke_keys": "ביטול מפתחות",
"com_ui_revoke_keys_confirm": "האם אתה בטוח שברצונך לבטל את כל המפתחות?",
"com_ui_role_select": "תפקיד",
"com_ui_roleplay": "משחק תפקידים",
"com_ui_run_code": "הרץ קו",
"com_ui_run_code_error": "אירעה שגיאה בהרצת הקוד",
"com_ui_save": "שמור",
"com_ui_save_submit": "שמור ושלח",
"com_ui_saved": "שמור!",
"com_ui_schema": "סכמה",
"com_ui_scope": "תחום",
"com_ui_search": "חיפוש",
"com_ui_secret_key": "מפתח סודי",
"com_ui_select": "בחר",
"com_ui_select_file": "בחר קובץ",
"com_ui_select_model": "בחר מודל",
@@ -668,6 +765,7 @@
"com_ui_share_create_message": "שמך וכל הודעה שתוסיף לאחר השיתוף יישארו פרטיים.",
"com_ui_share_delete_error": "אירעה שגיאה בעת מחיקת הקישור המשותף.",
"com_ui_share_error": "אירעה שגיאה בעת שיתוף קישור הצ'אט",
"com_ui_share_form_description": "השדה חייב להכיל תוכן, הוא אינו יכול להישאר ריק",
"com_ui_share_link_to_chat": "שתף קישור בצ'אט",
"com_ui_share_to_all_users": "שתף עם כל המשתמשים",
"com_ui_share_update_message": "השם שלך, ההוראות המותאמות אישית וכל ההודעות שתוסיף לאחר השיתוף יישארו פרטיים.",
@@ -677,12 +775,14 @@
"com_ui_shared_link_not_found": "הקישור המשותף לא נמצא",
"com_ui_shared_prompts": "הנחיות (פרומפטים) משותפות",
"com_ui_shop": "קניות",
"com_ui_show": "הצג",
"com_ui_show_all": "הראה הכל",
"com_ui_show_qr": "הראה קוד QR",
"com_ui_sign_in_to_domain": "היכנס אל {{0}}",
"com_ui_simple": "פשוט",
"com_ui_size": "סוג",
"com_ui_special_variables": "משתנים מיוחדים:",
"com_ui_special_variables_info": "השתמש ב-`{{current_date}}` עבור התאריך הנוכחי, וב-`{{current_user}}` עבור שם החשבון שלך.",
"com_ui_speech_while_submitting": "לא ניתן לשלוח אודיו בזמן שנוצרת תגובה",
"com_ui_stop": "עצור",
"com_ui_storage": "אחסון",
@@ -696,12 +796,16 @@
"com_ui_token_exchange_method": "שיטת החלפת טוקנים",
"com_ui_token_url": "קישור URL לטוקן",
"com_ui_tools": "כלים",
"com_ui_travel": "מסע",
"com_ui_unarchive": "לארכיון",
"com_ui_unarchive_error": "אירעה שגיאה בארכיון השיחה",
"com_ui_unknown": "לא ידוע",
"com_ui_update": "עדכון",
"com_ui_upload": "העלה",
"com_ui_upload_code_files": "העלאה עבור מפענח הקוד",
"com_ui_upload_delay": "העלאת \"{{0}}\" לוקחת יותר זמן מהצפוי. אנא המתן בזמן שהקובץ מסיים את האינדוקס לאחזור.",
"com_ui_upload_error": "אירעה שגיאה בהעלאת הקובץ שלך",
"com_ui_upload_file_search": "העלאה לחיפוש בקבצים",
"com_ui_upload_files": "העלה קבצים",
"com_ui_upload_image": "העלה תמונה",
"com_ui_upload_image_input": "העלה תמונה",
@@ -709,9 +813,14 @@
"com_ui_upload_invalid_var": "אין אפשרות להעלות את הקובץ. התמונה צריכה להיות בגודל של עד {{0}} MB",
"com_ui_upload_success": "הקובץ הועלה בהצלחה",
"com_ui_upload_type": "בחר סוג העלאה",
"com_ui_use_2fa_code": "השתמש בקוד אימות דו-שלבי (2FA) במקום",
"com_ui_use_backup_code": "השתמש בקוד גיבוי במקום",
"com_ui_use_micrphone": "שימוש במיקורפון",
"com_ui_use_prompt": "השתמש בהנחיה (פרומפט)",
"com_ui_used": "נוצל",
"com_ui_variables": "משתנים",
"com_ui_variables_info": "השתמש בסוגריים מסולסלות כפולות בטקסט שלך ליצירת משתנים, לדוגמא `{{example variable}}`, כדי למלא אותם מאוחר יותר בשימוש בהנחיה.",
"com_ui_verify": "אמת",
"com_ui_version_var": "גרסה {{0}}",
"com_ui_versions": "גרסה",
"com_ui_view_source": "הצג צ'אט מקורי",

View File

@@ -1,9 +1,16 @@
{
"chat_direction_left_to_right": "algo precisa ir aqui. esta vazio",
"chat_direction_right_to_left": "algo precisa ir aqui. esta vazio",
"com_a11y_ai_composing": "A IA ainda está compondo.",
"com_a11y_end": "A IA terminou de responder.",
"com_a11y_start": "A IA começou a responder.",
"com_agents_allow_editing": "Permitir que outros usuários editem seu agente",
"com_agents_by_librechat": "por LibreChat",
"com_agents_code_interpreter": "Quando ativado, permite que seu agente aproveite a API do interpretador de código LibreChat para executar o código gerado, incluindo o processamento de arquivos, com segurança. Requer uma chave de API válida.",
"com_agents_code_interpreter_title": "API do Interpretador de Código",
"com_agents_create_error": "Houve um erro ao criar seu agente.",
"com_agents_description_placeholder": "Opcional: Descreva seu Agente aqui",
"com_agents_enable_file_search": "Habilitar pesquisa de arquivos",
"com_agents_instructions_placeholder": "As instruções do sistema que o agente usa",
"com_agents_name_placeholder": "Opcional: O nome do agente",
"com_agents_search_name": "Pesquisar agentes por nome",
@@ -626,6 +633,8 @@
"com_ui_variables_info": "Use chaves duplas no seu texto para criar variáveis, por exemplo, `{{exemplo de variável}}`, para preencher posteriormente ao usar o prompt.",
"com_ui_version_var": "Versão {{0}}",
"com_ui_versions": "Versões",
"com_ui_write": "Escrevendo",
"com_ui_yes": "Sim",
"com_ui_zoom": "Zoom",
"com_user_message": "Você"
}

View File

@@ -1,4 +1,6 @@
{
"chat_direction_left_to_right": "未找到描述。",
"chat_direction_right_to_left": "未找到描述。",
"com_a11y_ai_composing": "AI 仍在撰写中。",
"com_a11y_end": "AI 已完成回复。",
"com_a11y_start": "AI 已开始回复。",
@@ -85,6 +87,7 @@
"com_auth_email_verification_redirecting": "在 {{0}} 秒后重定向...",
"com_auth_email_verification_resend_prompt": "未收到邮件?",
"com_auth_email_verification_success": "邮箱验证成功",
"com_auth_email_verifying_ellipsis": "验证中...",
"com_auth_error_create": "注册账户过程中出现错误,请重试。",
"com_auth_error_invalid_reset_token": "重置密码的密钥已失效。",
"com_auth_error_login": "无法登录,请确认提供的账户密码正确,并重新尝试。",
@@ -121,9 +124,11 @@
"com_auth_submit_registration": "注册提交",
"com_auth_to_reset_your_password": "重置密码。",
"com_auth_to_try_again": "再试一次。",
"com_auth_two_factor": "查看您首选的一次性密码应用程序,获取密码",
"com_auth_username": "用户名(可选)",
"com_auth_username_max_length": "用户名最多 20 个字符",
"com_auth_username_min_length": "用户名至少 2 个字符",
"com_auth_verify_your_identity": "验证您的身份",
"com_auth_welcome_back": "欢迎",
"com_click_to_download": "(点击此处下载)",
"com_download_expired": "下载已过期",
@@ -136,6 +141,8 @@
"com_endpoint_anthropic_maxoutputtokens": "响应中可以生成的最大令牌数。指定较低的值以获得较短的响应,指定较高的值以获得较长的响应。注意:模型可能会在达到此最大值之前停止。",
"com_endpoint_anthropic_prompt_cache": "提示词缓存允许在 API 调用中复用大型上下文或指令,从而降低成本和延迟",
"com_endpoint_anthropic_temp": "值介于 0 到 1 之间。对于分析性/选择性任务,值应更接近 0对于创造性和生成性任务值应更接近 1。我们建议更改该参数或 Top P但不要同时更改这两个参数。",
"com_endpoint_anthropic_thinking": "启用支持的 Claude 模型3.7 Sonnet的内部推理。注要求设置 \"思维预算\",且低于 \"最大输出令牌\"。",
"com_endpoint_anthropic_thinking_budget": "决定 Claude 内部推理过程允许使用的最大Token 数。尽管 Claude 可能不会使用分配的全部预算,尤其是在超过 32K 的范围内,但较大的预算可以对复杂问题进行更深入的分析,从而提高响应质量。此设置必须小于 \"最大输出 Token \"。",
"com_endpoint_anthropic_topk": "top-k 会改变模型选择输出词元的方式。top-k 为 1 意味着所选词是模型词汇中概率最大的(也称为贪心解码),而 top-k 为 3 意味着下一个词是从 3 个概率最大的词中选出的(使用随机性)。",
"com_endpoint_anthropic_topp": "top-p核采样会改变模型选择输出词元的方式。从概率最大的 K参见topK参数向最小的 K 选择,直到它们的概率之和等于 top-p 值。",
"com_endpoint_assistant": "助手",
@@ -198,6 +205,7 @@
"com_endpoint_openai_max_tokens": "可选的 'max_tokens' 字段,表示在聊天补全中可生成的最大词元数量。输入词元和生成词元的总长度受模型上下文长度的限制。如果该数值超过最大上下文词元数,您可能会遇到错误。",
"com_endpoint_openai_pres": "值介于 -2.0 到 2.0 之间。正值将惩罚当前已经使用的词元,从而增加讨论新话题的可能性。",
"com_endpoint_openai_prompt_prefix_placeholder": "在系统消息中添加自定义指令,默认为空",
"com_endpoint_openai_reasoning_effort": "仅 o1 模型:限制推理模型的推理工作量。减少推理工作可加快回复速度,减少回复中用于推理的标记。",
"com_endpoint_openai_resend": "重新发送所有先前附加的图像。注意:这会显着增加词元成本,并且可能会遇到很多关于图像附件的错误。",
"com_endpoint_openai_resend_files": "重新发送所有先前附加的文件。注意:这会显着增加词元成本,并且可能会遇到很多关于图像附件的错误。",
"com_endpoint_openai_stop": "最多 4 个序列API 将停止生成更多词元。",
@@ -211,6 +219,7 @@
"com_endpoint_plug_use_functions": "使用函数",
"com_endpoint_presence_penalty": "话题新鲜度",
"com_endpoint_preset": "预设",
"com_endpoint_preset_custom_name_placeholder": "这里需要放置一些内容,但目前是空的。",
"com_endpoint_preset_default": "现在是默认预设。",
"com_endpoint_preset_default_item": "默认:",
"com_endpoint_preset_default_none": "无默认预设可用。",
@@ -239,6 +248,8 @@
"com_endpoint_stop": "停止序列",
"com_endpoint_stop_placeholder": "按 `Enter` 键分隔多个值",
"com_endpoint_temperature": "随机性",
"com_endpoint_thinking": "思考中",
"com_endpoint_thinking_budget": "预算思考",
"com_endpoint_top_k": "Top K",
"com_endpoint_top_p": "Top P",
"com_endpoint_use_active_assistant": "使用激活的助手",
@@ -258,8 +269,10 @@
"com_files_filter": "筛选文件...",
"com_files_no_results": "无结果。",
"com_files_number_selected": "已选择 {{0}} 个文件(共 {{1}} 个文件)",
"com_files_table": "这里需要放一些内容,但目前是空的。",
"com_generated_files": "生成的文件",
"com_hide_examples": "隐藏示例",
"com_nav_2fa": "双重身份验证 (2FA)",
"com_nav_account_settings": "账户设置",
"com_nav_always_make_prod": "始终应用新版本",
"com_nav_archive_created_at": "归档时间",
@@ -325,6 +338,7 @@
"com_nav_help_faq": "帮助",
"com_nav_hide_panel": "隐藏最右侧面板",
"com_nav_info_code_artifacts": "启用在对话旁显示的实验性代码工件",
"com_nav_info_code_artifacts_agent": "使该代理能够使用代码附件。默认情况下,除非启用“自定义提示模式”,否则会添加与附件使用相关的额外说明。",
"com_nav_info_custom_prompt_mode": "启用后,默认的工件系统提示将不会包含在内。在此模式下,必须手动提供所有生成工件的指令。",
"com_nav_info_enter_to_send": "启用后,按下 `ENTER` 将发送您的消息。禁用后,按下 `ENTER` 将添加新行,您需要按下 `CTRL + ENTER` / `⌘ + ENTER` 来发送消息。",
"com_nav_info_fork_change_default": "`仅可见消息` 仅包含到所选消息的直接路径,`包含相关分支` 添加路径上的分支,`包含所有目标` 包括所有连接的消息和分支。",
@@ -343,6 +357,7 @@
"com_nav_lang_estonian": "Eesti keel",
"com_nav_lang_finnish": "Suomi",
"com_nav_lang_french": "Français ",
"com_nav_lang_georgian": "格鲁吉亚语Georgian",
"com_nav_lang_german": "Deutsch",
"com_nav_lang_hebrew": "עברית",
"com_nav_lang_indonesia": "Indonesia",
@@ -389,6 +404,7 @@
"com_nav_settings": "设置",
"com_nav_shared_links": "共享链接",
"com_nav_show_code": "使用代码解释器时始终显示代码",
"com_nav_show_thinking": "默认情况下启用思考下拉菜单",
"com_nav_slash_command": "/-命令",
"com_nav_slash_command_description": "切换至命令 “/” 以通过键盘选择提示词",
"com_nav_source_buffer_error": "设置音频播放时发生错误。请刷新页面。",
@@ -427,6 +443,16 @@
"com_sidepanel_parameters": "参数",
"com_sidepanel_select_agent": "选择助手",
"com_sidepanel_select_assistant": "选择助手",
"com_ui_2fa_account_security": "双重身份验证为您的账户提供了额外的安全保护",
"com_ui_2fa_disable": "关闭双重身份验证",
"com_ui_2fa_disable_error": "禁用双重身份验证时出现错误",
"com_ui_2fa_disabled": "双重身份验证已被禁用",
"com_ui_2fa_enable": "双重身份验证",
"com_ui_2fa_enabled": "双重身份验证已启用",
"com_ui_2fa_generate_error": "生成双重身份验证设置时发生错误。",
"com_ui_2fa_invalid": "无效的双重身份验证代码",
"com_ui_2fa_setup": "设置双重身份验证",
"com_ui_2fa_verified": "成功验证双重身份验证",
"com_ui_accept": "我接受",
"com_ui_add": "添加",
"com_ui_add_model_preset": "添加一个模型或预设以获得额外的回复",
@@ -441,17 +467,22 @@
"com_ui_agent_duplicate_error": "复制助手时发生错误",
"com_ui_agent_duplicated": "助手复制成功",
"com_ui_agent_editing_allowed": "其他用户已可以编辑此助手",
"com_ui_agent_shared_to_all": "这里需要填入一些内容 之前是空的",
"com_ui_agents": "代理",
"com_ui_agents_allow_create": "允许创建助手",
"com_ui_agents_allow_share_global": "允许与所有用户共享助手",
"com_ui_agents_allow_use": "允许使用助手",
"com_ui_all": "所有",
"com_ui_all_proper": "所有",
"com_ui_analyzing": "正在分析",
"com_ui_analyzing_finished": "完成分析",
"com_ui_api_key": "API 密钥",
"com_ui_archive": "归档",
"com_ui_archive_error": "归档对话失败",
"com_ui_artifact_click": "点击以打开",
"com_ui_artifacts": "Artifacts",
"com_ui_artifacts_toggle": "切换至 Artifacts UI",
"com_ui_artifacts_toggle_agent": "启用附件",
"com_ui_ascending": "升序",
"com_ui_assistant": "助手",
"com_ui_assistant_delete_error": "删除助手时出现错误",
@@ -469,6 +500,7 @@
"com_ui_authentication": "认证",
"com_ui_authentication_type": "认证类型",
"com_ui_avatar": "头像",
"com_ui_azure": "Azure",
"com_ui_back_to_chat": "返回对话",
"com_ui_back_to_prompts": "返回提示词",
"com_ui_basic": "基本",

View File

@@ -11,7 +11,14 @@ const availableTools = atom<Record<string, TPlugin>>({
default: {},
});
// New atom to hold the decrypted private key (as a CryptoKey)
const decryptedPrivateKey = atom<CryptoKey | null>({
key: 'decryptedPrivateKey',
default: null,
});
export default {
user,
availableTools,
};
decryptedPrivateKey,
};
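A minimal sketch of how a client hook might consume the new atom, assuming the store's default export is reachable via the `~/store` alias and the app is already wrapped in Recoil's `RecoilRoot`; the `unlock` helper and its Web Crypto parameters are illustrative only, not the E2EE implementation itself.

```typescript
import { useRecoilState } from 'recoil';
import store from '~/store'; // assumed alias for the default export above

export function useDecryptedPrivateKey() {
  // Keeps the decrypted key in memory only; it is never written to persistent storage
  const [privateKey, setPrivateKey] = useRecoilState(store.decryptedPrivateKey);

  // Hypothetical unlock step: import raw PKCS#8 key material into a non-extractable CryptoKey
  const unlock = async (rawKey: ArrayBuffer) => {
    const key = await crypto.subtle.importKey(
      'pkcs8',
      rawKey,
      { name: 'RSA-OAEP', hash: 'SHA-256' },
      false,
      ['decrypt'],
    );
    setPrivateKey(key);
  };

  return { privateKey, unlock };
}
```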

package-lock.json (generated; 2638 changed lines)

File diff suppressed because it is too large

View File

@@ -1,6 +1,6 @@
{
"name": "librechat-data-provider",
"version": "0.7.6993",
"version": "0.7.6996",
"description": "data services for librechat apps",
"main": "dist/index.js",
"module": "dist/index.es.js",

View File

@@ -585,21 +585,99 @@ describe('resolveRef', () => {
openapiSpec.paths['/ai.chatgpt.render-flowchart']?.post
?.requestBody as OpenAPIV3.RequestBodyObject
).content['application/json'].schema;
expect(flowchartRequestRef).toBeDefined();
const resolvedFlowchartRequest = resolveRef(
flowchartRequestRef as OpenAPIV3.RequestBodyObject,
openapiSpec.components,
);
expect(resolvedFlowchartRequest).toBeDefined();
expect(resolvedFlowchartRequest.type).toBe('object');
const properties = resolvedFlowchartRequest.properties as FlowchartSchema;
expect(properties).toBeDefined();
expect(flowchartRequestRef).toBeDefined();
const resolvedSchemaObject = resolveRef(
flowchartRequestRef as OpenAPIV3.ReferenceObject,
openapiSpec.components,
) as OpenAPIV3.SchemaObject;
expect(resolvedSchemaObject).toBeDefined();
expect(resolvedSchemaObject.type).toBe('object');
expect(resolvedSchemaObject.properties).toBeDefined();
const properties = resolvedSchemaObject.properties as FlowchartSchema;
expect(properties.mermaid).toBeDefined();
expect(properties.mermaid.type).toBe('string');
});
});
describe('resolveRef general cases', () => {
const spec = {
openapi: '3.0.0',
info: { title: 'TestSpec', version: '1.0.0' },
paths: {},
components: {
schemas: {
TestSchema: { type: 'string' },
},
parameters: {
TestParam: {
name: 'myParam',
in: 'query',
required: false,
schema: { $ref: '#/components/schemas/TestSchema' },
},
},
requestBodies: {
TestRequestBody: {
content: {
'application/json': {
schema: { $ref: '#/components/schemas/TestSchema' },
},
},
},
},
},
} satisfies OpenAPIV3.Document;
it('resolves schema refs correctly', () => {
const schemaRef: OpenAPIV3.ReferenceObject = { $ref: '#/components/schemas/TestSchema' };
const resolvedSchema = resolveRef<OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject>(
schemaRef,
spec.components,
);
expect(resolvedSchema.type).toEqual('string');
});
it('resolves parameter refs correctly, then schema within parameter', () => {
const paramRef: OpenAPIV3.ReferenceObject = { $ref: '#/components/parameters/TestParam' };
const resolvedParam = resolveRef<OpenAPIV3.ReferenceObject | OpenAPIV3.ParameterObject>(
paramRef,
spec.components,
);
expect(resolvedParam.name).toEqual('myParam');
expect(resolvedParam.in).toEqual('query');
expect(resolvedParam.required).toBe(false);
const paramSchema = resolveRef<OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject>(
resolvedParam.schema as OpenAPIV3.ReferenceObject,
spec.components,
);
expect(paramSchema.type).toEqual('string');
});
it('resolves requestBody refs correctly, then schema within requestBody', () => {
const requestBodyRef: OpenAPIV3.ReferenceObject = {
$ref: '#/components/requestBodies/TestRequestBody',
};
const resolvedRequestBody = resolveRef<OpenAPIV3.ReferenceObject | OpenAPIV3.RequestBodyObject>(
requestBodyRef,
spec.components,
);
expect(resolvedRequestBody.content['application/json']).toBeDefined();
const schemaInRequestBody = resolveRef<OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject>(
resolvedRequestBody.content['application/json'].schema as OpenAPIV3.ReferenceObject,
spec.components,
);
expect(schemaInRequestBody.type).toEqual('string');
});
});
describe('openapiToFunction', () => {
it('converts OpenAPI spec to function signatures and request builders', () => {
const { functionSignatures, requestBuilders } = openapiToFunction(getWeatherOpenapiSpec);
@@ -1095,4 +1173,43 @@ describe('createURL', () => {
});
});
});
describe('openapiToFunction parameter refs resolution', () => {
const weatherSpec = {
openapi: '3.0.0',
info: { title: 'Weather', version: '1.0.0' },
servers: [{ url: 'https://api.weather.gov' }],
paths: {
'/points/{point}': {
get: {
operationId: 'getPoint',
parameters: [{ $ref: '#/components/parameters/PathPoint' }],
responses: { '200': { description: 'ok' } },
},
},
},
components: {
parameters: {
PathPoint: {
name: 'point',
in: 'path',
required: true,
schema: { type: 'string', pattern: '^(-?\\d+(?:\\.\\d+)?),(-?\\d+(?:\\.\\d+)?)$' },
},
},
},
} satisfies OpenAPIV3.Document;
it('correctly resolves $ref for parameters', () => {
const { functionSignatures } = openapiToFunction(weatherSpec, true);
const func = functionSignatures.find((sig) => sig.name === 'getPoint');
expect(func).toBeDefined();
expect(func?.parameters.properties).toHaveProperty('point');
expect(func?.parameters.required).toContain('point');
const paramSchema = func?.parameters.properties['point'] as OpenAPIV3.SchemaObject;
expect(paramSchema.type).toEqual('string');
expect(paramSchema.pattern).toEqual('^(-?\\d+(?:\\.\\d+)?),(-?\\d+(?:\\.\\d+)?)$');
});
});
});

View File

@@ -0,0 +1,52 @@
import { StdioOptionsSchema } from '../src/mcp';
describe('Environment Variable Extraction (MCP)', () => {
const originalEnv = process.env;
beforeEach(() => {
process.env = {
...originalEnv,
TEST_API_KEY: 'test-api-key-value',
ANOTHER_SECRET: 'another-secret-value',
};
});
afterEach(() => {
process.env = originalEnv;
});
describe('StdioOptionsSchema', () => {
it('should transform environment variables in the env field', () => {
const options = {
command: 'node',
args: ['server.js'],
env: {
API_KEY: '${TEST_API_KEY}',
ANOTHER_KEY: '${ANOTHER_SECRET}',
PLAIN_VALUE: 'plain-value',
NON_EXISTENT: '${NON_EXISTENT_VAR}',
},
};
const result = StdioOptionsSchema.parse(options);
expect(result.env).toEqual({
API_KEY: 'test-api-key-value',
ANOTHER_KEY: 'another-secret-value',
PLAIN_VALUE: 'plain-value',
NON_EXISTENT: '${NON_EXISTENT_VAR}',
});
});
it('should handle undefined env field', () => {
const options = {
command: 'node',
args: ['server.js'],
};
const result = StdioOptionsSchema.parse(options);
expect(result.env).toBeUndefined();
});
});
});

View File

@@ -1,48 +0,0 @@
import { extractEnvVariable } from '../src/parsers';
describe('extractEnvVariable', () => {
const originalEnv = process.env;
beforeEach(() => {
jest.resetModules();
process.env = { ...originalEnv };
});
afterAll(() => {
process.env = originalEnv;
});
test('should return the value of the environment variable', () => {
process.env.TEST_VAR = 'test_value';
expect(extractEnvVariable('${TEST_VAR}')).toBe('test_value');
});
test('should return the original string if the envrionment variable is not defined correctly', () => {
process.env.TEST_VAR = 'test_value';
expect(extractEnvVariable('${ TEST_VAR }')).toBe('${ TEST_VAR }');
});
test('should return the original string if environment variable is not set', () => {
expect(extractEnvVariable('${NON_EXISTENT_VAR}')).toBe('${NON_EXISTENT_VAR}');
});
test('should return the original string if it does not contain an environment variable', () => {
expect(extractEnvVariable('some_string')).toBe('some_string');
});
test('should handle empty strings', () => {
expect(extractEnvVariable('')).toBe('');
});
test('should handle strings without variable format', () => {
expect(extractEnvVariable('no_var_here')).toBe('no_var_here');
});
test('should not process multiple variable formats', () => {
process.env.FIRST_VAR = 'first';
process.env.SECOND_VAR = 'second';
expect(extractEnvVariable('${FIRST_VAR} and ${SECOND_VAR}')).toBe(
'${FIRST_VAR} and ${SECOND_VAR}',
);
});
});

View File

@@ -0,0 +1,129 @@
import { extractEnvVariable } from '../src/utils';
describe('Environment Variable Extraction', () => {
const originalEnv = process.env;
beforeEach(() => {
process.env = {
...originalEnv,
TEST_API_KEY: 'test-api-key-value',
ANOTHER_SECRET: 'another-secret-value',
};
});
afterEach(() => {
process.env = originalEnv;
});
describe('extractEnvVariable (original tests)', () => {
test('should return the value of the environment variable', () => {
process.env.TEST_VAR = 'test_value';
expect(extractEnvVariable('${TEST_VAR}')).toBe('test_value');
});
test('should return the original string if the environment variable is not defined correctly', () => {
process.env.TEST_VAR = 'test_value';
expect(extractEnvVariable('${ TEST_VAR }')).toBe('${ TEST_VAR }');
});
test('should return the original string if environment variable is not set', () => {
expect(extractEnvVariable('${NON_EXISTENT_VAR}')).toBe('${NON_EXISTENT_VAR}');
});
test('should return the original string if it does not contain an environment variable', () => {
expect(extractEnvVariable('some_string')).toBe('some_string');
});
test('should handle empty strings', () => {
expect(extractEnvVariable('')).toBe('');
});
test('should handle strings without variable format', () => {
expect(extractEnvVariable('no_var_here')).toBe('no_var_here');
});
/** No longer the expected behavior; keeping for reference */
test.skip('should not process multiple variable formats', () => {
process.env.FIRST_VAR = 'first';
process.env.SECOND_VAR = 'second';
expect(extractEnvVariable('${FIRST_VAR} and ${SECOND_VAR}')).toBe(
'${FIRST_VAR} and ${SECOND_VAR}',
);
});
});
describe('extractEnvVariable function', () => {
it('should extract environment variables from exact matches', () => {
expect(extractEnvVariable('${TEST_API_KEY}')).toBe('test-api-key-value');
expect(extractEnvVariable('${ANOTHER_SECRET}')).toBe('another-secret-value');
});
it('should extract environment variables from strings with prefixes', () => {
expect(extractEnvVariable('prefix-${TEST_API_KEY}')).toBe('prefix-test-api-key-value');
});
it('should extract environment variables from strings with suffixes', () => {
expect(extractEnvVariable('${TEST_API_KEY}-suffix')).toBe('test-api-key-value-suffix');
});
it('should extract environment variables from strings with both prefixes and suffixes', () => {
expect(extractEnvVariable('prefix-${TEST_API_KEY}-suffix')).toBe(
'prefix-test-api-key-value-suffix',
);
});
it('should not match invalid patterns', () => {
expect(extractEnvVariable('$TEST_API_KEY')).toBe('$TEST_API_KEY');
expect(extractEnvVariable('{TEST_API_KEY}')).toBe('{TEST_API_KEY}');
expect(extractEnvVariable('TEST_API_KEY')).toBe('TEST_API_KEY');
});
});
describe('extractEnvVariable', () => {
it('should extract environment variable values', () => {
expect(extractEnvVariable('${TEST_API_KEY}')).toBe('test-api-key-value');
expect(extractEnvVariable('${ANOTHER_SECRET}')).toBe('another-secret-value');
});
it('should return the original string if environment variable is not found', () => {
expect(extractEnvVariable('${NON_EXISTENT_VAR}')).toBe('${NON_EXISTENT_VAR}');
});
it('should return the original string if no environment variable pattern is found', () => {
expect(extractEnvVariable('plain-string')).toBe('plain-string');
});
});
describe('extractEnvVariable space trimming', () => {
beforeEach(() => {
process.env.HELLO = 'world';
process.env.USER = 'testuser';
});
it('should extract the value when string contains only an environment variable with surrounding whitespace', () => {
expect(extractEnvVariable(' ${HELLO} ')).toBe('world');
expect(extractEnvVariable(' ${HELLO} ')).toBe('world');
expect(extractEnvVariable('\t${HELLO}\n')).toBe('world');
});
it('should preserve content when variable is part of a larger string', () => {
expect(extractEnvVariable('Hello ${USER}!')).toBe('Hello testuser!');
expect(extractEnvVariable(' Hello ${USER}! ')).toBe('Hello testuser!');
});
it('should not handle multiple variables', () => {
expect(extractEnvVariable('${HELLO} ${USER}')).toBe('${HELLO} ${USER}');
expect(extractEnvVariable(' ${HELLO} ${USER} ')).toBe('${HELLO} ${USER}');
});
it('should handle undefined variables', () => {
expect(extractEnvVariable(' ${UNDEFINED_VAR} ')).toBe('${UNDEFINED_VAR}');
});
it('should handle mixed content correctly', () => {
expect(extractEnvVariable('Welcome, ${USER}!\nYour message: ${HELLO}')).toBe(
'Welcome, testuser!\nYour message: world',
);
});
});
});

View File

@@ -22,8 +22,8 @@ export type ParametersSchema = {
export type OpenAPISchema = OpenAPIV3.SchemaObject &
ParametersSchema & {
items?: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject;
};
items?: OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject;
};
export type ApiKeyCredentials = {
api_key: string;
@@ -43,8 +43,8 @@ export type Credentials = ApiKeyCredentials | OAuthCredentials;
type MediaTypeObject =
| undefined
| {
[media: string]: OpenAPIV3.MediaTypeObject | undefined;
};
[media: string]: OpenAPIV3.MediaTypeObject | undefined;
};
type RequestBodyObject = Omit<OpenAPIV3.RequestBodyObject, 'content'> & {
content: MediaTypeObject;
@@ -358,19 +358,29 @@ export class ActionRequest {
}
}
export function resolveRef(
schema: OpenAPIV3.SchemaObject | OpenAPIV3.ReferenceObject | RequestBodyObject,
components?: OpenAPIV3.ComponentsObject,
): OpenAPIV3.SchemaObject {
if ('$ref' in schema && components) {
const refPath = schema.$ref.replace(/^#\/components\/schemas\//, '');
const resolvedSchema = components.schemas?.[refPath];
if (!resolvedSchema) {
throw new Error(`Reference ${schema.$ref} not found`);
export function resolveRef<
T extends
| OpenAPIV3.ReferenceObject
| OpenAPIV3.SchemaObject
| OpenAPIV3.ParameterObject
| OpenAPIV3.RequestBodyObject,
>(obj: T, components?: OpenAPIV3.ComponentsObject): Exclude<T, OpenAPIV3.ReferenceObject> {
if ('$ref' in obj && components) {
const refPath = obj.$ref.replace(/^#\/components\//, '').split('/');
let resolved: unknown = components as Record<string, unknown>;
for (const segment of refPath) {
if (typeof resolved === 'object' && resolved !== null && segment in resolved) {
resolved = (resolved as Record<string, unknown>)[segment];
} else {
throw new Error(`Could not resolve reference: ${obj.$ref}`);
}
}
return resolveRef(resolvedSchema, components);
return resolveRef(resolved as typeof obj, components) as Exclude<T, OpenAPIV3.ReferenceObject>;
}
return schema as OpenAPIV3.SchemaObject;
return obj as Exclude<T, OpenAPIV3.ReferenceObject>;
}
function sanitizeOperationId(input: string) {
@@ -399,7 +409,7 @@ export function openapiToFunction(
const operationObj = operation as OpenAPIV3.OperationObject & {
'x-openai-isConsequential'?: boolean;
} & {
'x-strict'?: boolean
'x-strict'?: boolean;
};
// Operation ID is used as the function name
@@ -415,15 +425,25 @@ export function openapiToFunction(
};
if (operationObj.parameters) {
for (const param of operationObj.parameters) {
const paramObj = param as OpenAPIV3.ParameterObject;
const resolvedSchema = resolveRef(
{ ...paramObj.schema } as OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject,
for (const param of operationObj.parameters ?? []) {
const resolvedParam = resolveRef(
param,
openapiSpec.components,
);
parametersSchema.properties[paramObj.name] = resolvedSchema;
if (paramObj.required === true) {
parametersSchema.required.push(paramObj.name);
) as OpenAPIV3.ParameterObject;
const paramName = resolvedParam.name;
if (!paramName || !resolvedParam.schema) {
continue;
}
const paramSchema = resolveRef(
resolvedParam.schema,
openapiSpec.components,
) as OpenAPIV3.SchemaObject;
parametersSchema.properties[paramName] = paramSchema;
if (resolvedParam.required) {
parametersSchema.required.push(paramName);
}
}
}
@@ -446,7 +466,12 @@ export function openapiToFunction(
}
}
const functionSignature = new FunctionSignature(operationId, description, parametersSchema, isStrict);
const functionSignature = new FunctionSignature(
operationId,
description,
parametersSchema,
isStrict,
);
functionSignatures.push(functionSignature);
const actionRequest = new ActionRequest(
@@ -544,4 +569,4 @@ export function validateAndParseOpenAPISpec(specString: string): ValidationResul
console.error(error);
return { status: false, message: 'Error parsing OpenAPI spec.' };
}
}
}
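For illustration, a short sketch of the broadened `resolveRef`: it now follows any `#/components/<section>/<name>` pointer (schemas, parameters, requestBodies) and resolves nested references recursively, instead of handling `#/components/schemas/` only. The component names below are made up, and the import path is assumed.

```typescript
import type { OpenAPIV3 } from 'openapi-types';
import { resolveRef } from 'librechat-data-provider'; // assumed export location

const components: OpenAPIV3.ComponentsObject = {
  schemas: { Point: { type: 'string' } },
  parameters: {
    PathPoint: {
      name: 'point',
      in: 'path',
      required: true,
      schema: { $ref: '#/components/schemas/Point' },
    },
  },
};

// Resolve a parameter reference, then the schema reference nested inside it
const param = resolveRef<OpenAPIV3.ReferenceObject | OpenAPIV3.ParameterObject>(
  { $ref: '#/components/parameters/PathPoint' },
  components,
);
const schema = resolveRef<OpenAPIV3.ReferenceObject | OpenAPIV3.SchemaObject>(
  param.schema as OpenAPIV3.ReferenceObject,
  components,
);
// schema.type === 'string'
```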

View File

@@ -238,10 +238,14 @@ export const userTerms = () => '/api/user/terms';
export const acceptUserTerms = () => '/api/user/terms/accept';
export const banner = () => '/api/banner';
export const encryption = () => '/api/user/encryption';
// Two-Factor Endpoints
export const enableTwoFactor = () => '/api/auth/2fa/enable';
export const verifyTwoFactor = () => '/api/auth/2fa/verify';
export const confirmTwoFactor = () => '/api/auth/2fa/confirm';
export const disableTwoFactor = () => '/api/auth/2fa/disable';
export const regenerateBackupCodes = () => '/api/auth/2fa/backup/regenerate';
export const verifyTwoFactorTemp = () => '/api/auth/2fa/verify-temp';
export const verifyTwoFactorTemp = () => '/api/auth/2fa/verify-temp';

View File

@@ -6,8 +6,9 @@ import type {
TValidatedAzureConfig,
TAzureConfigValidationResult,
} from '../src/config';
import { errorsToString, extractEnvVariable, envVarRegex } from '../src/parsers';
import { extractEnvVariable, envVarRegex } from '../src/utils';
import { azureGroupConfigsSchema } from '../src/config';
import { errorsToString } from '../src/parsers';
export const deprecatedAzureVariables = [
/* "related to" precedes description text */

View File

@@ -1,6 +1,20 @@
import { z } from 'zod';
import * as s from './schemas';
type ThinkingConfig = {
type: 'enabled';
budget_tokens: number;
};
type AnthropicReasoning = {
thinking?: ThinkingConfig | boolean;
thinkingBudget?: number;
};
type AnthropicInput = BedrockConverseInput & {
additionalModelRequestFields: BedrockConverseInput['additionalModelRequestFields'] &
AnthropicReasoning;
};
export const bedrockInputSchema = s.tConversationSchema
.pick({
/* LibreChat params; optionType: 'conversation' */
@@ -21,11 +35,24 @@ export const bedrockInputSchema = s.tConversationSchema
temperature: true,
topP: true,
stop: true,
thinking: true,
thinkingBudget: true,
/* Catch-all fields */
topK: true,
additionalModelRequestFields: true,
})
.transform((obj) => s.removeNullishValues(obj))
.transform((obj) => {
if ((obj as AnthropicInput).additionalModelRequestFields?.thinking != null) {
const _obj = obj as AnthropicInput;
obj.thinking = !!_obj.additionalModelRequestFields.thinking;
obj.thinkingBudget =
typeof _obj.additionalModelRequestFields.thinking === 'object'
? (_obj.additionalModelRequestFields.thinking as ThinkingConfig)?.budget_tokens
: undefined;
delete obj.additionalModelRequestFields;
}
return s.removeNullishValues(obj);
})
.catch(() => ({}));
export type BedrockConverseInput = z.infer<typeof bedrockInputSchema>;
@@ -49,6 +76,8 @@ export const bedrockInputParser = s.tConversationSchema
temperature: true,
topP: true,
stop: true,
thinking: true,
thinkingBudget: true,
/* Catch-all fields */
topK: true,
additionalModelRequestFields: true,
@@ -87,6 +116,27 @@ export const bedrockInputParser = s.tConversationSchema
}
});
/** Default thinking and thinkingBudget for 'anthropic.claude-3-7-sonnet' models, if not defined */
if (
typeof typedData.model === 'string' &&
typedData.model.includes('anthropic.claude-3-7-sonnet')
) {
if (additionalFields.thinking === undefined) {
additionalFields.thinking = true;
} else if (additionalFields.thinking === false) {
delete additionalFields.thinking;
delete additionalFields.thinkingBudget;
}
if (additionalFields.thinking === true && additionalFields.thinkingBudget === undefined) {
additionalFields.thinkingBudget = 2000;
}
additionalFields.anthropic_beta = ['output-128k-2025-02-19'];
} else if (additionalFields.thinking != null || additionalFields.thinkingBudget != null) {
delete additionalFields.thinking;
delete additionalFields.thinkingBudget;
}
if (Object.keys(additionalFields).length > 0) {
typedData.additionalModelRequestFields = {
...((typedData.additionalModelRequestFields as Record<string, unknown> | undefined) || {}),
@@ -104,9 +154,34 @@ export const bedrockInputParser = s.tConversationSchema
})
.catch(() => ({}));
/**
* Configures the "thinking" parameter based on given input and thinking options.
*
* @param data - The parsed Bedrock request options object
* @returns The object with thinking configured appropriately
*/
function configureThinking(data: AnthropicInput): AnthropicInput {
const updatedData = { ...data };
if (updatedData.additionalModelRequestFields?.thinking === true) {
updatedData.maxTokens = updatedData.maxTokens ?? updatedData.maxOutputTokens ?? 8192;
delete updatedData.maxOutputTokens;
const thinkingConfig: AnthropicReasoning['thinking'] = {
type: 'enabled',
budget_tokens: updatedData.additionalModelRequestFields.thinkingBudget ?? 2000,
};
if (thinkingConfig.budget_tokens > updatedData.maxTokens) {
thinkingConfig.budget_tokens = Math.floor(updatedData.maxTokens * 0.9);
}
updatedData.additionalModelRequestFields.thinking = thinkingConfig;
delete updatedData.additionalModelRequestFields.thinkingBudget;
}
return updatedData;
}
export const bedrockOutputParser = (data: Record<string, unknown>) => {
const knownKeys = [...Object.keys(s.tConversationSchema.shape), 'topK', 'top_k'];
const result: Record<string, unknown> = {};
let result: Record<string, unknown> = {};
// Extract known fields from the root level
Object.entries(data).forEach(([key, value]) => {
@@ -125,6 +200,8 @@ export const bedrockOutputParser = (data: Record<string, unknown>) => {
if (knownKeys.includes(key)) {
if (key === 'top_k') {
result['topK'] = value;
} else if (key === 'thinking' || key === 'thinkingBudget') {
return;
} else {
result[key] = value;
}
@@ -140,8 +217,11 @@ export const bedrockOutputParser = (data: Record<string, unknown>) => {
result.maxTokens = result.maxOutputTokens;
}
// Remove additionalModelRequestFields from the result
delete result.additionalModelRequestFields;
result = configureThinking(result as AnthropicInput);
// Remove additionalModelRequestFields from the result if it doesn't contain a thinking config
if ((result as AnthropicInput).additionalModelRequestFields?.thinking == null) {
delete result.additionalModelRequestFields;
}
return result;
};
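A hedged sketch of what the thinking parameters become for a Claude 3.7 Sonnet model. Chaining the two parsers here is only to make the transforms visible in one place (the app wires them through its endpoint config), and the import path and model id are assumptions.

```typescript
import { bedrockInputParser, bedrockOutputParser } from 'librechat-data-provider'; // assumed exports

// Input side: for 'anthropic.claude-3-7-sonnet' models, thinking defaults on with a 2000-token budget
const options = bedrockInputParser.parse({
  model: 'anthropic.claude-3-7-sonnet-20250219-v1:0',
  maxOutputTokens: 8192,
}) as Record<string, unknown>;
// options.additionalModelRequestFields is roughly:
//   { thinking: true, thinkingBudget: 2000, anthropic_beta: ['output-128k-2025-02-19'] }

// Output side: configureThinking (called by bedrockOutputParser) rewrites the flags into Anthropic's shape
const llmOptions = bedrockOutputParser(options);
// llmOptions.additionalModelRequestFields.thinking is roughly { type: 'enabled', budget_tokens: 2000 },
// with budget_tokens clamped to 90% of maxTokens whenever the budget would exceed it
```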

View File

@@ -15,6 +15,7 @@ export const defaultRetrievalModels = [
'o1-preview',
'o1-mini-2024-09-12',
'o1-mini',
'o3-mini',
'chatgpt-4o-latest',
'gpt-4o-2024-05-13',
'gpt-4o-2024-08-06',
@@ -651,6 +652,8 @@ export const alternateName = {
const sharedOpenAIModels = [
'gpt-4o-mini',
'gpt-4o',
'gpt-4.5-preview',
'gpt-4.5-preview-2025-02-27',
'gpt-3.5-turbo',
'gpt-3.5-turbo-0125',
'gpt-4-turbo',
@@ -723,7 +726,7 @@ export const bedrockModels = [
export const defaultModels = {
[EModelEndpoint.azureAssistants]: sharedOpenAIModels,
[EModelEndpoint.assistants]: ['chatgpt-4o-latest', ...sharedOpenAIModels],
[EModelEndpoint.assistants]: [...sharedOpenAIModels, 'chatgpt-4o-latest'],
[EModelEndpoint.agents]: sharedOpenAIModels, // TODO: Add agent models (agentsModels)
[EModelEndpoint.google]: [
// Shared Google Models between Vertex AI & Gen AI
@@ -742,8 +745,8 @@ export const defaultModels = {
],
[EModelEndpoint.anthropic]: sharedAnthropicModels,
[EModelEndpoint.openAI]: [
'chatgpt-4o-latest',
...sharedOpenAIModels,
'chatgpt-4o-latest',
'gpt-4-vision-preview',
'gpt-3.5-turbo-instruct-0914',
'gpt-3.5-turbo-instruct',
@@ -808,6 +811,7 @@ export const supportsBalanceCheck = {
};
export const visionModels = [
'gpt-4.5',
'gpt-4o',
'gpt-4o-mini',
'o1',
@@ -856,7 +860,7 @@ export function validateVisionModel({
return visionModels.concat(additionalModels).some((visionModel) => model.includes(visionModel));
}
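A quick illustration of why a single 'gpt-4.5' entry covers the dated previews: matching is by substring over `visionModels` plus any `additionalModels` passed in. The call shape is assumed from the fragment above, and the custom model names are hypothetical.

```typescript
import { validateVisionModel } from 'librechat-data-provider'; // assumed export

validateVisionModel({ model: 'gpt-4.5-preview-2025-02-27' }); // true: the id contains 'gpt-4.5'
validateVisionModel({ model: 'gpt-4.5' });                    // true: exact entry
validateVisionModel({
  model: 'my-custom-vision-model',
  additionalModels: ['my-custom-vision'],
}); // true via additionalModels
```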
export const imageGenTools = new Set(['dalle', 'dall-e', 'stable-diffusion']);
export const imageGenTools = new Set(['dalle', 'dall-e', 'stable-diffusion', 'flux']);
/**
* Enum for collections using infinite queries

View File

@@ -775,6 +775,14 @@ export function getBanner(): Promise<t.TBannerResponse> {
return request.get(endpoints.banner());
}
export const updateUserEncryption = (
payload: t.UpdateUserEncryptionRequest,
): Promise<t.UpdateUserEncryptionResponse> => {
return request.put(endpoints.encryption(), payload);
};
export function enableTwoFactor(): Promise<t.TEnable2FAResponse> {
return request.get(endpoints.enableTwoFactor());
}
@@ -803,4 +811,4 @@ export function verifyTwoFactorTemp(
payload: t.TVerify2FATempRequest,
): Promise<t.TVerify2FATempResponse> {
return request.post(endpoints.verifyTwoFactorTemp(), payload);
}
}
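A rough sketch of how the client could wrap the new data-service calls in React Query mutations; the hook names are hypothetical, and the encryption payload shape is left to the `t.UpdateUserEncryptionRequest` type rather than guessed at.

```typescript
import { useMutation } from '@tanstack/react-query';
import { dataService } from 'librechat-data-provider';

/** Hypothetical hook: begin 2FA enrollment (GET /api/auth/2fa/enable) */
export function useEnableTwoFactor() {
  return useMutation({ mutationFn: () => dataService.enableTwoFactor() });
}

/** Hypothetical hook: save the user's encryption settings (PUT /api/user/encryption) */
export function useUpdateUserEncryption() {
  return useMutation({
    mutationFn: (payload: Parameters<typeof dataService.updateUserEncryption>[0]) =>
      dataService.updateUserEncryption(payload),
  });
}
```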

View File

@@ -31,5 +31,6 @@ export { default as request } from './request';
export { dataService };
import * as dataService from './data-service';
/* general helpers */
export * from './utils';
export * from './actions';
export { default as createPayload } from './createPayload';

View File

@@ -67,6 +67,7 @@ export enum MutationKeys {
deleteAgentAction = 'deleteAgentAction',
deleteUser = 'deleteUser',
updateRole = 'updateRole',
updateUserEncryption = 'updateUserEncryption',
enableTwoFactor = 'enableTwoFactor',
verifyTwoFactor = 'verifyTwoFactor',
}

View File

@@ -1,4 +1,5 @@
import { z } from 'zod';
import { extractEnvVariable } from './utils';
const BaseOptionsSchema = z.object({
iconPath: z.string().optional(),
@@ -18,8 +19,22 @@ export const StdioOptionsSchema = BaseOptionsSchema.extend({
* The environment to use when spawning the process.
*
* If not specified, the result of getDefaultEnvironment() will be used.
* Environment variables can be referenced using ${VAR_NAME} syntax.
*/
env: z.record(z.string(), z.string()).optional(),
env: z
.record(z.string(), z.string())
.optional()
.transform((env) => {
if (!env) {
return env;
}
const processedEnv: Record<string, string> = {};
for (const [key, value] of Object.entries(env)) {
processedEnv[key] = extractEnvVariable(value);
}
return processedEnv;
}),
/**
* How to handle stderr of the child process. This matches the semantics of Node's `child_process.spawn`.
*
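A small sketch of what the new `env` transform does at parse time; the server command and variable names are illustrative, and the import path is an assumption (the tests above import it from src/mcp directly).

```typescript
import { StdioOptionsSchema } from 'librechat-data-provider'; // assumed export path

process.env.GITHUB_TOKEN = 'ghp_example';

const parsed = StdioOptionsSchema.parse({
  command: 'npx',
  args: ['-y', '@modelcontextprotocol/server-github'],
  env: {
    GITHUB_PERSONAL_ACCESS_TOKEN: '${GITHUB_TOKEN}', // substituted from process.env
    LOG_LEVEL: 'info',                               // plain values pass through unchanged
    MISSING: '${NOT_SET}',                           // unset variables are left as the literal string
  },
});
// parsed.env?.GITHUB_PERSONAL_ACCESS_TOKEN === 'ghp_example'
```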

View File

@@ -19,6 +19,7 @@ import {
compactAssistantSchema,
} from './schemas';
import { bedrockInputSchema } from './bedrock';
import { extractEnvVariable } from './utils';
import { alternateName } from './config';
type EndpointSchema =
@@ -122,18 +123,6 @@ export function errorsToString(errors: ZodIssue[]) {
.join(' ');
}
export const envVarRegex = /^\${(.+)}$/;
/** Extracts the value of an environment variable from a string. */
export function extractEnvVariable(value: string) {
const envVarMatch = value.match(envVarRegex);
if (envVarMatch) {
// eslint-disable-next-line @typescript-eslint/strict-boolean-expressions
return process.env[envVarMatch[1]] || value;
}
return value;
}
/** Resolves header values to env variables if detected */
export function resolveHeaders(headers: Record<string, string> | undefined) {
const resolvedHeaders = { ...(headers ?? {}) };
@@ -211,6 +200,29 @@ export const parseConvo = ({
return convo;
};
/** Match GPT followed by digit, optional decimal, and optional suffix
*
* Examples: gpt-4, gpt-4o, gpt-4.5, gpt-5a, etc. */
const extractGPTVersion = (modelStr: string): string => {
const gptMatch = modelStr.match(/gpt-(\d+(?:\.\d+)?)([a-z])?/i);
if (gptMatch) {
const version = gptMatch[1];
const suffix = gptMatch[2] || '';
return `GPT-${version}${suffix}`;
}
return '';
};
/** Match omni models (o1, o3, etc.), "o" followed by a digit, possibly with decimal */
const extractOmniVersion = (modelStr: string): string => {
const omniMatch = modelStr.match(/\bo(\d+(?:\.\d+)?)\b/i);
if (omniMatch) {
const version = omniMatch[1];
return `o${version}`;
}
return '';
};
export const getResponseSender = (endpointOption: t.TEndpointOption): string => {
const {
model: _m,
@@ -238,18 +250,13 @@ export const getResponseSender = (endpointOption: t.TEndpointOption): string =>
return chatGptLabel;
} else if (modelLabel) {
return modelLabel;
} else if (model && /\bo1\b/i.test(model)) {
return 'o1';
} else if (model && /\bo3\b/i.test(model)) {
return 'o3';
} else if (model && model.includes('gpt-3')) {
return 'GPT-3.5';
} else if (model && model.includes('gpt-4o')) {
return 'GPT-4o';
} else if (model && model.includes('gpt-4')) {
return 'GPT-4';
} else if (model && extractOmniVersion(model)) {
return extractOmniVersion(model);
} else if (model && model.includes('mistral')) {
return 'Mistral';
} else if (model && model.includes('gpt-')) {
const gptVersion = extractGPTVersion(model);
return gptVersion || 'GPT';
}
return (alternateName[endpoint] as string | undefined) ?? 'ChatGPT';
}
@@ -279,14 +286,13 @@ export const getResponseSender = (endpointOption: t.TEndpointOption): string =>
return modelLabel;
} else if (chatGptLabel) {
return chatGptLabel;
} else if (model && extractOmniVersion(model)) {
return extractOmniVersion(model);
} else if (model && model.includes('mistral')) {
return 'Mistral';
} else if (model && model.includes('gpt-3')) {
return 'GPT-3.5';
} else if (model && model.includes('gpt-4o')) {
return 'GPT-4o';
} else if (model && model.includes('gpt-4')) {
return 'GPT-4';
} else if (model && model.includes('gpt-')) {
const gptVersion = extractGPTVersion(model);
return gptVersion || 'GPT';
} else if (modelDisplayLabel) {
return modelDisplayLabel;
}
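A sketch of the new label derivation, assuming getResponseSender and EModelEndpoint are imported from the data-provider package and that the openAI endpoint takes the branch shown above:

import { getResponseSender, EModelEndpoint } from 'librechat-data-provider';

// Sender labels are now derived from the version embedded in the model name:
getResponseSender({ endpoint: EModelEndpoint.openAI, model: 'gpt-4.5-preview-2025-02-27' }); // 'GPT-4.5'
getResponseSender({ endpoint: EModelEndpoint.openAI, model: 'o3-mini' });                    // 'o3'
getResponseSender({ endpoint: EModelEndpoint.openAI, model: 'gpt-4o-mini' });                // 'GPT-4o'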

View File

@@ -179,34 +179,34 @@ export const isImageVisionTool = (tool: FunctionTool | FunctionToolCall) =>
export const openAISettings = {
model: {
default: 'gpt-4o',
default: 'gpt-4o-mini' as const,
},
temperature: {
min: 0,
max: 2,
step: 0.01,
default: 1,
min: 0 as const,
max: 2 as const,
step: 0.01 as const,
default: 1 as const,
},
top_p: {
min: 0,
max: 1,
step: 0.01,
default: 1,
min: 0 as const,
max: 1 as const,
step: 0.01 as const,
default: 1 as const,
},
presence_penalty: {
min: 0,
max: 2,
step: 0.01,
default: 0,
min: 0 as const,
max: 2 as const,
step: 0.01 as const,
default: 0 as const,
},
frequency_penalty: {
min: 0,
max: 2,
step: 0.01,
default: 0,
min: 0 as const,
max: 2 as const,
step: 0.01 as const,
default: 0 as const,
},
resendFiles: {
default: true,
default: true as const,
},
maxContextTokens: {
default: undefined,
@@ -215,72 +215,72 @@ export const openAISettings = {
default: undefined,
},
imageDetail: {
default: ImageDetail.auto,
min: 0,
max: 2,
step: 1,
default: ImageDetail.auto as const,
min: 0 as const,
max: 2 as const,
step: 1 as const,
},
};
export const googleSettings = {
model: {
default: 'gemini-1.5-flash-latest',
default: 'gemini-1.5-flash-latest' as const,
},
maxOutputTokens: {
min: 1,
max: 8192,
step: 1,
default: 8192,
min: 1 as const,
max: 8192 as const,
step: 1 as const,
default: 8192 as const,
},
temperature: {
min: 0,
max: 2,
step: 0.01,
default: 1,
min: 0 as const,
max: 2 as const,
step: 0.01 as const,
default: 1 as const,
},
topP: {
min: 0,
max: 1,
step: 0.01,
default: 0.95,
min: 0 as const,
max: 1 as const,
step: 0.01 as const,
default: 0.95 as const,
},
topK: {
min: 1,
max: 40,
step: 1,
default: 40,
min: 1 as const,
max: 40 as const,
step: 1 as const,
default: 40 as const,
},
};
const ANTHROPIC_MAX_OUTPUT = 128000;
const DEFAULT_MAX_OUTPUT = 8192;
const LEGACY_ANTHROPIC_MAX_OUTPUT = 4096;
const ANTHROPIC_MAX_OUTPUT = 128000 as const;
const DEFAULT_MAX_OUTPUT = 8192 as const;
const LEGACY_ANTHROPIC_MAX_OUTPUT = 4096 as const;
export const anthropicSettings = {
model: {
default: 'claude-3-5-sonnet-latest',
default: 'claude-3-5-sonnet-latest' as const,
},
temperature: {
min: 0,
max: 1,
step: 0.01,
default: 1,
min: 0 as const,
max: 1 as const,
step: 0.01 as const,
default: 1 as const,
},
promptCache: {
default: true,
default: true as const,
},
thinking: {
default: true,
default: true as const,
},
thinkingBudget: {
min: 1024,
step: 100,
max: 200000,
default: 2000,
min: 1024 as const,
step: 100 as const,
max: 200000 as const,
default: 2000 as const,
},
maxOutputTokens: {
min: 1,
min: 1 as const,
max: ANTHROPIC_MAX_OUTPUT,
step: 1,
step: 1 as const,
default: DEFAULT_MAX_OUTPUT,
reset: (modelName: string) => {
if (/claude-3[-.]5-sonnet/.test(modelName) || /claude-3[-.]7/.test(modelName)) {
@@ -301,28 +301,28 @@ export const anthropicSettings = {
},
},
topP: {
min: 0,
max: 1,
step: 0.01,
default: 0.7,
min: 0 as const,
max: 1 as const,
step: 0.01 as const,
default: 0.7 as const,
},
topK: {
min: 1,
max: 40,
step: 1,
default: 5,
min: 1 as const,
max: 40 as const,
step: 1 as const,
default: 5 as const,
},
resendFiles: {
default: true,
default: true as const,
},
maxContextTokens: {
default: undefined,
},
legacy: {
maxOutputTokens: {
min: 1,
min: 1 as const,
max: LEGACY_ANTHROPIC_MAX_OUTPUT,
step: 1,
step: 1 as const,
default: LEGACY_ANTHROPIC_MAX_OUTPUT,
},
},
@@ -330,34 +330,34 @@ export const anthropicSettings = {
export const agentsSettings = {
model: {
default: 'gpt-3.5-turbo-test',
default: 'gpt-3.5-turbo-test' as const,
},
temperature: {
min: 0,
max: 1,
step: 0.01,
default: 1,
min: 0 as const,
max: 1 as const,
step: 0.01 as const,
default: 1 as const,
},
top_p: {
min: 0,
max: 1,
step: 0.01,
default: 1,
min: 0 as const,
max: 1 as const,
step: 0.01 as const,
default: 1 as const,
},
presence_penalty: {
min: 0,
max: 2,
step: 0.01,
default: 0,
min: 0 as const,
max: 2 as const,
step: 0.01 as const,
default: 0 as const,
},
frequency_penalty: {
min: 0,
max: 2,
step: 0.01,
default: 0,
min: 0 as const,
max: 2 as const,
step: 0.01 as const,
default: 0 as const,
},
resendFiles: {
default: true,
default: true as const,
},
maxContextTokens: {
default: undefined,
@@ -366,7 +366,7 @@ export const agentsSettings = {
default: undefined,
},
imageDetail: {
default: ImageDetail.auto,
default: ImageDetail.auto as const,
},
};
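The repeated change in these settings objects is purely type-level: 'as const' pins each value to its literal type instead of letting it widen to number, string, or boolean. A quick sketch of the effect:

// With 'as const', the inferred property types are literals:
type DefaultOpenAIModel = typeof openAISettings.model.default;        // 'gpt-4o-mini', not string
type DefaultTemperature = typeof openAISettings.temperature.default;  // 1, not number
type ResendFilesDefault = typeof openAISettings.resendFiles.default;  // true, not boolean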
@@ -496,14 +496,17 @@ export const tMessageSchema = z.object({
thread_id: z.string().optional(),
/* frontend components */
iconURL: z.string().nullable().optional(),
iv: z.string().nullable().optional(),
authTag: z.string().nullable().optional(),
encryptedKey: z.string().nullable().optional(),
});
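The semantics of the new fields are not spelled out in this diff; judging by the names, an encrypted message would presumably carry AES-GCM style parameters, for example:

// Hypothetical values; iv/authTag/encryptedKey semantics are assumed from the field names.
const encryptedMessagePart = {
  iv: '<base64 initialization vector>',
  authTag: '<base64 authentication tag>',
  encryptedKey: '<wrapped per-message key>',
};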
export type TAttachmentMetadata = { messageId: string; toolCallId: string };
export type TAttachment =
| (TFile & TAttachmentMetadata)
| (Pick<TFile, 'filename' | 'filepath' | 'conversationId'> & {
expiresAt: number;
} & TAttachmentMetadata);
expiresAt: number;
} & TAttachmentMetadata);
export type TMessage = z.input<typeof tMessageSchema> & {
children?: TMessage[];
@@ -515,6 +518,7 @@ export type TMessage = z.input<typeof tMessageSchema> & {
siblingIndex?: number;
attachments?: TAttachment[];
clientTimestamp?: string;
messageEncryptionIV?: string;
};
export const coerceNumber = z.union([z.number(), z.string()]).transform((val) => {
@@ -768,11 +772,11 @@ export const googleSchema = tConversationSchema
.catch(() => ({}));
/**
* TODO: Map the following fields:
- presence_penalty -> presencePenalty
- frequency_penalty -> frequencyPenalty
- stop -> stopSequences
*/
* TODO: Map the following fields:
- presence_penalty -> presencePenalty
- frequency_penalty -> frequencyPenalty
- stop -> stopSequences
*/
export const googleGenConfigSchema = z
.object({
maxOutputTokens: coerceNumber.optional(),

View File

@@ -41,11 +41,11 @@ export type TEndpointOption = {
export type TPayload = Partial<TMessage> &
Partial<TEndpointOption> & {
isContinued: boolean;
conversationId: string | null;
messages?: TMessages;
isTemporary: boolean;
};
isContinued: boolean;
conversationId: string | null;
messages?: TMessages;
isTemporary: boolean;
};
export type TSubmission = {
artifacts?: string;
@@ -115,6 +115,7 @@ export type TUser = {
role: string;
provider: string;
plugins?: string[];
decryptedPrivateKey?: CryptoKey | string;
backupCodes?: TBackupCode[];
createdAt: string;
updatedAt: string;
@@ -530,3 +531,21 @@ export type TAcceptTermsResponse = {
};
export type TBannerResponse = TBanner | null;
/**
* Request type for updating user encryption keys.
*/
export type UpdateUserEncryptionRequest = {
encryptionPublicKey: string | null;
encryptedPrivateKey: string | null;
encryptionSalt: string | null;
encryptionIV: string | null;
};
/**
* Response type for updating user encryption keys.
*/
export type UpdateUserEncryptionResponse = {
success: boolean;
message?: string;
};
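A minimal usage sketch for the new mutation endpoint, assuming the request type is re-exported from the package root like the other types; the key material is placeholder data, and how it is produced (the client-side E2EE key flow) is outside this diff:

import { dataService } from 'librechat-data-provider';
import type { UpdateUserEncryptionRequest } from 'librechat-data-provider';

// Placeholder values; in practice these come from the client-side key generation flow.
const payload: UpdateUserEncryptionRequest = {
  encryptionPublicKey: '<exported public key>',
  encryptedPrivateKey: '<private key encrypted client-side>',
  encryptionSalt: '<base64 salt>',
  encryptionIV: '<base64 IV>',
};

const res = await dataService.updateUserEncryption(payload);
// res.success indicates whether the keys were stored; res.message carries optional detail.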

View File

@@ -0,0 +1,44 @@
export const envVarRegex = /^\${(.+)}$/;
/** Substitutes ${VAR_NAME} environment variable references in a string with their values from process.env; unresolved references are left as-is. */
export function extractEnvVariable(value: string) {
if (!value) {
return value;
}
// Trim the input
const trimmed = value.trim();
// Special case: if it's just a single environment variable
const singleMatch = trimmed.match(envVarRegex);
if (singleMatch) {
const varName = singleMatch[1];
return process.env[varName] || trimmed;
}
// For multiple variables, process them using a regex loop
const regex = /\${([^}]+)}/g;
let result = trimmed;
// First collect all matches and their positions
const matches = [];
let match;
while ((match = regex.exec(trimmed)) !== null) {
matches.push({
fullMatch: match[0],
varName: match[1],
index: match.index,
});
}
// Process matches in reverse order to avoid position shifts
for (let i = matches.length - 1; i >= 0; i--) {
const { fullMatch, varName, index } = matches[i];
const envValue = process.env[varName] || fullMatch;
// Replace at exact position
result = result.substring(0, index) + envValue + result.substring(index + fullMatch.length);
}
return result;
}
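A few illustrative calls against the helper above (the environment values are hypothetical):

// Assuming process.env.HOST = 'localhost' and process.env.PORT = '3080':
extractEnvVariable('${HOST}');                      // 'localhost'
extractEnvVariable('http://${HOST}:${PORT}/api');   // 'http://localhost:3080/api'
extractEnvVariable('${MISSING_VAR}');               // '${MISSING_VAR}' (left as-is when unset)
extractEnvVariable('  plain value ');               // 'plain value' (input is trimmed)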