Compare commits
45 Commits
feat/mcp-p
...
feat/multi
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
97a6074edc | ||
|
|
404d40cbef | ||
|
|
f4680b016c | ||
|
|
077224b351 | ||
|
|
9c70d1db96 | ||
|
|
543281da6c | ||
|
|
24800bfbeb | ||
|
|
07e08143e4 | ||
|
|
8ba61a86f4 | ||
|
|
56ad92fb1c | ||
|
|
1ceb52d2b5 | ||
|
|
5d267aa8e2 | ||
|
|
59d00e99f3 | ||
|
|
738d04fac4 | ||
|
|
8a5dbac0f9 | ||
|
|
434289fe92 | ||
|
|
a648ad3d13 | ||
|
|
55d63caaf4 | ||
|
|
313539d1ed | ||
|
|
f869d772f7 | ||
|
|
126b1fe412 | ||
|
|
8053ef32bf | ||
|
|
a80c1dd2ce | ||
|
|
9962ec318c | ||
|
|
1dd644b72e | ||
|
|
c925f9f39c | ||
|
|
71effb1a66 | ||
|
|
e3acd18c07 | ||
|
|
c371491e71 | ||
|
|
122773a2be | ||
|
|
fcc1eb45f4 | ||
|
|
f434ddc125 | ||
|
|
5a9dcef1bc | ||
|
|
7c0324695a | ||
|
|
17c09e6e89 | ||
|
|
e488dab5db | ||
|
|
4b75890d1f | ||
|
|
4e82eab01c | ||
|
|
f4c50ed25b | ||
|
|
9885982233 | ||
|
|
6b60ee4df8 | ||
|
|
b170a57482 | ||
|
|
bb6a69e98a | ||
|
|
a88e99dcfb | ||
|
|
b7a6d7caa6 |
@@ -1,4 +1,4 @@
|
||||
# v0.7.8
|
||||
# v0.7.9-rc1
|
||||
|
||||
# Base node image
|
||||
FROM node:20-alpine AS node
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Dockerfile.multi
|
||||
# v0.7.8
|
||||
# v0.7.9-rc1
|
||||
|
||||
# Base for all builds
|
||||
FROM node:20-alpine AS base-min
|
||||
|
||||
@@ -13,7 +13,6 @@ const {
|
||||
const { getMessages, saveMessage, updateMessage, saveConvo, getConvo } = require('~/models');
|
||||
const { checkBalance } = require('~/models/balanceMethods');
|
||||
const { truncateToolCallOutputs } = require('./prompts');
|
||||
const { addSpaceIfNeeded } = require('~/server/utils');
|
||||
const { getFiles } = require('~/models/File');
|
||||
const TextStream = require('./TextStream');
|
||||
const { logger } = require('~/config');
|
||||
@@ -572,7 +571,7 @@ class BaseClient {
|
||||
});
|
||||
}
|
||||
|
||||
const { generation = '' } = opts;
|
||||
const { editedContent } = opts;
|
||||
|
||||
// It's not necessary to push to currentMessages
|
||||
// depending on subclass implementation of handling messages
|
||||
@@ -587,11 +586,21 @@ class BaseClient {
|
||||
isCreatedByUser: false,
|
||||
model: this.modelOptions?.model ?? this.model,
|
||||
sender: this.sender,
|
||||
text: generation,
|
||||
};
|
||||
this.currentMessages.push(userMessage, latestMessage);
|
||||
} else {
|
||||
latestMessage.text = generation;
|
||||
} else if (editedContent != null) {
|
||||
// Handle editedContent for content parts
|
||||
if (editedContent && latestMessage.content && Array.isArray(latestMessage.content)) {
|
||||
const { index, text, type } = editedContent;
|
||||
if (index >= 0 && index < latestMessage.content.length) {
|
||||
const contentPart = latestMessage.content[index];
|
||||
if (type === ContentTypes.THINK && contentPart.type === ContentTypes.THINK) {
|
||||
contentPart[ContentTypes.THINK] = text;
|
||||
} else if (type === ContentTypes.TEXT && contentPart.type === ContentTypes.TEXT) {
|
||||
contentPart[ContentTypes.TEXT] = text;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
this.continued = true;
|
||||
} else {
|
||||
@@ -672,16 +681,32 @@ class BaseClient {
|
||||
};
|
||||
|
||||
if (typeof completion === 'string') {
|
||||
responseMessage.text = addSpaceIfNeeded(generation) + completion;
|
||||
responseMessage.text = completion;
|
||||
} else if (
|
||||
Array.isArray(completion) &&
|
||||
(this.clientName === EModelEndpoint.agents ||
|
||||
isParamEndpoint(this.options.endpoint, this.options.endpointType))
|
||||
) {
|
||||
responseMessage.text = '';
|
||||
responseMessage.content = completion;
|
||||
|
||||
if (!opts.editedContent || this.currentMessages.length === 0) {
|
||||
responseMessage.content = completion;
|
||||
} else {
|
||||
const latestMessage = this.currentMessages[this.currentMessages.length - 1];
|
||||
if (!latestMessage?.content) {
|
||||
responseMessage.content = completion;
|
||||
} else {
|
||||
const existingContent = [...latestMessage.content];
|
||||
const { type: editedType } = opts.editedContent;
|
||||
responseMessage.content = this.mergeEditedContent(
|
||||
existingContent,
|
||||
completion,
|
||||
editedType,
|
||||
);
|
||||
}
|
||||
}
|
||||
} else if (Array.isArray(completion)) {
|
||||
responseMessage.text = addSpaceIfNeeded(generation) + completion.join('');
|
||||
responseMessage.text = completion.join('');
|
||||
}
|
||||
|
||||
if (
|
||||
@@ -1095,6 +1120,50 @@ class BaseClient {
|
||||
return numTokens;
|
||||
}
|
||||
|
||||
/**
|
||||
* Merges completion content with existing content when editing TEXT or THINK types
|
||||
* @param {Array} existingContent - The existing content array
|
||||
* @param {Array} newCompletion - The new completion content
|
||||
* @param {string} editedType - The type of content being edited
|
||||
* @returns {Array} The merged content array
|
||||
*/
|
||||
mergeEditedContent(existingContent, newCompletion, editedType) {
|
||||
if (!newCompletion.length) {
|
||||
return existingContent.concat(newCompletion);
|
||||
}
|
||||
|
||||
if (editedType !== ContentTypes.TEXT && editedType !== ContentTypes.THINK) {
|
||||
return existingContent.concat(newCompletion);
|
||||
}
|
||||
|
||||
const lastIndex = existingContent.length - 1;
|
||||
const lastExisting = existingContent[lastIndex];
|
||||
const firstNew = newCompletion[0];
|
||||
|
||||
if (lastExisting?.type !== firstNew?.type || firstNew?.type !== editedType) {
|
||||
return existingContent.concat(newCompletion);
|
||||
}
|
||||
|
||||
const mergedContent = [...existingContent];
|
||||
if (editedType === ContentTypes.TEXT) {
|
||||
mergedContent[lastIndex] = {
|
||||
...mergedContent[lastIndex],
|
||||
[ContentTypes.TEXT]:
|
||||
(mergedContent[lastIndex][ContentTypes.TEXT] || '') + (firstNew[ContentTypes.TEXT] || ''),
|
||||
};
|
||||
} else {
|
||||
mergedContent[lastIndex] = {
|
||||
...mergedContent[lastIndex],
|
||||
[ContentTypes.THINK]:
|
||||
(mergedContent[lastIndex][ContentTypes.THINK] || '') +
|
||||
(firstNew[ContentTypes.THINK] || ''),
|
||||
};
|
||||
}
|
||||
|
||||
// Add remaining completion items
|
||||
return mergedContent.concat(newCompletion.slice(1));
|
||||
}
|
||||
|
||||
async sendPayload(payload, opts = {}) {
|
||||
if (opts && typeof opts === 'object') {
|
||||
this.setOptions(opts);
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@librechat/backend",
|
||||
"version": "v0.7.8",
|
||||
"version": "v0.7.9-rc1",
|
||||
"description": "",
|
||||
"scripts": {
|
||||
"start": "echo 'please run this from the root directory'",
|
||||
@@ -48,7 +48,7 @@
|
||||
"@langchain/google-genai": "^0.2.13",
|
||||
"@langchain/google-vertexai": "^0.2.13",
|
||||
"@langchain/textsplitters": "^0.1.0",
|
||||
"@librechat/agents": "^2.4.46",
|
||||
"@librechat/agents": "^2.4.51",
|
||||
"@librechat/api": "*",
|
||||
"@librechat/data-schemas": "*",
|
||||
"@node-saml/passport-saml": "^5.0.0",
|
||||
|
||||
@@ -525,7 +525,10 @@ class AgentClient extends BaseClient {
|
||||
messagesToProcess = [...messages.slice(-messageWindowSize)];
|
||||
}
|
||||
}
|
||||
return await this.processMemory(messagesToProcess);
|
||||
|
||||
const bufferString = getBufferString(messagesToProcess);
|
||||
const bufferMessage = new HumanMessage(`# Current Chat:\n\n${bufferString}`);
|
||||
return await this.processMemory([bufferMessage]);
|
||||
} catch (error) {
|
||||
logger.error('Memory Agent failed to process memory', error);
|
||||
}
|
||||
|
||||
@@ -14,8 +14,11 @@ const AgentController = async (req, res, next, initializeClient, addTitle) => {
|
||||
text,
|
||||
endpointOption,
|
||||
conversationId,
|
||||
isContinued = false,
|
||||
editedContent = null,
|
||||
parentMessageId = null,
|
||||
overrideParentMessageId = null,
|
||||
responseMessageId: editedResponseMessageId = null,
|
||||
} = req.body;
|
||||
|
||||
let sender;
|
||||
@@ -67,7 +70,7 @@ const AgentController = async (req, res, next, initializeClient, addTitle) => {
|
||||
handler();
|
||||
}
|
||||
} catch (e) {
|
||||
// Ignore cleanup errors
|
||||
logger.error('[AgentController] Error in cleanup handler', e);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -155,7 +158,7 @@ const AgentController = async (req, res, next, initializeClient, addTitle) => {
|
||||
try {
|
||||
res.removeListener('close', closeHandler);
|
||||
} catch (e) {
|
||||
// Ignore
|
||||
logger.error('[AgentController] Error removing close listener', e);
|
||||
}
|
||||
});
|
||||
|
||||
@@ -163,10 +166,14 @@ const AgentController = async (req, res, next, initializeClient, addTitle) => {
|
||||
user: userId,
|
||||
onStart,
|
||||
getReqData,
|
||||
isContinued,
|
||||
editedContent,
|
||||
conversationId,
|
||||
parentMessageId,
|
||||
abortController,
|
||||
overrideParentMessageId,
|
||||
isEdited: !!editedContent,
|
||||
responseMessageId: editedResponseMessageId,
|
||||
progressOptions: {
|
||||
res,
|
||||
},
|
||||
|
||||
@@ -477,7 +477,9 @@ describe('Multer Configuration', () => {
|
||||
done(new Error('Expected mkdirSync to throw an error but no error was thrown'));
|
||||
} catch (error) {
|
||||
// This is the expected behavior - mkdirSync throws synchronously for invalid paths
|
||||
expect(error.code).toBe('EACCES');
|
||||
// On Linux, this typically returns EACCES (permission denied)
|
||||
// On macOS/Darwin, this returns ENOENT (no such file or directory)
|
||||
expect(['EACCES', 'ENOENT']).toContain(error.code);
|
||||
done();
|
||||
}
|
||||
});
|
||||
|
||||
@@ -235,12 +235,13 @@ router.put('/:conversationId/:messageId', validateMessageReq, async (req, res) =
|
||||
return res.status(400).json({ error: 'Content part not found' });
|
||||
}
|
||||
|
||||
if (updatedContent[index].type !== ContentTypes.TEXT) {
|
||||
const currentPartType = updatedContent[index].type;
|
||||
if (currentPartType !== ContentTypes.TEXT && currentPartType !== ContentTypes.THINK) {
|
||||
return res.status(400).json({ error: 'Cannot update non-text content' });
|
||||
}
|
||||
|
||||
const oldText = updatedContent[index].text;
|
||||
updatedContent[index] = { type: ContentTypes.TEXT, text };
|
||||
const oldText = updatedContent[index][currentPartType];
|
||||
updatedContent[index] = { type: currentPartType, [currentPartType]: text };
|
||||
|
||||
let tokenCount = message.tokenCount;
|
||||
if (tokenCount !== undefined) {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { loadServiceKey } = require('@librechat/api');
|
||||
const { EModelEndpoint } = require('librechat-data-provider');
|
||||
const { isUserProvided } = require('~/server/utils');
|
||||
const { config } = require('./EndpointService');
|
||||
@@ -18,15 +18,7 @@ async function loadAsyncEndpoints(req) {
|
||||
path.join(__dirname, '../../..', 'data', 'auth.json');
|
||||
|
||||
try {
|
||||
if (process.env.GOOGLE_SERVICE_KEY_FILE_PATH) {
|
||||
const absolutePath = path.isAbsolute(serviceKeyPath)
|
||||
? serviceKeyPath
|
||||
: path.resolve(serviceKeyPath);
|
||||
const fileContent = fs.readFileSync(absolutePath, 'utf8');
|
||||
serviceKey = JSON.parse(fileContent);
|
||||
} else {
|
||||
serviceKey = require('~/data/auth.json');
|
||||
}
|
||||
serviceKey = await loadServiceKey(serviceKeyPath);
|
||||
} catch {
|
||||
if (i === 0) {
|
||||
i++;
|
||||
|
||||
@@ -85,7 +85,7 @@ const initializeAgent = async ({
|
||||
});
|
||||
|
||||
const provider = agent.provider;
|
||||
const { tools, toolContextMap } =
|
||||
const { tools: structuredTools, toolContextMap } =
|
||||
(await loadTools?.({
|
||||
req,
|
||||
res,
|
||||
@@ -140,6 +140,22 @@ const initializeAgent = async ({
|
||||
agent.provider = options.provider;
|
||||
}
|
||||
|
||||
/** @type {import('@librechat/agents').GenericTool[]} */
|
||||
let tools = options.tools?.length ? options.tools : structuredTools;
|
||||
if (
|
||||
(agent.provider === Providers.GOOGLE || agent.provider === Providers.VERTEXAI) &&
|
||||
options.tools?.length &&
|
||||
structuredTools?.length
|
||||
) {
|
||||
throw new Error(`{ "type": "${ErrorTypes.GOOGLE_TOOL_CONFLICT}"}`);
|
||||
} else if (
|
||||
(agent.provider === Providers.OPENAI || agent.provider === Providers.AZURE) &&
|
||||
options.tools?.length &&
|
||||
structuredTools?.length
|
||||
) {
|
||||
tools = structuredTools.concat(options.tools);
|
||||
}
|
||||
|
||||
/** @type {import('@librechat/agents').ClientOptions} */
|
||||
agent.model_parameters = { ...options.llmConfig };
|
||||
if (options.configOptions) {
|
||||
@@ -162,10 +178,10 @@ const initializeAgent = async ({
|
||||
|
||||
return {
|
||||
...agent,
|
||||
tools,
|
||||
attachments,
|
||||
resendFiles,
|
||||
toolContextMap,
|
||||
tools,
|
||||
maxContextTokens: (agentMaxContextTokens - maxTokens) * 0.9,
|
||||
};
|
||||
};
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
const { getGoogleConfig, isEnabled } = require('@librechat/api');
|
||||
const { EModelEndpoint, AuthKeys } = require('librechat-data-provider');
|
||||
const { getGoogleConfig, isEnabled, loadServiceKey } = require('@librechat/api');
|
||||
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
|
||||
const { GoogleClient } = require('~/app');
|
||||
|
||||
@@ -19,17 +18,12 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
|
||||
let serviceKey = {};
|
||||
|
||||
try {
|
||||
if (process.env.GOOGLE_SERVICE_KEY_FILE_PATH) {
|
||||
const serviceKeyPath =
|
||||
process.env.GOOGLE_SERVICE_KEY_FILE_PATH ||
|
||||
path.join(__dirname, '../../../../..', 'data', 'auth.json');
|
||||
const absolutePath = path.isAbsolute(serviceKeyPath)
|
||||
? serviceKeyPath
|
||||
: path.resolve(serviceKeyPath);
|
||||
const fileContent = fs.readFileSync(absolutePath, 'utf8');
|
||||
serviceKey = JSON.parse(fileContent);
|
||||
} else {
|
||||
serviceKey = require('~/data/auth.json');
|
||||
const serviceKeyPath =
|
||||
process.env.GOOGLE_SERVICE_KEY_FILE_PATH ||
|
||||
path.join(__dirname, '../../../..', 'data', 'auth.json');
|
||||
serviceKey = await loadServiceKey(serviceKeyPath);
|
||||
if (!serviceKey) {
|
||||
serviceKey = {};
|
||||
}
|
||||
} catch (_e) {
|
||||
// Do nothing
|
||||
|
||||
@@ -7,6 +7,16 @@ const initCustom = require('~/server/services/Endpoints/custom/initialize');
|
||||
const initGoogle = require('~/server/services/Endpoints/google/initialize');
|
||||
const { getCustomEndpointConfig } = require('~/server/services/Config');
|
||||
|
||||
/** Check if the provider is a known custom provider
|
||||
* @param {string | undefined} [provider] - The provider string
|
||||
* @returns {boolean} - True if the provider is a known custom provider, false otherwise
|
||||
*/
|
||||
function isKnownCustomProvider(provider) {
|
||||
return [Providers.XAI, Providers.OLLAMA, Providers.DEEPSEEK, Providers.OPENROUTER].includes(
|
||||
provider || '',
|
||||
);
|
||||
}
|
||||
|
||||
const providerConfigMap = {
|
||||
[Providers.XAI]: initCustom,
|
||||
[Providers.OLLAMA]: initCustom,
|
||||
@@ -46,6 +56,13 @@ async function getProviderConfig(provider) {
|
||||
overrideProvider = Providers.OPENAI;
|
||||
}
|
||||
|
||||
if (isKnownCustomProvider(overrideProvider)) {
|
||||
customEndpointConfig = await getCustomEndpointConfig(provider);
|
||||
if (!customEndpointConfig) {
|
||||
throw new Error(`Provider ${provider} not supported`);
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
getOptions,
|
||||
overrideProvider,
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
const { SystemRoles } = require('librechat-data-provider');
|
||||
const { HttpsProxyAgent } = require('https-proxy-agent');
|
||||
const { Strategy: JwtStrategy, ExtractJwt } = require('passport-jwt');
|
||||
const { updateUser, findUser } = require('~/models');
|
||||
const { logger } = require('~/config');
|
||||
@@ -13,17 +14,23 @@ const { isEnabled } = require('~/server/utils');
|
||||
* The strategy extracts the JWT from the Authorization header as a Bearer token.
|
||||
* The JWT is then verified using the signing key, and the user is retrieved from the database.
|
||||
*/
|
||||
const openIdJwtLogin = (openIdConfig) =>
|
||||
new JwtStrategy(
|
||||
const openIdJwtLogin = (openIdConfig) => {
|
||||
let jwksRsaOptions = {
|
||||
cache: isEnabled(process.env.OPENID_JWKS_URL_CACHE_ENABLED) || true,
|
||||
cacheMaxAge: process.env.OPENID_JWKS_URL_CACHE_TIME
|
||||
? eval(process.env.OPENID_JWKS_URL_CACHE_TIME)
|
||||
: 60000,
|
||||
jwksUri: openIdConfig.serverMetadata().jwks_uri,
|
||||
};
|
||||
|
||||
if (process.env.PROXY) {
|
||||
jwksRsaOptions.requestAgent = new HttpsProxyAgent(process.env.PROXY);
|
||||
}
|
||||
|
||||
return new JwtStrategy(
|
||||
{
|
||||
jwtFromRequest: ExtractJwt.fromAuthHeaderAsBearerToken(),
|
||||
secretOrKeyProvider: jwksRsa.passportJwtSecret({
|
||||
cache: isEnabled(process.env.OPENID_JWKS_URL_CACHE_ENABLED) || true,
|
||||
cacheMaxAge: process.env.OPENID_JWKS_URL_CACHE_TIME
|
||||
? eval(process.env.OPENID_JWKS_URL_CACHE_TIME)
|
||||
: 60000,
|
||||
jwksUri: openIdConfig.serverMetadata().jwks_uri,
|
||||
}),
|
||||
secretOrKeyProvider: jwksRsa.passportJwtSecret(jwksRsaOptions),
|
||||
},
|
||||
async (payload, done) => {
|
||||
try {
|
||||
@@ -48,5 +55,6 @@ const openIdJwtLogin = (openIdConfig) =>
|
||||
}
|
||||
},
|
||||
);
|
||||
};
|
||||
|
||||
module.exports = openIdJwtLogin;
|
||||
|
||||
@@ -49,7 +49,7 @@ async function customFetch(url, options) {
|
||||
logger.info(`[openidStrategy] proxy agent configured: ${process.env.PROXY}`);
|
||||
fetchOptions = {
|
||||
...options,
|
||||
dispatcher: new HttpsProxyAgent(process.env.PROXY),
|
||||
dispatcher: new undici.ProxyAgent(process.env.PROXY),
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
module.exports = {
|
||||
roots: ['<rootDir>/src'],
|
||||
roots: ['<rootDir>/src', '<rootDir>/../terms'],
|
||||
testEnvironment: 'jsdom',
|
||||
testEnvironmentOptions: {
|
||||
url: 'http://localhost:3080',
|
||||
@@ -29,6 +29,7 @@ module.exports = {
|
||||
'^test/(.*)$': '<rootDir>/test/$1',
|
||||
'^~/(.*)$': '<rootDir>/src/$1',
|
||||
'^librechat-data-provider/react-query$': '<rootDir>/../node_modules/librechat-data-provider/src/react-query',
|
||||
'^.+\\.md\\?raw$': '<rootDir>/test/rawFileMock.js',
|
||||
},
|
||||
restoreMocks: true,
|
||||
testResultsProcessor: 'jest-junit',
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@librechat/frontend",
|
||||
"version": "v0.7.8",
|
||||
"version": "v0.7.9-rc1",
|
||||
"description": "",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
|
||||
@@ -336,6 +336,11 @@ export type TAskProps = {
|
||||
export type TOptions = {
|
||||
editedMessageId?: string | null;
|
||||
editedText?: string | null;
|
||||
editedContent?: {
|
||||
index: number;
|
||||
text: string;
|
||||
type: 'text' | 'think';
|
||||
};
|
||||
isRegenerate?: boolean;
|
||||
isContinued?: boolean;
|
||||
isEdited?: boolean;
|
||||
|
||||
@@ -4,18 +4,21 @@ import {
|
||||
supportsFiles,
|
||||
mergeFileConfig,
|
||||
isAgentsEndpoint,
|
||||
isAssistantsEndpoint,
|
||||
fileConfig as defaultFileConfig,
|
||||
} from 'librechat-data-provider';
|
||||
import type { EndpointFileConfig } from 'librechat-data-provider';
|
||||
import { useGetFileConfig } from '~/data-provider';
|
||||
import AttachFileMenu from './AttachFileMenu';
|
||||
import { useChatContext } from '~/Providers';
|
||||
import AttachFile from './AttachFile';
|
||||
|
||||
function AttachFileChat({ disableInputs }: { disableInputs: boolean }) {
|
||||
const { conversation } = useChatContext();
|
||||
const conversationId = conversation?.conversationId ?? Constants.NEW_CONVO;
|
||||
const { endpoint, endpointType } = conversation ?? { endpoint: null };
|
||||
const isAgents = useMemo(() => isAgentsEndpoint(endpoint), [endpoint]);
|
||||
const isAssistants = useMemo(() => isAssistantsEndpoint(endpoint), [endpoint]);
|
||||
|
||||
const { data: fileConfig = defaultFileConfig } = useGetFileConfig({
|
||||
select: (data) => mergeFileConfig(data),
|
||||
@@ -25,7 +28,9 @@ function AttachFileChat({ disableInputs }: { disableInputs: boolean }) {
|
||||
const endpointSupportsFiles: boolean = supportsFiles[endpointType ?? endpoint ?? ''] ?? false;
|
||||
const isUploadDisabled = (disableInputs || endpointFileConfig?.disabled) ?? false;
|
||||
|
||||
if (isAgents || (endpointSupportsFiles && !isUploadDisabled)) {
|
||||
if (isAssistants && endpointSupportsFiles && !isUploadDisabled) {
|
||||
return <AttachFile disabled={disableInputs} />;
|
||||
} else if (isAgents || (endpointSupportsFiles && !isUploadDisabled)) {
|
||||
return (
|
||||
<AttachFileMenu
|
||||
disabled={disableInputs}
|
||||
@@ -34,7 +39,6 @@ function AttachFileChat({ disableInputs }: { disableInputs: boolean }) {
|
||||
/>
|
||||
);
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
|
||||
@@ -81,14 +81,23 @@ const ContentParts = memo(
|
||||
return (
|
||||
<>
|
||||
{content.map((part, idx) => {
|
||||
if (part?.type !== ContentTypes.TEXT || typeof part.text !== 'string') {
|
||||
if (!part) {
|
||||
return null;
|
||||
}
|
||||
const isTextPart =
|
||||
part?.type === ContentTypes.TEXT ||
|
||||
typeof (part as unknown as Agents.MessageContentText)?.text !== 'string';
|
||||
const isThinkPart =
|
||||
part?.type === ContentTypes.THINK ||
|
||||
typeof (part as unknown as Agents.ReasoningDeltaUpdate)?.think !== 'string';
|
||||
if (!isTextPart && !isThinkPart) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return (
|
||||
<EditTextPart
|
||||
index={idx}
|
||||
text={part.text}
|
||||
part={part as Agents.MessageContentText | Agents.ReasoningDeltaUpdate}
|
||||
messageId={messageId}
|
||||
isSubmitting={isSubmitting}
|
||||
enterEdit={enterEdit}
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
import { useRef, useEffect, useCallback, useMemo } from 'react';
|
||||
import { useForm } from 'react-hook-form';
|
||||
import { ContentTypes } from 'librechat-data-provider';
|
||||
import { useRecoilState, useRecoilValue } from 'recoil';
|
||||
import { useRef, useEffect, useCallback, useMemo } from 'react';
|
||||
import { useUpdateMessageContentMutation } from 'librechat-data-provider/react-query';
|
||||
import type { Agents } from 'librechat-data-provider';
|
||||
import type { TEditProps } from '~/common';
|
||||
import Container from '~/components/Chat/Messages/Content/Container';
|
||||
import { useChatContext, useAddedChatContext } from '~/Providers';
|
||||
@@ -12,18 +13,19 @@ import { useLocalize } from '~/hooks';
|
||||
import store from '~/store';
|
||||
|
||||
const EditTextPart = ({
|
||||
text,
|
||||
part,
|
||||
index,
|
||||
messageId,
|
||||
isSubmitting,
|
||||
enterEdit,
|
||||
}: Omit<TEditProps, 'message' | 'ask'> & {
|
||||
}: Omit<TEditProps, 'message' | 'ask' | 'text'> & {
|
||||
index: number;
|
||||
messageId: string;
|
||||
part: Agents.MessageContentText | Agents.ReasoningDeltaUpdate;
|
||||
}) => {
|
||||
const localize = useLocalize();
|
||||
const { addedIndex } = useAddedChatContext();
|
||||
const { getMessages, setMessages, conversation } = useChatContext();
|
||||
const { ask, getMessages, setMessages, conversation } = useChatContext();
|
||||
const [latestMultiMessage, setLatestMultiMessage] = useRecoilState(
|
||||
store.latestMessageFamily(addedIndex),
|
||||
);
|
||||
@@ -34,15 +36,16 @@ const EditTextPart = ({
|
||||
[getMessages, messageId],
|
||||
);
|
||||
|
||||
const chatDirection = useRecoilValue(store.chatDirection);
|
||||
|
||||
const textAreaRef = useRef<HTMLTextAreaElement | null>(null);
|
||||
const updateMessageContentMutation = useUpdateMessageContentMutation(conversationId ?? '');
|
||||
|
||||
const chatDirection = useRecoilValue(store.chatDirection).toLowerCase();
|
||||
const isRTL = chatDirection === 'rtl';
|
||||
const isRTL = chatDirection?.toLowerCase() === 'rtl';
|
||||
|
||||
const { register, handleSubmit, setValue } = useForm({
|
||||
defaultValues: {
|
||||
text: text ?? '',
|
||||
text: (ContentTypes.THINK in part ? part.think : part.text) || '',
|
||||
},
|
||||
});
|
||||
|
||||
@@ -55,15 +58,7 @@ const EditTextPart = ({
|
||||
}
|
||||
}, []);
|
||||
|
||||
/*
|
||||
const resubmitMessage = () => {
|
||||
showToast({
|
||||
status: 'warning',
|
||||
message: localize('com_warning_resubmit_unsupported'),
|
||||
});
|
||||
|
||||
// const resubmitMessage = (data: { text: string }) => {
|
||||
// Not supported by AWS Bedrock
|
||||
const resubmitMessage = (data: { text: string }) => {
|
||||
const messages = getMessages();
|
||||
const parentMessage = messages?.find((msg) => msg.messageId === message?.parentMessageId);
|
||||
|
||||
@@ -73,17 +68,19 @@ const EditTextPart = ({
|
||||
ask(
|
||||
{ ...parentMessage },
|
||||
{
|
||||
editedText: data.text,
|
||||
editedContent: {
|
||||
index,
|
||||
text: data.text,
|
||||
type: part.type,
|
||||
},
|
||||
editedMessageId: messageId,
|
||||
isRegenerate: true,
|
||||
isEdited: true,
|
||||
},
|
||||
);
|
||||
|
||||
setSiblingIdx((siblingIdx ?? 0) - 1);
|
||||
enterEdit(true);
|
||||
};
|
||||
*/
|
||||
|
||||
const updateMessage = (data: { text: string }) => {
|
||||
const messages = getMessages();
|
||||
@@ -167,13 +164,13 @@ const EditTextPart = ({
|
||||
/>
|
||||
</div>
|
||||
<div className="mt-2 flex w-full justify-center text-center">
|
||||
{/* <button
|
||||
<button
|
||||
className="btn btn-primary relative mr-2"
|
||||
disabled={isSubmitting}
|
||||
onClick={handleSubmit(resubmitMessage)}
|
||||
>
|
||||
{localize('com_ui_save_submit')}
|
||||
</button> */}
|
||||
</button>
|
||||
<button
|
||||
className="btn btn-secondary relative mr-2"
|
||||
disabled={isSubmitting}
|
||||
|
||||
@@ -1,12 +1,20 @@
|
||||
import { useMemo } from 'react';
|
||||
import type { TTermsOfService } from 'librechat-data-provider';
|
||||
import React from 'react';
|
||||
import MarkdownLite from '~/components/Chat/Messages/Content/MarkdownLite';
|
||||
import DialogTemplate from '~/components/ui/DialogTemplate';
|
||||
import OGDialogTemplate from '~/components/ui/OGDialogTemplate';
|
||||
import { useAcceptTermsMutation } from '~/data-provider';
|
||||
import { OGDialog, Button, Spinner } from '~/components';
|
||||
import { useToastContext } from '~/Providers';
|
||||
import { OGDialog } from '~/components/ui';
|
||||
import { useLocalize } from '~/hooks';
|
||||
|
||||
interface TermsModalProps {
|
||||
open: boolean;
|
||||
onOpenChange: (isOpen: boolean) => void;
|
||||
onAccept: () => void;
|
||||
onDecline: () => void;
|
||||
title?: string;
|
||||
modalContent?: string;
|
||||
}
|
||||
|
||||
const TermsAndConditionsModal = ({
|
||||
open,
|
||||
onOpenChange,
|
||||
@@ -14,17 +22,10 @@ const TermsAndConditionsModal = ({
|
||||
onDecline,
|
||||
title,
|
||||
modalContent,
|
||||
}: {
|
||||
open: boolean;
|
||||
onOpenChange: (isOpen: boolean) => void;
|
||||
onAccept: () => void;
|
||||
onDecline: () => void;
|
||||
title?: string;
|
||||
contentUrl?: string;
|
||||
modalContent?: TTermsOfService['modalContent'];
|
||||
}) => {
|
||||
}: TermsModalProps) => {
|
||||
const localize = useLocalize();
|
||||
const { showToast } = useToastContext();
|
||||
|
||||
const acceptTermsMutation = useAcceptTermsMutation({
|
||||
onSuccess: () => {
|
||||
onAccept();
|
||||
@@ -35,6 +36,8 @@ const TermsAndConditionsModal = ({
|
||||
},
|
||||
});
|
||||
|
||||
const isLoading = acceptTermsMutation.isLoading;
|
||||
|
||||
const handleAccept = () => {
|
||||
acceptTermsMutation.mutate();
|
||||
};
|
||||
@@ -51,36 +54,22 @@ const TermsAndConditionsModal = ({
|
||||
onOpenChange(isOpen);
|
||||
};
|
||||
|
||||
const content = useMemo(() => {
|
||||
if (typeof modalContent === 'string') {
|
||||
return modalContent;
|
||||
}
|
||||
|
||||
if (Array.isArray(modalContent)) {
|
||||
return modalContent.join('\n');
|
||||
}
|
||||
|
||||
return '';
|
||||
}, [modalContent]);
|
||||
|
||||
return (
|
||||
<OGDialog open={open} onOpenChange={handleOpenChange}>
|
||||
<DialogTemplate
|
||||
<OGDialogTemplate
|
||||
title={title ?? localize('com_ui_terms_and_conditions')}
|
||||
className="w-11/12 max-w-3xl sm:w-3/4 md:w-1/2 lg:w-2/5"
|
||||
showCloseButton={false}
|
||||
showCancelButton={false}
|
||||
main={
|
||||
<section
|
||||
// Motivation: This is a dialog, so its content should be focusable
|
||||
// eslint-disable-next-line jsx-a11y/no-noninteractive-tabindex
|
||||
tabIndex={0}
|
||||
className="max-h-[60vh] overflow-y-auto p-4"
|
||||
aria-label={localize('com_ui_terms_and_conditions')}
|
||||
>
|
||||
<div className="prose dark:prose-invert w-full max-w-none !text-text-primary">
|
||||
{content !== '' ? (
|
||||
<MarkdownLite content={content} />
|
||||
<div className="prose dark:prose-invert w-full max-w-none text-text-primary">
|
||||
{modalContent ? (
|
||||
<MarkdownLite content={modalContent} />
|
||||
) : (
|
||||
<p>{localize('com_ui_no_terms_content')}</p>
|
||||
)}
|
||||
@@ -89,18 +78,12 @@ const TermsAndConditionsModal = ({
|
||||
}
|
||||
buttons={
|
||||
<>
|
||||
<button
|
||||
onClick={handleDecline}
|
||||
className="inline-flex h-10 items-center justify-center rounded-lg border border-border-heavy bg-surface-secondary px-4 py-2 text-sm text-text-primary hover:bg-surface-active"
|
||||
>
|
||||
<Button onClick={handleDecline} variant="destructive" disabled={isLoading}>
|
||||
{localize('com_ui_decline')}
|
||||
</button>
|
||||
<button
|
||||
onClick={handleAccept}
|
||||
className="inline-flex h-10 items-center justify-center rounded-lg border border-border-heavy bg-surface-secondary px-4 py-2 text-sm text-text-primary hover:bg-green-500 hover:text-white focus:bg-green-500 focus:text-white dark:hover:bg-green-600 dark:focus:bg-green-600"
|
||||
>
|
||||
{localize('com_ui_accept')}
|
||||
</button>
|
||||
</Button>
|
||||
<Button onClick={handleAccept} variant="submit" disabled={isLoading}>
|
||||
{isLoading ? <Spinner /> : localize('com_ui_accept')}
|
||||
</Button>
|
||||
</>
|
||||
}
|
||||
/>
|
||||
@@ -62,6 +62,7 @@ const errorMessages = {
|
||||
const { info } = json;
|
||||
return info;
|
||||
},
|
||||
[ErrorTypes.GOOGLE_TOOL_CONFLICT]: 'com_error_google_tool_conflict',
|
||||
[ViolationTypes.BAN]:
|
||||
'Your account has been temporarily banned due to violations of our service.',
|
||||
invalid_api_key:
|
||||
|
||||
@@ -86,6 +86,7 @@ export const LangSelector = ({
|
||||
{ value: 'fr-FR', label: localize('com_nav_lang_french') },
|
||||
{ value: 'he-HE', label: localize('com_nav_lang_hebrew') },
|
||||
{ value: 'hu-HU', label: localize('com_nav_lang_hungarian') },
|
||||
{ value: 'hy-AM', label: localize('com_nav_lang_armenian') },
|
||||
{ value: 'it-IT', label: localize('com_nav_lang_italian') },
|
||||
{ value: 'pl-PL', label: localize('com_nav_lang_polish') },
|
||||
{ value: 'pt-BR', label: localize('com_nav_lang_brazilian_portuguese') },
|
||||
@@ -96,9 +97,11 @@ export const LangSelector = ({
|
||||
{ value: 'cs-CZ', label: localize('com_nav_lang_czech') },
|
||||
{ value: 'sv-SE', label: localize('com_nav_lang_swedish') },
|
||||
{ value: 'ko-KR', label: localize('com_nav_lang_korean') },
|
||||
{ value: 'lv-LV', label: localize('com_nav_lang_latvian') },
|
||||
{ value: 'vi-VN', label: localize('com_nav_lang_vietnamese') },
|
||||
{ value: 'th-TH', label: localize('com_nav_lang_thai') },
|
||||
{ value: 'tr-TR', label: localize('com_nav_lang_turkish') },
|
||||
{ value: 'ug', label: localize('com_nav_lang_uyghur') },
|
||||
{ value: 'nl-NL', label: localize('com_nav_lang_dutch') },
|
||||
{ value: 'id-ID', label: localize('com_nav_lang_indonesia') },
|
||||
{ value: 'fi-FI', label: localize('com_nav_lang_finnish') },
|
||||
|
||||
@@ -226,7 +226,7 @@ export default function AgentTool({
|
||||
}}
|
||||
className={cn(
|
||||
'h-4 w-4 rounded border border-gray-300 transition-all duration-200 hover:border-gray-400 dark:border-gray-600 dark:hover:border-gray-500',
|
||||
isExpanded ? 'opacity-100' : 'opacity-0',
|
||||
isExpanded ? 'visible' : 'pointer-events-none invisible',
|
||||
)}
|
||||
onClick={(e) => e.stopPropagation()}
|
||||
onKeyDown={(e) => {
|
||||
|
||||
@@ -18,6 +18,7 @@ function DynamicSlider({
|
||||
setOption,
|
||||
optionType,
|
||||
options,
|
||||
enumMappings,
|
||||
readonly = false,
|
||||
showDefault = false,
|
||||
includeInput = true,
|
||||
@@ -60,24 +61,68 @@ function DynamicSlider({
|
||||
|
||||
const enumToNumeric = useMemo(() => {
|
||||
if (isEnum && options) {
|
||||
return options.reduce((acc, mapping, index) => {
|
||||
acc[mapping] = index;
|
||||
return acc;
|
||||
}, {} as Record<string, number>);
|
||||
return options.reduce(
|
||||
(acc, mapping, index) => {
|
||||
acc[mapping] = index;
|
||||
return acc;
|
||||
},
|
||||
{} as Record<string, number>,
|
||||
);
|
||||
}
|
||||
return {};
|
||||
}, [isEnum, options]);
|
||||
|
||||
const valueToEnumOption = useMemo(() => {
|
||||
if (isEnum && options) {
|
||||
return options.reduce((acc, option, index) => {
|
||||
acc[index] = option;
|
||||
return acc;
|
||||
}, {} as Record<number, string>);
|
||||
return options.reduce(
|
||||
(acc, option, index) => {
|
||||
acc[index] = option;
|
||||
return acc;
|
||||
},
|
||||
{} as Record<number, string>,
|
||||
);
|
||||
}
|
||||
return {};
|
||||
}, [isEnum, options]);
|
||||
|
||||
const getDisplayValue = useCallback(
|
||||
(value: string | number | undefined | null): string => {
|
||||
if (isEnum && enumMappings && value != null) {
|
||||
const stringValue = String(value);
|
||||
// Check if the value exists in enumMappings
|
||||
if (stringValue in enumMappings) {
|
||||
const mappedValue = String(enumMappings[stringValue]);
|
||||
// Check if the mapped value is a localization key
|
||||
if (mappedValue.startsWith('com_')) {
|
||||
return localize(mappedValue as TranslationKeys) ?? mappedValue;
|
||||
}
|
||||
return mappedValue;
|
||||
}
|
||||
}
|
||||
// Always return a string for Input component compatibility
|
||||
if (value != null) {
|
||||
return String(value);
|
||||
}
|
||||
return String(defaultValue ?? '');
|
||||
},
|
||||
[isEnum, enumMappings, defaultValue, localize],
|
||||
);
|
||||
|
||||
const getDefaultDisplayValue = useCallback((): string => {
|
||||
if (defaultValue != null && enumMappings) {
|
||||
const stringDefault = String(defaultValue);
|
||||
if (stringDefault in enumMappings) {
|
||||
const mappedValue = String(enumMappings[stringDefault]);
|
||||
// Check if the mapped value is a localization key
|
||||
if (mappedValue.startsWith('com_')) {
|
||||
return localize(mappedValue as TranslationKeys) ?? mappedValue;
|
||||
}
|
||||
return mappedValue;
|
||||
}
|
||||
}
|
||||
return String(defaultValue ?? '');
|
||||
}, [defaultValue, enumMappings, localize]);
|
||||
|
||||
const handleValueChange = useCallback(
|
||||
(value: number) => {
|
||||
if (isEnum) {
|
||||
@@ -115,12 +160,12 @@ function DynamicSlider({
|
||||
<div className="flex w-full items-center justify-between">
|
||||
<Label
|
||||
htmlFor={`${settingKey}-dynamic-setting`}
|
||||
className="text-left text-sm font-medium"
|
||||
className="break-words text-left text-sm font-medium"
|
||||
>
|
||||
{labelCode ? localize(label as TranslationKeys) ?? label : label || settingKey}{' '}
|
||||
{labelCode ? (localize(label as TranslationKeys) ?? label) : label || settingKey}{' '}
|
||||
{showDefault && (
|
||||
<small className="opacity-40">
|
||||
({localize('com_endpoint_default')}: {defaultValue})
|
||||
({localize('com_endpoint_default')}: {getDefaultDisplayValue()})
|
||||
</small>
|
||||
)}
|
||||
</Label>
|
||||
@@ -132,13 +177,13 @@ function DynamicSlider({
|
||||
onChange={(value) => setInputValue(Number(value))}
|
||||
max={range ? range.max : (options?.length ?? 0) - 1}
|
||||
min={range ? range.min : 0}
|
||||
step={range ? range.step ?? 1 : 1}
|
||||
step={range ? (range.step ?? 1) : 1}
|
||||
controls={false}
|
||||
className={cn(
|
||||
defaultTextProps,
|
||||
cn(
|
||||
optionText,
|
||||
'reset-rc-number-input reset-rc-number-input-text-right h-auto w-12 border-0 group-hover/temp:border-gray-200',
|
||||
'reset-rc-number-input reset-rc-number-input-text-right h-auto w-12 border-0 py-1 text-xs group-hover/temp:border-gray-200',
|
||||
),
|
||||
)}
|
||||
/>
|
||||
@@ -146,13 +191,13 @@ function DynamicSlider({
|
||||
<Input
|
||||
id={`${settingKey}-dynamic-setting-input`}
|
||||
disabled={readonly}
|
||||
value={selectedValue ?? defaultValue}
|
||||
value={getDisplayValue(selectedValue)}
|
||||
onChange={() => ({})}
|
||||
className={cn(
|
||||
defaultTextProps,
|
||||
cn(
|
||||
optionText,
|
||||
'reset-rc-number-input reset-rc-number-input-text-right h-auto w-12 border-0 group-hover/temp:border-gray-200',
|
||||
'reset-rc-number-input h-auto w-14 border-0 py-1 pl-1 text-center text-xs group-hover/temp:border-gray-200',
|
||||
),
|
||||
)}
|
||||
/>
|
||||
@@ -164,19 +209,23 @@ function DynamicSlider({
|
||||
value={[
|
||||
isEnum
|
||||
? enumToNumeric[(selectedValue as number) ?? '']
|
||||
: (inputValue as number) ?? (defaultValue as number),
|
||||
: ((inputValue as number) ?? (defaultValue as number)),
|
||||
]}
|
||||
onValueChange={(value) => handleValueChange(value[0])}
|
||||
onDoubleClick={() => setInputValue(defaultValue as string | number)}
|
||||
max={max}
|
||||
min={range ? range.min : 0}
|
||||
step={range ? range.step ?? 1 : 1}
|
||||
step={range ? (range.step ?? 1) : 1}
|
||||
className="flex h-4 w-full"
|
||||
/>
|
||||
</HoverCardTrigger>
|
||||
{description && (
|
||||
<OptionHover
|
||||
description={descriptionCode ? localize(description as TranslationKeys) ?? description : description}
|
||||
description={
|
||||
descriptionCode
|
||||
? (localize(description as TranslationKeys) ?? description)
|
||||
: description
|
||||
}
|
||||
side={ESide.Left}
|
||||
/>
|
||||
)}
|
||||
|
||||
@@ -50,7 +50,7 @@ function DynamicSwitch({
|
||||
<div className="flex justify-between">
|
||||
<Label
|
||||
htmlFor={`${settingKey}-dynamic-switch`}
|
||||
className="text-left text-sm font-medium"
|
||||
className="break-words text-left text-sm font-medium"
|
||||
>
|
||||
{labelCode ? (localize(label as TranslationKeys) ?? label) : label || settingKey}{' '}
|
||||
{showDefault && (
|
||||
|
||||
@@ -6,6 +6,7 @@ import {
|
||||
QueryKeys,
|
||||
ContentTypes,
|
||||
EModelEndpoint,
|
||||
isAgentsEndpoint,
|
||||
parseCompactConvo,
|
||||
replaceSpecialVars,
|
||||
isAssistantsEndpoint,
|
||||
@@ -36,15 +37,6 @@ const logChatRequest = (request: Record<string, unknown>) => {
|
||||
logger.log('=====================================');
|
||||
};
|
||||
|
||||
const usesContentStream = (endpoint: EModelEndpoint | undefined, endpointType?: string) => {
|
||||
if (endpointType === EModelEndpoint.custom) {
|
||||
return true;
|
||||
}
|
||||
if (endpoint === EModelEndpoint.openAI || endpoint === EModelEndpoint.azureOpenAI) {
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
export default function useChatFunctions({
|
||||
index = 0,
|
||||
files,
|
||||
@@ -93,7 +85,7 @@ export default function useChatFunctions({
|
||||
messageId = null,
|
||||
},
|
||||
{
|
||||
editedText = null,
|
||||
editedContent = null,
|
||||
editedMessageId = null,
|
||||
isResubmission = false,
|
||||
isRegenerate = false,
|
||||
@@ -245,14 +237,11 @@ export default function useChatFunctions({
|
||||
setFilesToDelete({});
|
||||
}
|
||||
|
||||
const generation = editedText ?? latestMessage?.text ?? '';
|
||||
const responseText = isEditOrContinue ? generation : '';
|
||||
|
||||
const responseMessageId =
|
||||
editedMessageId ?? (latestMessage?.messageId ? latestMessage?.messageId + '_' : null) ?? null;
|
||||
const initialResponse: TMessage = {
|
||||
sender: responseSender,
|
||||
text: responseText,
|
||||
text: '',
|
||||
endpoint: endpoint ?? '',
|
||||
parentMessageId: isRegenerate ? messageId : intermediateId,
|
||||
messageId: responseMessageId ?? `${isRegenerate ? messageId : intermediateId}_`,
|
||||
@@ -272,34 +261,37 @@ export default function useChatFunctions({
|
||||
{
|
||||
type: ContentTypes.TEXT,
|
||||
[ContentTypes.TEXT]: {
|
||||
value: responseText,
|
||||
value: '',
|
||||
},
|
||||
},
|
||||
];
|
||||
} else if (endpoint === EModelEndpoint.agents) {
|
||||
initialResponse.model = conversation?.agent_id ?? '';
|
||||
} else if (endpoint != null) {
|
||||
initialResponse.model = isAgentsEndpoint(endpoint)
|
||||
? (conversation?.agent_id ?? '')
|
||||
: (conversation?.model ?? '');
|
||||
initialResponse.text = '';
|
||||
initialResponse.content = [
|
||||
{
|
||||
type: ContentTypes.TEXT,
|
||||
[ContentTypes.TEXT]: {
|
||||
value: responseText,
|
||||
|
||||
if (editedContent && latestMessage?.content) {
|
||||
initialResponse.content = cloneDeep(latestMessage.content);
|
||||
const { index, text, type } = editedContent;
|
||||
if (initialResponse.content && index >= 0 && index < initialResponse.content.length) {
|
||||
const contentPart = initialResponse.content[index];
|
||||
if (type === ContentTypes.THINK && contentPart.type === ContentTypes.THINK) {
|
||||
contentPart[ContentTypes.THINK] = text;
|
||||
} else if (type === ContentTypes.TEXT && contentPart.type === ContentTypes.TEXT) {
|
||||
contentPart[ContentTypes.TEXT] = text;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
initialResponse.content = [
|
||||
{
|
||||
type: ContentTypes.TEXT,
|
||||
[ContentTypes.TEXT]: {
|
||||
value: '',
|
||||
},
|
||||
},
|
||||
},
|
||||
];
|
||||
setShowStopButton(true);
|
||||
} else if (usesContentStream(endpoint, endpointType)) {
|
||||
initialResponse.text = '';
|
||||
initialResponse.content = [
|
||||
{
|
||||
type: ContentTypes.TEXT,
|
||||
[ContentTypes.TEXT]: {
|
||||
value: responseText,
|
||||
},
|
||||
},
|
||||
];
|
||||
setShowStopButton(true);
|
||||
} else {
|
||||
];
|
||||
}
|
||||
setShowStopButton(true);
|
||||
}
|
||||
|
||||
@@ -316,7 +308,6 @@ export default function useChatFunctions({
|
||||
endpointOption,
|
||||
userMessage: {
|
||||
...currentMsg,
|
||||
generation,
|
||||
responseMessageId,
|
||||
overrideParentMessageId: isRegenerate ? messageId : null,
|
||||
},
|
||||
@@ -328,6 +319,7 @@ export default function useChatFunctions({
|
||||
initialResponse,
|
||||
isTemporary,
|
||||
ephemeralAgent,
|
||||
editedContent,
|
||||
};
|
||||
|
||||
if (isRegenerate) {
|
||||
|
||||
@@ -30,6 +30,14 @@ const useSetIndexOptions: TUseSetOptions = (preset = false) => {
|
||||
};
|
||||
}
|
||||
|
||||
// Auto-enable Responses API when web search is enabled
|
||||
if (param === 'web_search' && newValue === true) {
|
||||
const currentUseResponsesApi = conversation?.useResponsesApi ?? false;
|
||||
if (!currentUseResponsesApi) {
|
||||
update['useResponsesApi'] = true;
|
||||
}
|
||||
}
|
||||
|
||||
setConversation(
|
||||
(prevState) =>
|
||||
tConvoUpdateSchema.parse({
|
||||
|
||||
@@ -55,6 +55,26 @@ export default function useStepHandler({
|
||||
const messageMap = useRef(new Map<string, TMessage>());
|
||||
const stepMap = useRef(new Map<string, Agents.RunStep>());
|
||||
|
||||
const calculateContentIndex = (
|
||||
baseIndex: number,
|
||||
initialContent: TMessageContentParts[],
|
||||
incomingContentType: string,
|
||||
existingContent?: TMessageContentParts[],
|
||||
): number => {
|
||||
/** Only apply -1 adjustment for TEXT or THINK types when they match existing content */
|
||||
if (
|
||||
initialContent.length > 0 &&
|
||||
(incomingContentType === ContentTypes.TEXT || incomingContentType === ContentTypes.THINK)
|
||||
) {
|
||||
const targetIndex = baseIndex + initialContent.length - 1;
|
||||
const existingType = existingContent?.[targetIndex]?.type;
|
||||
if (existingType === incomingContentType) {
|
||||
return targetIndex;
|
||||
}
|
||||
}
|
||||
return baseIndex + initialContent.length;
|
||||
};
|
||||
|
||||
const updateContent = (
|
||||
message: TMessage,
|
||||
index: number,
|
||||
@@ -170,6 +190,11 @@ export default function useStepHandler({
|
||||
lastAnnouncementTimeRef.current = currentTime;
|
||||
}
|
||||
|
||||
let initialContent: TMessageContentParts[] = [];
|
||||
if (submission?.editedContent != null) {
|
||||
initialContent = submission?.initialResponse?.content ?? initialContent;
|
||||
}
|
||||
|
||||
if (event === 'on_run_step') {
|
||||
const runStep = data as Agents.RunStep;
|
||||
const responseMessageId = runStep.runId ?? '';
|
||||
@@ -189,7 +214,7 @@ export default function useStepHandler({
|
||||
parentMessageId: userMessage.messageId,
|
||||
conversationId: userMessage.conversationId,
|
||||
messageId: responseMessageId,
|
||||
content: [],
|
||||
content: initialContent,
|
||||
};
|
||||
|
||||
messageMap.current.set(responseMessageId, response);
|
||||
@@ -214,7 +239,9 @@ export default function useStepHandler({
|
||||
},
|
||||
};
|
||||
|
||||
updatedResponse = updateContent(updatedResponse, runStep.index, contentPart);
|
||||
/** Tool calls don't need index adjustment */
|
||||
const currentIndex = runStep.index + initialContent.length;
|
||||
updatedResponse = updateContent(updatedResponse, currentIndex, contentPart);
|
||||
});
|
||||
|
||||
messageMap.current.set(responseMessageId, updatedResponse);
|
||||
@@ -234,7 +261,9 @@ export default function useStepHandler({
|
||||
|
||||
const response = messageMap.current.get(responseMessageId);
|
||||
if (response) {
|
||||
const updatedResponse = updateContent(response, agent_update.index, data);
|
||||
// Agent updates don't need index adjustment
|
||||
const currentIndex = agent_update.index + initialContent.length;
|
||||
const updatedResponse = updateContent(response, currentIndex, data);
|
||||
messageMap.current.set(responseMessageId, updatedResponse);
|
||||
const currentMessages = getMessages() || [];
|
||||
setMessages([...currentMessages.slice(0, -1), updatedResponse]);
|
||||
@@ -255,7 +284,13 @@ export default function useStepHandler({
|
||||
? messageDelta.delta.content[0]
|
||||
: messageDelta.delta.content;
|
||||
|
||||
const updatedResponse = updateContent(response, runStep.index, contentPart);
|
||||
const currentIndex = calculateContentIndex(
|
||||
runStep.index,
|
||||
initialContent,
|
||||
contentPart.type || '',
|
||||
response.content,
|
||||
);
|
||||
const updatedResponse = updateContent(response, currentIndex, contentPart);
|
||||
|
||||
messageMap.current.set(responseMessageId, updatedResponse);
|
||||
const currentMessages = getMessages() || [];
|
||||
@@ -277,7 +312,13 @@ export default function useStepHandler({
|
||||
? reasoningDelta.delta.content[0]
|
||||
: reasoningDelta.delta.content;
|
||||
|
||||
const updatedResponse = updateContent(response, runStep.index, contentPart);
|
||||
const currentIndex = calculateContentIndex(
|
||||
runStep.index,
|
||||
initialContent,
|
||||
contentPart.type || '',
|
||||
response.content,
|
||||
);
|
||||
const updatedResponse = updateContent(response, currentIndex, contentPart);
|
||||
|
||||
messageMap.current.set(responseMessageId, updatedResponse);
|
||||
const currentMessages = getMessages() || [];
|
||||
@@ -318,7 +359,9 @@ export default function useStepHandler({
|
||||
contentPart.tool_call.expires_at = runStepDelta.delta.expires_at;
|
||||
}
|
||||
|
||||
updatedResponse = updateContent(updatedResponse, runStep.index, contentPart);
|
||||
/** Tool calls don't need index adjustment */
|
||||
const currentIndex = runStep.index + initialContent.length;
|
||||
updatedResponse = updateContent(updatedResponse, currentIndex, contentPart);
|
||||
});
|
||||
|
||||
messageMap.current.set(responseMessageId, updatedResponse);
|
||||
@@ -350,7 +393,9 @@ export default function useStepHandler({
|
||||
tool_call: result.tool_call,
|
||||
};
|
||||
|
||||
updatedResponse = updateContent(updatedResponse, runStep.index, contentPart, true);
|
||||
/** Tool calls don't need index adjustment */
|
||||
const currentIndex = runStep.index + initialContent.length;
|
||||
updatedResponse = updateContent(updatedResponse, currentIndex, contentPart, true);
|
||||
|
||||
messageMap.current.set(responseMessageId, updatedResponse);
|
||||
const updatedMessages = messages.map((msg) =>
|
||||
|
||||
@@ -695,6 +695,5 @@
|
||||
"com_ui_versions": "الإصدارات",
|
||||
"com_ui_yes": "نعم",
|
||||
"com_ui_zoom": "تكبير",
|
||||
"com_user_message": "أنت",
|
||||
"com_warning_resubmit_unsupported": "إعادة إرسال رسالة الذكاء الاصطناعي غير مدعومة لنقطة النهاية هذه"
|
||||
"com_user_message": "أنت"
|
||||
}
|
||||
@@ -868,6 +868,5 @@
|
||||
"com_ui_x_selected": "{{0}} seleccionats",
|
||||
"com_ui_yes": "Sí",
|
||||
"com_ui_zoom": "Zoom",
|
||||
"com_user_message": "Tu",
|
||||
"com_warning_resubmit_unsupported": "Tornar a enviar el missatge de la IA no està suportat per aquest endpoint."
|
||||
"com_user_message": "Tu"
|
||||
}
|
||||
@@ -720,6 +720,5 @@
|
||||
"com_ui_write": "Psát",
|
||||
"com_ui_yes": "Ano",
|
||||
"com_ui_zoom": "Přiblížit",
|
||||
"com_user_message": "Vy",
|
||||
"com_warning_resubmit_unsupported": "Opětovné odeslání AI zprávy není pro tento koncový bod podporováno."
|
||||
"com_user_message": "Vy"
|
||||
}
|
||||
@@ -823,6 +823,5 @@
|
||||
"com_ui_x_selected": "{{0}} udvalgt",
|
||||
"com_ui_yes": "Ja",
|
||||
"com_ui_zoom": "Zoom",
|
||||
"com_user_message": "Du",
|
||||
"com_warning_resubmit_unsupported": "Genindsendelse af AI-beskeden understøttes ikke for dette slutpunkt."
|
||||
"com_user_message": "Du"
|
||||
}
|
||||
@@ -917,6 +917,5 @@
|
||||
"com_ui_x_selected": "{{0}} ausgewählt",
|
||||
"com_ui_yes": "Ja",
|
||||
"com_ui_zoom": "Zoom",
|
||||
"com_user_message": "Du",
|
||||
"com_warning_resubmit_unsupported": "Das erneute Senden der KI-Nachricht wird für diesen Endpunkt nicht unterstützt."
|
||||
"com_user_message": "Du"
|
||||
}
|
||||
@@ -209,6 +209,7 @@
|
||||
"com_endpoint_google_thinking_budget": "Guides the number of thinking tokens the model uses. The actual amount may exceed or fall below this value depending on the prompt.\n\nThis setting is only supported by certain models (2.5 series). Gemini 2.5 Pro supports 128-32,768 tokens. Gemini 2.5 Flash supports 0-24,576 tokens. Gemini 2.5 Flash Lite supports 512-24,576 tokens.\n\nLeave blank or set to \"-1\" to let the model automatically decide when and how much to think. By default, Gemini 2.5 Flash Lite does not think.",
|
||||
"com_endpoint_google_topk": "Top-k changes how the model selects tokens for output. A top-k of 1 means the selected token is the most probable among all tokens in the model's vocabulary (also called greedy decoding), while a top-k of 3 means that the next token is selected from among the 3 most probable tokens (using temperature).",
|
||||
"com_endpoint_google_topp": "Top-p changes how the model selects tokens for output. Tokens are selected from most K (see topK parameter) probable to least until the sum of their probabilities equals the top-p value.",
|
||||
"com_endpoint_google_use_search_grounding": "Use Google's search grounding feature to enhance responses with real-time web search results. This enables models to access current information and provide more accurate, up-to-date answers.",
|
||||
"com_endpoint_instructions_assistants": "Override Instructions",
|
||||
"com_endpoint_instructions_assistants_placeholder": "Overrides the instructions of the assistant. This is useful for modifying the behavior on a per-run basis.",
|
||||
"com_endpoint_max_output_tokens": "Max Output Tokens",
|
||||
@@ -226,11 +227,14 @@
|
||||
"com_endpoint_openai_pres": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
|
||||
"com_endpoint_openai_prompt_prefix_placeholder": "Set custom instructions to include in System Message. Default: none",
|
||||
"com_endpoint_openai_reasoning_effort": "o1 and o3 models only: constrains effort on reasoning for reasoning models. Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.",
|
||||
"com_endpoint_openai_reasoning_summary": "Responses API only: A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. Set to none,auto, concise, or detailed.",
|
||||
"com_endpoint_openai_resend": "Resend all previously attached images. Note: this can significantly increase token cost and you may experience errors with many image attachments.",
|
||||
"com_endpoint_openai_resend_files": "Resend all previously attached files. Note: this will increase token cost and you may experience errors with many attachments.",
|
||||
"com_endpoint_openai_stop": "Up to 4 sequences where the API will stop generating further tokens.",
|
||||
"com_endpoint_openai_temp": "Higher values = more random, while lower values = more focused and deterministic. We recommend altering this or Top P but not both.",
|
||||
"com_endpoint_openai_topp": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We recommend altering this or temperature but not both.",
|
||||
"com_endpoint_openai_use_responses_api": "Use the Responses API instead of Chat Completions, which includes extended features from OpenAI. Required for o1-pro, o3-pro, and to enable reasoning summaries.",
|
||||
"com_endpoint_openai_use_web_search": "Enable web search functionality using OpenAI's built-in search capabilities. This allows the model to search the web for up-to-date information and provide more accurate, current responses.",
|
||||
"com_endpoint_output": "Output",
|
||||
"com_endpoint_plug_image_detail": "Image Detail",
|
||||
"com_endpoint_plug_resend_files": "Resend Files",
|
||||
@@ -261,6 +265,7 @@
|
||||
"com_endpoint_prompt_prefix_assistants_placeholder": "Set additional instructions or context on top of the Assistant's main instructions. Ignored if empty.",
|
||||
"com_endpoint_prompt_prefix_placeholder": "Set custom instructions or context. Ignored if empty.",
|
||||
"com_endpoint_reasoning_effort": "Reasoning Effort",
|
||||
"com_endpoint_reasoning_summary": "Reasoning Summary",
|
||||
"com_endpoint_save_as_preset": "Save As Preset",
|
||||
"com_endpoint_search": "Search endpoint by name",
|
||||
"com_endpoint_search_endpoint_models": "Search {{0}} models...",
|
||||
@@ -276,6 +281,8 @@
|
||||
"com_endpoint_top_k": "Top K",
|
||||
"com_endpoint_top_p": "Top P",
|
||||
"com_endpoint_use_active_assistant": "Use Active Assistant",
|
||||
"com_endpoint_use_responses_api": "Use Responses API",
|
||||
"com_endpoint_use_search_grounding": "Grounding with Google Search",
|
||||
"com_error_expired_user_key": "Provided key for {{0}} expired at {{1}}. Please provide a new key and try again.",
|
||||
"com_error_files_dupe": "Duplicate file detected.",
|
||||
"com_error_files_empty": "Empty files are not allowed.",
|
||||
@@ -284,6 +291,7 @@
|
||||
"com_error_files_upload": "An error occurred while uploading the file.",
|
||||
"com_error_files_upload_canceled": "The file upload request was canceled. Note: the file upload may still be processing and will need to be manually deleted.",
|
||||
"com_error_files_validation": "An error occurred while validating the file.",
|
||||
"com_error_google_tool_conflict": "Usage of built-in Google tools are not supported with external tools. Please disable either the built-in tools or the external tools.",
|
||||
"com_error_heic_conversion": "Failed to convert HEIC image to JPEG. Please try converting the image manually or use a different format.",
|
||||
"com_error_input_length": "The latest message token count is too long, exceeding the token limit, or your token limit parameters are misconfigured, adversely affecting the context window. More info: {{0}}. Please shorten your message, adjust the max context size from the conversation parameters, or fork the conversation to continue.",
|
||||
"com_error_invalid_agent_provider": "The \"{{0}}\" provider is not available for use with Agents. Please go to your agent's settings and select a currently available provider.",
|
||||
@@ -426,6 +434,9 @@
|
||||
"com_nav_lang_traditional_chinese": "繁體中文",
|
||||
"com_nav_lang_turkish": "Türkçe",
|
||||
"com_nav_lang_vietnamese": "Tiếng Việt",
|
||||
"com_nav_lang_armenian": "Հայերեն",
|
||||
"com_nav_lang_latvian": "Latviski",
|
||||
"com_nav_lang_uyghur": "Uyƣur tili",
|
||||
"com_nav_language": "Language",
|
||||
"com_nav_latex_parsing": "Parsing LaTeX in messages (may affect performance)",
|
||||
"com_nav_log_out": "Log out",
|
||||
@@ -634,6 +645,7 @@
|
||||
"com_ui_command_placeholder": "Optional: Enter a command for the prompt or name will be used",
|
||||
"com_ui_command_usage_placeholder": "Select a Prompt by command or name",
|
||||
"com_ui_complete_setup": "Complete Setup",
|
||||
"com_ui_concise": "Concise",
|
||||
"com_ui_configure_mcp_variables_for": "Configure Variables for {{0}}",
|
||||
"com_ui_confirm_action": "Confirm Action",
|
||||
"com_ui_confirm_admin_use_change": "Changing this setting will block access for admins, including yourself. Are you sure you want to proceed?",
|
||||
@@ -699,6 +711,7 @@
|
||||
"com_ui_description": "Description",
|
||||
"com_ui_description_placeholder": "Optional: Enter a description to display for the prompt",
|
||||
"com_ui_deselect_all": "Deselect All",
|
||||
"com_ui_detailed": "Detailed",
|
||||
"com_ui_disabling": "Disabling...",
|
||||
"com_ui_download": "Download",
|
||||
"com_ui_download_artifact": "Download Artifact",
|
||||
@@ -793,6 +806,7 @@
|
||||
"com_ui_happy_birthday": "It's my 1st birthday!",
|
||||
"com_ui_hide_image_details": "Hide Image Details",
|
||||
"com_ui_hide_qr": "Hide QR Code",
|
||||
"com_ui_high": "High",
|
||||
"com_ui_host": "Host",
|
||||
"com_ui_icon": "Icon",
|
||||
"com_ui_idea": "Ideas",
|
||||
@@ -820,6 +834,7 @@
|
||||
"com_ui_loading": "Loading...",
|
||||
"com_ui_locked": "Locked",
|
||||
"com_ui_logo": "{{0}} Logo",
|
||||
"com_ui_low": "Low",
|
||||
"com_ui_manage": "Manage",
|
||||
"com_ui_max_tags": "Maximum number allowed is {{0}}, using latest values.",
|
||||
"com_ui_mcp_dialog_desc": "Please enter the necessary information below.",
|
||||
@@ -827,6 +842,7 @@
|
||||
"com_ui_mcp_server_not_found": "Server not found.",
|
||||
"com_ui_mcp_servers": "MCP Servers",
|
||||
"com_ui_mcp_url": "MCP Server URL",
|
||||
"com_ui_medium": "Medium",
|
||||
"com_ui_memories": "Memories",
|
||||
"com_ui_memories_allow_create": "Allow creating Memories",
|
||||
"com_ui_memories_allow_opt_out": "Allow users to opt out of Memories",
|
||||
@@ -1058,6 +1074,5 @@
|
||||
"com_ui_x_selected": "{{0}} selected",
|
||||
"com_ui_yes": "Yes",
|
||||
"com_ui_zoom": "Zoom",
|
||||
"com_user_message": "You",
|
||||
"com_warning_resubmit_unsupported": "Resubmitting the AI message is not supported for this endpoint."
|
||||
}
|
||||
"com_user_message": "You"
|
||||
}
|
||||
|
||||
@@ -752,6 +752,5 @@
|
||||
"com_ui_x_selected": "{{0}} seleccionado",
|
||||
"com_ui_yes": "Sí",
|
||||
"com_ui_zoom": "Zoom",
|
||||
"com_user_message": "Usted",
|
||||
"com_warning_resubmit_unsupported": "No se admite el reenvío del mensaje de IA para este punto de conexión."
|
||||
"com_user_message": "Usted"
|
||||
}
|
||||
@@ -865,6 +865,5 @@
|
||||
"com_ui_x_selected": "{{0}} valitud",
|
||||
"com_ui_yes": "Jah",
|
||||
"com_ui_zoom": "Suumi",
|
||||
"com_user_message": "Sina",
|
||||
"com_warning_resubmit_unsupported": "AI sõnumi uuesti esitamine pole selle otspunkti jaoks toetatud."
|
||||
"com_user_message": "Sina"
|
||||
}
|
||||
@@ -847,6 +847,5 @@
|
||||
"com_ui_write": "نوشتن",
|
||||
"com_ui_yes": "بله",
|
||||
"com_ui_zoom": "بزرگنمایی ضربه بزنید؛",
|
||||
"com_user_message": "شما",
|
||||
"com_warning_resubmit_unsupported": "ارسال مجدد پیام هوش مصنوعی برای این نقطه پایانی پشتیبانی نمی شود."
|
||||
"com_user_message": "شما"
|
||||
}
|
||||
@@ -752,6 +752,5 @@
|
||||
"com_ui_versions": "Versions",
|
||||
"com_ui_yes": "Oui",
|
||||
"com_ui_zoom": "Zoom",
|
||||
"com_user_message": "Vous",
|
||||
"com_warning_resubmit_unsupported": "La resoumission du message IA n'est pas prise en charge pour ce point de terminaison."
|
||||
"com_user_message": "Vous"
|
||||
}
|
||||
@@ -863,6 +863,5 @@
|
||||
"com_ui_x_selected": "{{0}} נבחר",
|
||||
"com_ui_yes": "כן",
|
||||
"com_ui_zoom": "זום",
|
||||
"com_user_message": "אתה",
|
||||
"com_warning_resubmit_unsupported": "שליחת הודעה מחדש אינה נתמכת עבור נקודת קצה זו."
|
||||
"com_user_message": "אתה"
|
||||
}
|
||||
@@ -847,6 +847,5 @@
|
||||
"com_ui_write": "Írás",
|
||||
"com_ui_yes": "Igen",
|
||||
"com_ui_zoom": "Zoom",
|
||||
"com_user_message": "Ön",
|
||||
"com_warning_resubmit_unsupported": "Az AI üzenet újraküldése nem támogatott ennél a végpontnál."
|
||||
"com_user_message": "Ön"
|
||||
}
|
||||
1
client/src/locales/hy/translation.json
Normal file
1
client/src/locales/hy/translation.json
Normal file
@@ -0,0 +1 @@
|
||||
{}
|
||||
@@ -22,13 +22,16 @@ import translationJa from './ja/translation.json';
|
||||
import translationKa from './ka/translation.json';
|
||||
import translationSv from './sv/translation.json';
|
||||
import translationKo from './ko/translation.json';
|
||||
import translationLv from './lv/translation.json';
|
||||
import translationTh from './th/translation.json';
|
||||
import translationTr from './tr/translation.json';
|
||||
import translationUg from './ug/translation.json';
|
||||
import translationVi from './vi/translation.json';
|
||||
import translationNl from './nl/translation.json';
|
||||
import translationId from './id/translation.json';
|
||||
import translationHe from './he/translation.json';
|
||||
import translationHu from './hu/translation.json';
|
||||
import translationHy from './hy/translation.json';
|
||||
import translationFi from './fi/translation.json';
|
||||
import translationZh_Hans from './zh-Hans/translation.json';
|
||||
import translationZh_Hant from './zh-Hant/translation.json';
|
||||
@@ -57,13 +60,16 @@ export const resources = {
|
||||
ka: { translation: translationKa },
|
||||
sv: { translation: translationSv },
|
||||
ko: { translation: translationKo },
|
||||
lv: { translation: translationLv },
|
||||
th: { translation: translationTh },
|
||||
tr: { translation: translationTr },
|
||||
ug: { translation: translationUg },
|
||||
vi: { translation: translationVi },
|
||||
nl: { translation: translationNl },
|
||||
id: { translation: translationId },
|
||||
he: { translation: translationHe },
|
||||
hu: { translation: translationHu },
|
||||
hy: { translation: translationHy },
|
||||
fi: { translation: translationFi },
|
||||
} as const;
|
||||
|
||||
|
||||
@@ -829,6 +829,5 @@
|
||||
"com_ui_write": "Scrittura",
|
||||
"com_ui_yes": "Sì",
|
||||
"com_ui_zoom": "Zoom",
|
||||
"com_user_message": "Mostra nome utente nei messaggi",
|
||||
"com_warning_resubmit_unsupported": "Il reinvio del messaggio AI non è supportato per questo endpoint."
|
||||
"com_user_message": "Mostra nome utente nei messaggi"
|
||||
}
|
||||
@@ -868,6 +868,5 @@
|
||||
"com_ui_x_selected": "{{0}}が選択された",
|
||||
"com_ui_yes": "はい",
|
||||
"com_ui_zoom": "ズーム",
|
||||
"com_user_message": "あなた",
|
||||
"com_warning_resubmit_unsupported": "このエンドポイントではAIメッセージの再送信はサポートされていません"
|
||||
"com_user_message": "あなた"
|
||||
}
|
||||
@@ -921,6 +921,5 @@
|
||||
"com_ui_x_selected": "{{0}}개 선택됨",
|
||||
"com_ui_yes": "네",
|
||||
"com_ui_zoom": "확대/축소",
|
||||
"com_user_message": "당신",
|
||||
"com_warning_resubmit_unsupported": "이 엔드포인트에서는 AI 메시지 재전송이 지원되지 않습니다"
|
||||
"com_user_message": "당신"
|
||||
}
|
||||
1
client/src/locales/lv/translation.json
Normal file
1
client/src/locales/lv/translation.json
Normal file
@@ -0,0 +1 @@
|
||||
{}
|
||||
@@ -714,6 +714,5 @@
|
||||
"com_ui_view_source": "Zobacz źródłowy czat",
|
||||
"com_ui_yes": "Tak",
|
||||
"com_ui_zoom": "Powiększ",
|
||||
"com_user_message": "Ty",
|
||||
"com_warning_resubmit_unsupported": "Ponowne przesyłanie wiadomości AI nie jest obsługiwane dla tego punktu końcowego."
|
||||
"com_user_message": "Ty"
|
||||
}
|
||||
@@ -817,6 +817,5 @@
|
||||
"com_ui_write": "Escrevendo",
|
||||
"com_ui_yes": "Sim",
|
||||
"com_ui_zoom": "Zoom",
|
||||
"com_user_message": "Você",
|
||||
"com_warning_resubmit_unsupported": "O reenvio da mensagem de IA não é suportado para este endpoint."
|
||||
"com_user_message": "Você"
|
||||
}
|
||||
@@ -819,6 +819,5 @@
|
||||
"com_ui_write": "A escrever",
|
||||
"com_ui_yes": "Sim",
|
||||
"com_ui_zoom": "Ampliar",
|
||||
"com_user_message": "Você",
|
||||
"com_warning_resubmit_unsupported": "O reenvio da mensagem de IA não é suportado por este endereço."
|
||||
"com_user_message": "Você"
|
||||
}
|
||||
@@ -865,6 +865,5 @@
|
||||
"com_ui_x_selected": "{{0}} выбрано",
|
||||
"com_ui_yes": "Да",
|
||||
"com_ui_zoom": "Масштаб",
|
||||
"com_user_message": "Вы",
|
||||
"com_warning_resubmit_unsupported": "Повторная отправка сообщения ИИ не поддерживается для данной конечной точки"
|
||||
"com_user_message": "Вы"
|
||||
}
|
||||
@@ -802,6 +802,5 @@
|
||||
"com_ui_write": "การเขียน",
|
||||
"com_ui_yes": "ใช่",
|
||||
"com_ui_zoom": "ขยาย",
|
||||
"com_user_message": "คุณ",
|
||||
"com_warning_resubmit_unsupported": "การส่งข้อความ AI ซ้ำไม่รองรับสำหรับจุดสิ้นสุดนี้"
|
||||
"com_user_message": "คุณ"
|
||||
}
|
||||
@@ -725,6 +725,5 @@
|
||||
"com_ui_view_source": "Kaynak sohbeti görüntüle",
|
||||
"com_ui_yes": "Evet",
|
||||
"com_ui_zoom": "Yakınlaştır",
|
||||
"com_user_message": "Sen",
|
||||
"com_warning_resubmit_unsupported": "Bu uç nokta için yapay zeka mesajını yeniden gönderme desteklenmiyor."
|
||||
"com_user_message": "Sen"
|
||||
}
|
||||
1
client/src/locales/ug/translation.json
Normal file
1
client/src/locales/ug/translation.json
Normal file
@@ -0,0 +1 @@
|
||||
{}
|
||||
@@ -852,6 +852,5 @@
|
||||
"com_ui_x_selected": "{{0}} 已选择",
|
||||
"com_ui_yes": "是的",
|
||||
"com_ui_zoom": "缩放",
|
||||
"com_user_message": "您",
|
||||
"com_warning_resubmit_unsupported": "此终端不支持重新提交AI消息"
|
||||
"com_user_message": "您"
|
||||
}
|
||||
@@ -695,6 +695,5 @@
|
||||
"com_ui_versions": "版本",
|
||||
"com_ui_yes": "是",
|
||||
"com_ui_zoom": "縮放",
|
||||
"com_user_message": "您",
|
||||
"com_warning_resubmit_unsupported": "此端點不支援重新送出 AI 訊息。"
|
||||
"com_user_message": "您"
|
||||
}
|
||||
@@ -14,11 +14,14 @@ import {
|
||||
FileMapContext,
|
||||
SetConvoProvider,
|
||||
} from '~/Providers';
|
||||
import TermsAndConditionsModal from '~/components/ui/TermsAndConditionsModal';
|
||||
import TermsAndConditionsModal from '~/components/Chat/TermsAndConditionsModal';
|
||||
import { useUserTermsQuery, useGetStartupConfig } from '~/data-provider';
|
||||
import { Nav, MobileNav } from '~/components/Nav';
|
||||
import { useHealthCheck } from '~/data-provider';
|
||||
import { Banner } from '~/components/Banners';
|
||||
import { getTermsMarkdown } from '~/utils';
|
||||
import { useRecoilValue } from 'recoil';
|
||||
import store from '~/store';
|
||||
|
||||
export default function Root() {
|
||||
const [showTerms, setShowTerms] = useState(false);
|
||||
@@ -37,6 +40,9 @@ export default function Root() {
|
||||
const agentsMap = useAgentsMap({ isAuthenticated });
|
||||
const fileMap = useFileMap({ isAuthenticated });
|
||||
|
||||
const lang = useRecoilValue(store.lang);
|
||||
const modalContent = getTermsMarkdown(lang);
|
||||
|
||||
const { data: config } = useGetStartupConfig();
|
||||
const { data: termsData } = useUserTermsQuery({
|
||||
enabled: isAuthenticated && config?.interface?.termsOfService?.modalAcceptance === true,
|
||||
@@ -86,7 +92,7 @@ export default function Root() {
|
||||
onAccept={handleAcceptTerms}
|
||||
onDecline={handleDeclineTerms}
|
||||
title={config.interface.termsOfService.modalTitle}
|
||||
modalContent={config.interface.termsOfService.modalContent}
|
||||
modalContent={modalContent}
|
||||
/>
|
||||
)}
|
||||
</AssistantsMapContext.Provider>
|
||||
|
||||
@@ -15,6 +15,7 @@ export * from './languages';
|
||||
export * from './endpoints';
|
||||
export * from './localStorage';
|
||||
export * from './promptGroups';
|
||||
export * from './termsContent';
|
||||
export { default as cn } from './cn';
|
||||
export { default as logger } from './logger';
|
||||
export { default as buildTree } from './buildTree';
|
||||
|
||||
45
client/src/utils/termsContent.ts
Normal file
45
client/src/utils/termsContent.ts
Normal file
@@ -0,0 +1,45 @@
|
||||
import terms_en from '../../../terms/terms_en.md?raw';
|
||||
import terms_de from '../../../terms/terms_de.md?raw';
|
||||
import terms_fr from '../../../terms/terms_fr.md?raw';
|
||||
|
||||
/**
|
||||
* A mapping of language codes to their respective terms markdown content.
|
||||
*
|
||||
* You can add both base language codes (e.g. 'en') and full codes (e.g. 'pt-BR') if needed.
|
||||
*
|
||||
* @type {Record<string, string>}
|
||||
*/
|
||||
const markdownMap: Record<string, string> = {
|
||||
en: terms_en,
|
||||
de: terms_de,
|
||||
fr: terms_fr,
|
||||
// For example, to support Brazilian Portuguese, you could add:
|
||||
// 'pt-BR': terms_ptBR,
|
||||
};
|
||||
|
||||
/**
|
||||
* Retrieves the terms markdown content for the specified language.
|
||||
*
|
||||
* The function first checks if an exact language code match exists in the markdown map.
|
||||
* If not, it attempts to extract the base language (e.g., 'pt' from 'pt-BR') and checks again.
|
||||
* If no match is found, it falls back to English.
|
||||
*
|
||||
* @param {string} lang - The language code, which may include a region (e.g., 'pt-BR', 'en-US').
|
||||
* @returns {string} The markdown content corresponding to the language,
|
||||
* or the English version if no matching language is found.
|
||||
*/
|
||||
export function getTermsMarkdown(lang: string): string {
|
||||
// Check for exact language code match (e.g., 'pt-BR').
|
||||
if (lang in markdownMap) {
|
||||
return markdownMap[lang];
|
||||
}
|
||||
|
||||
// Extract the base language (e.g., 'pt' from 'pt-BR') and check again.
|
||||
const baseLang = lang.split('-')[0];
|
||||
if (baseLang in markdownMap) {
|
||||
return markdownMap[baseLang];
|
||||
}
|
||||
|
||||
// Fall back to English if no match is found.
|
||||
return markdownMap['en'];
|
||||
}
|
||||
1
client/test/rawFileMock.js
Normal file
1
client/test/rawFileMock.js
Normal file
@@ -0,0 +1 @@
|
||||
module.exports = 'MOCK_MARKDOWN_CONTENT';
|
||||
@@ -13,7 +13,7 @@ services:
|
||||
- rag_api
|
||||
restart: always
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
- "host.docker.internal:host-gateway"
|
||||
env_file:
|
||||
- .env
|
||||
environment:
|
||||
@@ -30,7 +30,7 @@ services:
|
||||
- ./images:/app/client/public/images
|
||||
- ./uploads:/app/uploads
|
||||
- ./logs:/app/api/logs
|
||||
|
||||
- ./terms:/app/terms
|
||||
client:
|
||||
image: nginx:1.27.0-alpine
|
||||
container_name: LibreChat-NGINX
|
||||
|
||||
@@ -27,6 +27,7 @@ services:
|
||||
- ./images:/app/client/public/images
|
||||
- ./uploads:/app/uploads
|
||||
- ./logs:/app/api/logs
|
||||
- ./terms:/app/terms
|
||||
mongodb:
|
||||
container_name: chat-mongodb
|
||||
image: mongo
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
// v0.7.8
|
||||
// v0.7.9-rc1
|
||||
// See .env.test.example for an example of the '.env.test' file.
|
||||
require('dotenv').config({ path: './e2e/.env.test' });
|
||||
|
||||
@@ -22,7 +22,7 @@ version: 1.8.9
|
||||
# It is recommended to use it with quotes.
|
||||
|
||||
# renovate: image=ghcr.io/danny-avila/librechat
|
||||
appVersion: "v0.7.8"
|
||||
appVersion: "v0.7.9-rc1"
|
||||
|
||||
home: https://www.librechat.ai
|
||||
|
||||
|
||||
@@ -27,42 +27,6 @@ interface:
|
||||
openNewTab: true
|
||||
modalAcceptance: true
|
||||
modalTitle: "Terms of Service for LibreChat"
|
||||
modalContent: |
|
||||
# Terms and Conditions for LibreChat
|
||||
|
||||
*Effective Date: February 18, 2024*
|
||||
|
||||
Welcome to LibreChat, the informational website for the open-source AI chat platform, available at https://librechat.ai. These Terms of Service ("Terms") govern your use of our website and the services we offer. By accessing or using the Website, you agree to be bound by these Terms and our Privacy Policy, accessible at https://librechat.ai//privacy.
|
||||
|
||||
## 1. Ownership
|
||||
|
||||
Upon purchasing a package from LibreChat, you are granted the right to download and use the code for accessing an admin panel for LibreChat. While you own the downloaded code, you are expressly prohibited from reselling, redistributing, or otherwise transferring the code to third parties without explicit permission from LibreChat.
|
||||
|
||||
## 2. User Data
|
||||
|
||||
We collect personal data, such as your name, email address, and payment information, as described in our Privacy Policy. This information is collected to provide and improve our services, process transactions, and communicate with you.
|
||||
|
||||
## 3. Non-Personal Data Collection
|
||||
|
||||
The Website uses cookies to enhance user experience, analyze site usage, and facilitate certain functionalities. By using the Website, you consent to the use of cookies in accordance with our Privacy Policy.
|
||||
|
||||
## 4. Use of the Website
|
||||
|
||||
You agree to use the Website only for lawful purposes and in a manner that does not infringe the rights of, restrict, or inhibit anyone else's use and enjoyment of the Website. Prohibited behavior includes harassing or causing distress or inconvenience to any person, transmitting obscene or offensive content, or disrupting the normal flow of dialogue within the Website.
|
||||
|
||||
## 5. Governing Law
|
||||
|
||||
These Terms shall be governed by and construed in accordance with the laws of the United States, without giving effect to any principles of conflicts of law.
|
||||
|
||||
## 6. Changes to the Terms
|
||||
|
||||
We reserve the right to modify these Terms at any time. We will notify users of any changes by email. Your continued use of the Website after such changes have been notified will constitute your consent to such changes.
|
||||
|
||||
## 7. Contact Information
|
||||
|
||||
If you have any questions about these Terms, please contact us at contact@librechat.ai.
|
||||
|
||||
By using the Website, you acknowledge that you have read these Terms of Service and agree to be bound by them.
|
||||
|
||||
endpointsMenu: true
|
||||
modelSelect: true
|
||||
|
||||
235
package-lock.json
generated
235
package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "LibreChat",
|
||||
"version": "v0.7.8",
|
||||
"version": "v0.7.9-rc1",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "LibreChat",
|
||||
"version": "v0.7.8",
|
||||
"version": "v0.7.9-rc1",
|
||||
"license": "ISC",
|
||||
"workspaces": [
|
||||
"api",
|
||||
@@ -47,7 +47,7 @@
|
||||
},
|
||||
"api": {
|
||||
"name": "@librechat/backend",
|
||||
"version": "v0.7.8",
|
||||
"version": "v0.7.9-rc1",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"@anthropic-ai/sdk": "^0.52.0",
|
||||
@@ -64,7 +64,7 @@
|
||||
"@langchain/google-genai": "^0.2.13",
|
||||
"@langchain/google-vertexai": "^0.2.13",
|
||||
"@langchain/textsplitters": "^0.1.0",
|
||||
"@librechat/agents": "^2.4.46",
|
||||
"@librechat/agents": "^2.4.51",
|
||||
"@librechat/api": "*",
|
||||
"@librechat/data-schemas": "*",
|
||||
"@node-saml/passport-saml": "^5.0.0",
|
||||
@@ -1313,44 +1313,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"api/node_modules/@langchain/openai": {
|
||||
"version": "0.5.14",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/openai/-/openai-0.5.14.tgz",
|
||||
"integrity": "sha512-0GEj5K/qi1MRuZ4nE7NvyI4jTG+RSewLZqsExUwRukWdeqmkPNHGrogTa5ZDt7eaJxAaY7EgLC5ZnvCM3L1oug==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"js-tiktoken": "^1.0.12",
|
||||
"openai": "^5.3.0",
|
||||
"zod": "^3.25.32"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@langchain/core": ">=0.3.58 <0.4.0"
|
||||
}
|
||||
},
|
||||
"api/node_modules/@langchain/openai/node_modules/openai": {
|
||||
"version": "5.5.1",
|
||||
"resolved": "https://registry.npmjs.org/openai/-/openai-5.5.1.tgz",
|
||||
"integrity": "sha512-5i19097mGotHA1eFsM6Tjd/tJ8uo9sa5Ysv4Q6bKJ2vtN6rc0MzMrUefXnLXYAJcmMQrC1Efhj0AvfIkXrQamw==",
|
||||
"license": "Apache-2.0",
|
||||
"bin": {
|
||||
"openai": "bin/cli"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"ws": "^8.18.0",
|
||||
"zod": "^3.23.8"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"ws": {
|
||||
"optional": true
|
||||
},
|
||||
"zod": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"api/node_modules/@smithy/abort-controller": {
|
||||
"version": "4.0.2",
|
||||
"resolved": "https://registry.npmjs.org/@smithy/abort-controller/-/abort-controller-4.0.2.tgz",
|
||||
@@ -2502,7 +2464,7 @@
|
||||
},
|
||||
"client": {
|
||||
"name": "@librechat/frontend",
|
||||
"version": "v0.7.8",
|
||||
"version": "v0.7.9-rc1",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"@ariakit/react": "^0.4.15",
|
||||
@@ -18831,9 +18793,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@langchain/core": {
|
||||
"version": "0.3.60",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/core/-/core-0.3.60.tgz",
|
||||
"integrity": "sha512-FlUjO7ovGnaKcb2JPmw5ajPaZj18LVjh/vAURtdzzFy4UsYBLv/5Y3HJQ2KgDdrl6sW/UyfG0zWdnhZQ1A5eJw==",
|
||||
"version": "0.3.62",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/core/-/core-0.3.62.tgz",
|
||||
"integrity": "sha512-GqRTcoUPnozGRMUcA6QkP7LHL/OvanGdB51Jgb0w7IIPDI3wFugxMHZ4gphnGDtxsD1tQY5ykyEpYNxFK8kl1w==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@cfworker/json-schema": "^4.0.2",
|
||||
@@ -18892,44 +18854,6 @@
|
||||
"@langchain/core": ">=0.3.58 <0.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@langchain/deepseek/node_modules/@langchain/openai": {
|
||||
"version": "0.5.14",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/openai/-/openai-0.5.14.tgz",
|
||||
"integrity": "sha512-0GEj5K/qi1MRuZ4nE7NvyI4jTG+RSewLZqsExUwRukWdeqmkPNHGrogTa5ZDt7eaJxAaY7EgLC5ZnvCM3L1oug==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"js-tiktoken": "^1.0.12",
|
||||
"openai": "^5.3.0",
|
||||
"zod": "^3.25.32"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@langchain/core": ">=0.3.58 <0.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@langchain/deepseek/node_modules/openai": {
|
||||
"version": "5.5.1",
|
||||
"resolved": "https://registry.npmjs.org/openai/-/openai-5.5.1.tgz",
|
||||
"integrity": "sha512-5i19097mGotHA1eFsM6Tjd/tJ8uo9sa5Ysv4Q6bKJ2vtN6rc0MzMrUefXnLXYAJcmMQrC1Efhj0AvfIkXrQamw==",
|
||||
"license": "Apache-2.0",
|
||||
"bin": {
|
||||
"openai": "bin/cli"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"ws": "^8.18.0",
|
||||
"zod": "^3.23.8"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"ws": {
|
||||
"optional": true
|
||||
},
|
||||
"zod": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@langchain/google-common": {
|
||||
"version": "0.2.13",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/google-common/-/google-common-0.2.13.tgz",
|
||||
@@ -19126,9 +19050,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@langchain/langgraph": {
|
||||
"version": "0.3.5",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/langgraph/-/langgraph-0.3.5.tgz",
|
||||
"integrity": "sha512-7astlgnp6BdMQJqmr+cbDgR10FYWNCaDLnbfEDHpqhKCCajU59m5snOdl4Vtu5UM6V2k3lgatNqWoflBtxhIyg==",
|
||||
"version": "0.3.6",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/langgraph/-/langgraph-0.3.6.tgz",
|
||||
"integrity": "sha512-TMRUEPb/eC5mS8XdY6gwLGX2druwFDxSWUQDXHHNsbrqhIrL3BPlw+UumjcKBQ8wvhk3gEspg4aHXGq8mAqbRA==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@langchain/langgraph-checkpoint": "~0.0.18",
|
||||
@@ -19178,9 +19102,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@langchain/langgraph-sdk": {
|
||||
"version": "0.0.84",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/langgraph-sdk/-/langgraph-sdk-0.0.84.tgz",
|
||||
"integrity": "sha512-l0PFQyJ+6m6aclORNPPWlcRwgKcXVXsPaJCbCUYFABR3yf4cOpsjhUNR0cJ7+2cS400oieHjGRdGGyO/hbSjhg==",
|
||||
"version": "0.0.89",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/langgraph-sdk/-/langgraph-sdk-0.0.89.tgz",
|
||||
"integrity": "sha512-TFNFfhVxAljV4dFJa53otnT3Ox0uN24ZdW7AfV1rTPe4QTnonxlRGEUl3SSky1CaaVxYaHN9dJyn9zyhxr2jVQ==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@types/json-schema": "^7.0.15",
|
||||
@@ -19271,20 +19195,41 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@langchain/openai": {
|
||||
"version": "0.4.4",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/openai/-/openai-0.4.4.tgz",
|
||||
"integrity": "sha512-UZybJeMd8+UX7Kn47kuFYfqKdBCeBUWNqDtmAr6ZUIMMnlsNIb6MkrEEhGgAEjGCpdT4CU8U/DyyddTz+JayOQ==",
|
||||
"version": "0.5.16",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/openai/-/openai-0.5.16.tgz",
|
||||
"integrity": "sha512-TqzPE3PM0bMkQi53qs8vCFkwaEp3VgwGw+s1e8Nas5ICCZZtc2XqcDPz4hf2gpo1k7/AZd6HuPlAsDy6wye9Qw==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"js-tiktoken": "^1.0.12",
|
||||
"openai": "^4.77.0",
|
||||
"zod": "^3.22.4",
|
||||
"zod-to-json-schema": "^3.22.3"
|
||||
"openai": "^5.3.0",
|
||||
"zod": "^3.25.32"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@langchain/core": ">=0.3.39 <0.4.0"
|
||||
"@langchain/core": ">=0.3.58 <0.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@langchain/openai/node_modules/openai": {
|
||||
"version": "5.8.2",
|
||||
"resolved": "https://registry.npmjs.org/openai/-/openai-5.8.2.tgz",
|
||||
"integrity": "sha512-8C+nzoHYgyYOXhHGN6r0fcb4SznuEn1R7YZMvlqDbnCuE0FM2mm3T1HiYW6WIcMS/F1Of2up/cSPjLPaWt0X9Q==",
|
||||
"license": "Apache-2.0",
|
||||
"bin": {
|
||||
"openai": "bin/cli"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"ws": "^8.18.0",
|
||||
"zod": "^3.23.8"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"ws": {
|
||||
"optional": true
|
||||
},
|
||||
"zod": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@langchain/textsplitters": {
|
||||
@@ -19346,44 +19291,6 @@
|
||||
"@langchain/core": ">=0.3.58 <0.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@langchain/xai/node_modules/@langchain/openai": {
|
||||
"version": "0.5.14",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/openai/-/openai-0.5.14.tgz",
|
||||
"integrity": "sha512-0GEj5K/qi1MRuZ4nE7NvyI4jTG+RSewLZqsExUwRukWdeqmkPNHGrogTa5ZDt7eaJxAaY7EgLC5ZnvCM3L1oug==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"js-tiktoken": "^1.0.12",
|
||||
"openai": "^5.3.0",
|
||||
"zod": "^3.25.32"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@langchain/core": ">=0.3.58 <0.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@langchain/xai/node_modules/openai": {
|
||||
"version": "5.5.1",
|
||||
"resolved": "https://registry.npmjs.org/openai/-/openai-5.5.1.tgz",
|
||||
"integrity": "sha512-5i19097mGotHA1eFsM6Tjd/tJ8uo9sa5Ysv4Q6bKJ2vtN6rc0MzMrUefXnLXYAJcmMQrC1Efhj0AvfIkXrQamw==",
|
||||
"license": "Apache-2.0",
|
||||
"bin": {
|
||||
"openai": "bin/cli"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"ws": "^8.18.0",
|
||||
"zod": "^3.23.8"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"ws": {
|
||||
"optional": true
|
||||
},
|
||||
"zod": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@lezer/common": {
|
||||
"version": "1.2.1",
|
||||
"resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.2.1.tgz",
|
||||
@@ -19436,22 +19343,22 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@librechat/agents": {
|
||||
"version": "2.4.46",
|
||||
"resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-2.4.46.tgz",
|
||||
"integrity": "sha512-zR27U19/WGF3HN64oBbiaFgjjWHaF7BjYzRFWzQKEkk+iEzCe59IpuEZUizQ54YcY02nhhh6S3MNUjhAJwMYVA==",
|
||||
"version": "2.4.51",
|
||||
"resolved": "https://registry.npmjs.org/@librechat/agents/-/agents-2.4.51.tgz",
|
||||
"integrity": "sha512-wmwas9/XvF+KSSez53iXx4f1yD4e2nDvqzv0kinGk9lbPIGIAOTCLKGOkS1lEHzSkKXUyGmYIzJFwaEqKm52fw==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@langchain/anthropic": "^0.3.23",
|
||||
"@langchain/aws": "^0.1.11",
|
||||
"@langchain/community": "^0.3.47",
|
||||
"@langchain/core": "^0.3.60",
|
||||
"@langchain/core": "^0.3.62",
|
||||
"@langchain/deepseek": "^0.0.2",
|
||||
"@langchain/google-genai": "^0.2.13",
|
||||
"@langchain/google-vertexai": "^0.2.13",
|
||||
"@langchain/langgraph": "^0.3.4",
|
||||
"@langchain/mistralai": "^0.2.1",
|
||||
"@langchain/ollama": "^0.2.3",
|
||||
"@langchain/openai": "^0.5.14",
|
||||
"@langchain/openai": "^0.5.16",
|
||||
"@langchain/xai": "^0.0.3",
|
||||
"cheerio": "^1.0.0",
|
||||
"dotenv": "^16.4.7",
|
||||
@@ -19463,9 +19370,9 @@
|
||||
}
|
||||
},
|
||||
"node_modules/@librechat/agents/node_modules/@langchain/community": {
|
||||
"version": "0.3.47",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/community/-/community-0.3.47.tgz",
|
||||
"integrity": "sha512-Vo42kAfkXpTFSevhEkeqqE55az8NyQgDktCbitXYuhipNbFYx08XVvqEDkFkB20MM/Z7u+cvLb+DxCqnKuH0CQ==",
|
||||
"version": "0.3.48",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/community/-/community-0.3.48.tgz",
|
||||
"integrity": "sha512-0KceBKSx34lL5cnbKybWIMQAFTgkZJMOzcZ1YdcagIwgoDa5a4MsJdtTABxaY0gu+87Uo3KqMj+GXx2wQqnZWA==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@langchain/openai": ">=0.2.0 <0.6.0",
|
||||
@@ -19987,23 +19894,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@librechat/agents/node_modules/@langchain/openai": {
|
||||
"version": "0.5.15",
|
||||
"resolved": "https://registry.npmjs.org/@langchain/openai/-/openai-0.5.15.tgz",
|
||||
"integrity": "sha512-ANadEHyAj5sufQpz+SOPpKbyoMcTLhnh8/d+afbSPUqWsIMPpEFX3HoSY3nrBPG6l4NQQNG5P5oHb4SdC8+YIg==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"js-tiktoken": "^1.0.12",
|
||||
"openai": "^5.3.0",
|
||||
"zod": "^3.25.32"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=18"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@langchain/core": ">=0.3.58 <0.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/@librechat/agents/node_modules/agent-base": {
|
||||
"version": "7.1.3",
|
||||
"resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz",
|
||||
@@ -20026,27 +19916,6 @@
|
||||
"node": ">= 14"
|
||||
}
|
||||
},
|
||||
"node_modules/@librechat/agents/node_modules/openai": {
|
||||
"version": "5.7.0",
|
||||
"resolved": "https://registry.npmjs.org/openai/-/openai-5.7.0.tgz",
|
||||
"integrity": "sha512-zXWawZl6J/P5Wz57/nKzVT3kJQZvogfuyuNVCdEp4/XU2UNrjL7SsuNpWAyLZbo6HVymwmnfno9toVzBhelygA==",
|
||||
"license": "Apache-2.0",
|
||||
"bin": {
|
||||
"openai": "bin/cli"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"ws": "^8.18.0",
|
||||
"zod": "^3.23.8"
|
||||
},
|
||||
"peerDependenciesMeta": {
|
||||
"ws": {
|
||||
"optional": true
|
||||
},
|
||||
"zod": {
|
||||
"optional": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/@librechat/agents/node_modules/uuid": {
|
||||
"version": "10.0.0",
|
||||
"resolved": "https://registry.npmjs.org/uuid/-/uuid-10.0.0.tgz",
|
||||
@@ -46624,7 +46493,7 @@
|
||||
"typescript": "^5.0.4"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@librechat/agents": "^2.4.46",
|
||||
"@librechat/agents": "^2.4.51",
|
||||
"@librechat/data-schemas": "*",
|
||||
"@modelcontextprotocol/sdk": "^1.12.3",
|
||||
"axios": "^1.8.2",
|
||||
@@ -46717,7 +46586,7 @@
|
||||
},
|
||||
"packages/data-provider": {
|
||||
"name": "librechat-data-provider",
|
||||
"version": "0.7.89",
|
||||
"version": "0.7.899",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"axios": "^1.8.2",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "LibreChat",
|
||||
"version": "v0.7.8",
|
||||
"version": "v0.7.9-rc1",
|
||||
"description": "",
|
||||
"workspaces": [
|
||||
"api",
|
||||
@@ -115,6 +115,7 @@
|
||||
"typescript-eslint": "^8.24.0"
|
||||
},
|
||||
"overrides": {
|
||||
"@langchain/openai": "^0.5.16",
|
||||
"axios": "1.8.2",
|
||||
"elliptic": "^6.6.1",
|
||||
"mdast-util-gfm-autolink-literal": "2.0.0",
|
||||
|
||||
@@ -69,7 +69,7 @@
|
||||
"registry": "https://registry.npmjs.org/"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@librechat/agents": "^2.4.46",
|
||||
"@librechat/agents": "^2.4.51",
|
||||
"@librechat/data-schemas": "*",
|
||||
"@modelcontextprotocol/sdk": "^1.12.3",
|
||||
"axios": "^1.8.2",
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { Run, Providers } from '@librechat/agents';
|
||||
import { providerEndpointMap, KnownEndpoints } from 'librechat-data-provider';
|
||||
import type {
|
||||
OpenAIClientOptions,
|
||||
StandardGraphConfig,
|
||||
EventHandler,
|
||||
GenericTool,
|
||||
@@ -76,6 +77,11 @@ export async function createRun({
|
||||
(agent.endpoint && agent.endpoint.toLowerCase().includes(KnownEndpoints.openrouter))
|
||||
) {
|
||||
reasoningKey = 'reasoning';
|
||||
} else if (
|
||||
(llmConfig as OpenAIClientOptions).useResponsesApi === true &&
|
||||
(provider === Providers.OPENAI || provider === Providers.AZURE)
|
||||
) {
|
||||
reasoningKey = 'reasoning';
|
||||
}
|
||||
|
||||
const graphConfig: StandardGraphConfig = {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { Providers } from '@librechat/agents';
|
||||
import { googleSettings, AuthKeys } from 'librechat-data-provider';
|
||||
import type { GoogleClientOptions, VertexAIClientOptions } from '@librechat/agents';
|
||||
import type { GoogleAIToolType } from '@langchain/google-common';
|
||||
import type * as t from '~/types';
|
||||
import { isEnabled } from '~/utils';
|
||||
|
||||
@@ -98,13 +99,14 @@ export function getGoogleConfig(
|
||||
const serviceKey =
|
||||
typeof serviceKeyRaw === 'string' ? JSON.parse(serviceKeyRaw) : (serviceKeyRaw ?? {});
|
||||
|
||||
const project_id = serviceKey?.project_id ?? null;
|
||||
const apiKey = creds[AuthKeys.GOOGLE_API_KEY] ?? null;
|
||||
const project_id = !apiKey ? (serviceKey?.project_id ?? null) : null;
|
||||
|
||||
const reverseProxyUrl = options.reverseProxyUrl;
|
||||
const authHeader = options.authHeader;
|
||||
|
||||
const {
|
||||
grounding,
|
||||
thinking = googleSettings.thinking.default,
|
||||
thinkingBudget = googleSettings.thinkingBudget.default,
|
||||
...modelOptions
|
||||
@@ -128,7 +130,7 @@ export function getGoogleConfig(
|
||||
}
|
||||
|
||||
// If we have a GCP project => Vertex AI
|
||||
if (project_id && provider === Providers.VERTEXAI) {
|
||||
if (provider === Providers.VERTEXAI) {
|
||||
(llmConfig as VertexAIClientOptions).authOptions = {
|
||||
credentials: { ...serviceKey },
|
||||
projectId: project_id,
|
||||
@@ -136,6 +138,10 @@ export function getGoogleConfig(
|
||||
(llmConfig as VertexAIClientOptions).location = process.env.GOOGLE_LOC || 'us-central1';
|
||||
} else if (apiKey && provider === Providers.GOOGLE) {
|
||||
llmConfig.apiKey = apiKey;
|
||||
} else {
|
||||
throw new Error(
|
||||
`Invalid credentials provided. Please provide either a valid API key or service account credentials for Google Cloud.`,
|
||||
);
|
||||
}
|
||||
|
||||
const shouldEnableThinking =
|
||||
@@ -183,8 +189,16 @@ export function getGoogleConfig(
|
||||
};
|
||||
}
|
||||
|
||||
const tools: GoogleAIToolType[] = [];
|
||||
|
||||
if (grounding) {
|
||||
tools.push({ googleSearch: {} });
|
||||
}
|
||||
|
||||
// Return the final shape
|
||||
return {
|
||||
/** @type {GoogleAIToolType[]} */
|
||||
tools,
|
||||
/** @type {Providers.GOOGLE | Providers.VERTEXAI} */
|
||||
provider,
|
||||
/** @type {GoogleClientOptions | VertexAIClientOptions} */
|
||||
|
||||
@@ -1,9 +1,25 @@
|
||||
import { ProxyAgent } from 'undici';
|
||||
import { KnownEndpoints } from 'librechat-data-provider';
|
||||
import { KnownEndpoints, removeNullishValues } from 'librechat-data-provider';
|
||||
import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
|
||||
import type { AzureOpenAIInput } from '@langchain/openai';
|
||||
import type { OpenAI } from 'openai';
|
||||
import type * as t from '~/types';
|
||||
import { sanitizeModelName, constructAzureURL } from '~/utils/azure';
|
||||
import { isEnabled } from '~/utils/common';
|
||||
|
||||
function hasReasoningParams({
|
||||
reasoning_effort,
|
||||
reasoning_summary,
|
||||
}: {
|
||||
reasoning_effort?: string | null;
|
||||
reasoning_summary?: string | null;
|
||||
}): boolean {
|
||||
return (
|
||||
(reasoning_effort != null && reasoning_effort !== '') ||
|
||||
(reasoning_summary != null && reasoning_summary !== '')
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates configuration options for creating a language model (LLM) instance.
|
||||
* @param apiKey - The API key for authentication.
|
||||
@@ -17,7 +33,7 @@ export function getOpenAIConfig(
|
||||
endpoint?: string | null,
|
||||
): t.LLMConfigResult {
|
||||
const {
|
||||
modelOptions = {},
|
||||
modelOptions: _modelOptions = {},
|
||||
reverseProxyUrl,
|
||||
defaultQuery,
|
||||
headers,
|
||||
@@ -27,8 +43,10 @@ export function getOpenAIConfig(
|
||||
addParams,
|
||||
dropParams,
|
||||
} = options;
|
||||
|
||||
const llmConfig: Partial<t.ClientOptions> & Partial<t.OpenAIParameters> = Object.assign(
|
||||
const { reasoning_effort, reasoning_summary, ...modelOptions } = _modelOptions;
|
||||
const llmConfig: Partial<t.ClientOptions> &
|
||||
Partial<t.OpenAIParameters> &
|
||||
Partial<AzureOpenAIInput> = Object.assign(
|
||||
{
|
||||
streaming,
|
||||
model: modelOptions.model ?? '',
|
||||
@@ -40,39 +58,6 @@ export function getOpenAIConfig(
|
||||
Object.assign(llmConfig, addParams);
|
||||
}
|
||||
|
||||
// Note: OpenAI Web Search models do not support any known parameters besides `max_tokens`
|
||||
if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) {
|
||||
const searchExcludeParams = [
|
||||
'frequency_penalty',
|
||||
'presence_penalty',
|
||||
'temperature',
|
||||
'top_p',
|
||||
'top_k',
|
||||
'stop',
|
||||
'logit_bias',
|
||||
'seed',
|
||||
'response_format',
|
||||
'n',
|
||||
'logprobs',
|
||||
'user',
|
||||
];
|
||||
|
||||
const updatedDropParams = dropParams || [];
|
||||
const combinedDropParams = [...new Set([...updatedDropParams, ...searchExcludeParams])];
|
||||
|
||||
combinedDropParams.forEach((param) => {
|
||||
if (param in llmConfig) {
|
||||
delete llmConfig[param as keyof t.ClientOptions];
|
||||
}
|
||||
});
|
||||
} else if (dropParams && Array.isArray(dropParams)) {
|
||||
dropParams.forEach((param) => {
|
||||
if (param in llmConfig) {
|
||||
delete llmConfig[param as keyof t.ClientOptions];
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
let useOpenRouter = false;
|
||||
const configOptions: t.OpenAIConfiguration = {};
|
||||
|
||||
@@ -119,7 +104,10 @@ export function getOpenAIConfig(
|
||||
llmConfig.model = process.env.AZURE_OPENAI_DEFAULT_MODEL;
|
||||
}
|
||||
|
||||
if (configOptions.baseURL) {
|
||||
const constructBaseURL = () => {
|
||||
if (!configOptions.baseURL) {
|
||||
return;
|
||||
}
|
||||
const azureURL = constructAzureURL({
|
||||
baseURL: configOptions.baseURL,
|
||||
azureOptions: updatedAzure,
|
||||
@@ -127,9 +115,40 @@ export function getOpenAIConfig(
|
||||
updatedAzure.azureOpenAIBasePath = azureURL.split(
|
||||
`/${updatedAzure.azureOpenAIApiDeploymentName}`,
|
||||
)[0];
|
||||
}
|
||||
};
|
||||
|
||||
constructBaseURL();
|
||||
Object.assign(llmConfig, updatedAzure);
|
||||
|
||||
const constructAzureResponsesApi = () => {
|
||||
if (!llmConfig.useResponsesApi) {
|
||||
return;
|
||||
}
|
||||
|
||||
configOptions.baseURL = constructAzureURL({
|
||||
baseURL: configOptions.baseURL || 'https://${INSTANCE_NAME}.openai.azure.com/openai/v1',
|
||||
azureOptions: llmConfig,
|
||||
});
|
||||
|
||||
delete llmConfig.azureOpenAIApiDeploymentName;
|
||||
delete llmConfig.azureOpenAIApiInstanceName;
|
||||
delete llmConfig.azureOpenAIApiVersion;
|
||||
delete llmConfig.azureOpenAIBasePath;
|
||||
delete llmConfig.azureOpenAIApiKey;
|
||||
llmConfig.apiKey = apiKey;
|
||||
|
||||
configOptions.defaultHeaders = {
|
||||
...configOptions.defaultHeaders,
|
||||
'api-key': apiKey,
|
||||
};
|
||||
configOptions.defaultQuery = {
|
||||
...configOptions.defaultQuery,
|
||||
'api-version': 'preview',
|
||||
};
|
||||
};
|
||||
|
||||
constructAzureResponsesApi();
|
||||
|
||||
llmConfig.model = updatedAzure.azureOpenAIApiDeploymentName;
|
||||
} else {
|
||||
llmConfig.apiKey = apiKey;
|
||||
@@ -139,11 +158,19 @@ export function getOpenAIConfig(
|
||||
configOptions.organization = process.env.OPENAI_ORGANIZATION;
|
||||
}
|
||||
|
||||
if (useOpenRouter && llmConfig.reasoning_effort != null) {
|
||||
llmConfig.reasoning = {
|
||||
effort: llmConfig.reasoning_effort,
|
||||
};
|
||||
delete llmConfig.reasoning_effort;
|
||||
if (
|
||||
hasReasoningParams({ reasoning_effort, reasoning_summary }) &&
|
||||
(llmConfig.useResponsesApi === true || useOpenRouter)
|
||||
) {
|
||||
llmConfig.reasoning = removeNullishValues(
|
||||
{
|
||||
effort: reasoning_effort,
|
||||
summary: reasoning_summary,
|
||||
},
|
||||
true,
|
||||
) as OpenAI.Reasoning;
|
||||
} else if (hasReasoningParams({ reasoning_effort })) {
|
||||
llmConfig.reasoning_effort = reasoning_effort;
|
||||
}
|
||||
|
||||
if (llmConfig.max_tokens != null) {
|
||||
@@ -151,8 +178,53 @@ export function getOpenAIConfig(
|
||||
delete llmConfig.max_tokens;
|
||||
}
|
||||
|
||||
const tools: BindToolsInput[] = [];
|
||||
|
||||
if (modelOptions.web_search) {
|
||||
llmConfig.useResponsesApi = true;
|
||||
tools.push({ type: 'web_search_preview' });
|
||||
}
|
||||
|
||||
/**
|
||||
* Note: OpenAI Web Search models do not support any known parameters besides `max_tokens`
|
||||
*/
|
||||
if (modelOptions.model && /gpt-4o.*search/.test(modelOptions.model)) {
|
||||
const searchExcludeParams = [
|
||||
'frequency_penalty',
|
||||
'presence_penalty',
|
||||
'reasoning',
|
||||
'reasoning_effort',
|
||||
'temperature',
|
||||
'top_p',
|
||||
'top_k',
|
||||
'stop',
|
||||
'logit_bias',
|
||||
'seed',
|
||||
'response_format',
|
||||
'n',
|
||||
'logprobs',
|
||||
'user',
|
||||
];
|
||||
|
||||
const updatedDropParams = dropParams || [];
|
||||
const combinedDropParams = [...new Set([...updatedDropParams, ...searchExcludeParams])];
|
||||
|
||||
combinedDropParams.forEach((param) => {
|
||||
if (param in llmConfig) {
|
||||
delete llmConfig[param as keyof t.ClientOptions];
|
||||
}
|
||||
});
|
||||
} else if (dropParams && Array.isArray(dropParams)) {
|
||||
dropParams.forEach((param) => {
|
||||
if (param in llmConfig) {
|
||||
delete llmConfig[param as keyof t.ClientOptions];
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
return {
|
||||
llmConfig,
|
||||
configOptions,
|
||||
tools,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -21,6 +21,7 @@ import type {
|
||||
OCRImage,
|
||||
} from '~/types';
|
||||
import { logAxiosError, createAxiosInstance } from '~/utils/axios';
|
||||
import { loadServiceKey } from '~/utils/key';
|
||||
|
||||
const axios = createAxiosInstance();
|
||||
const DEFAULT_MISTRAL_BASE_URL = 'https://api.mistral.ai/v1';
|
||||
@@ -443,27 +444,24 @@ async function loadGoogleAuthConfig(): Promise<{
|
||||
const serviceKeyPath =
|
||||
process.env.GOOGLE_SERVICE_KEY_FILE_PATH ||
|
||||
path.join(__dirname, '..', '..', '..', 'api', 'data', 'auth.json');
|
||||
const absolutePath = path.isAbsolute(serviceKeyPath)
|
||||
? serviceKeyPath
|
||||
: path.resolve(serviceKeyPath);
|
||||
|
||||
let serviceKey: GoogleServiceAccount;
|
||||
try {
|
||||
const authJsonContent = fs.readFileSync(absolutePath, 'utf8');
|
||||
serviceKey = JSON.parse(authJsonContent) as GoogleServiceAccount;
|
||||
} catch {
|
||||
throw new Error(`Google service account not found at ${absolutePath}`);
|
||||
const serviceKey = await loadServiceKey(serviceKeyPath);
|
||||
|
||||
if (!serviceKey) {
|
||||
throw new Error(
|
||||
`Google service account not found or could not be loaded from ${serviceKeyPath}`,
|
||||
);
|
||||
}
|
||||
|
||||
if (!serviceKey.client_email || !serviceKey.private_key || !serviceKey.project_id) {
|
||||
throw new Error('Invalid Google service account configuration');
|
||||
}
|
||||
|
||||
const jwt = await createJWT(serviceKey);
|
||||
const jwt = await createJWT(serviceKey as GoogleServiceAccount);
|
||||
const accessToken = await exchangeJWTForAccessToken(jwt);
|
||||
|
||||
return {
|
||||
serviceAccount: serviceKey,
|
||||
serviceAccount: serviceKey as GoogleServiceAccount,
|
||||
accessToken,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { z } from 'zod';
|
||||
import { openAISchema, EModelEndpoint } from 'librechat-data-provider';
|
||||
import type { TEndpointOption, TAzureConfig, TEndpoint } from 'librechat-data-provider';
|
||||
import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
|
||||
import type { OpenAIClientOptions } from '@librechat/agents';
|
||||
import type { AzureOptions } from './azure';
|
||||
|
||||
@@ -33,6 +34,7 @@ export type ClientOptions = OpenAIClientOptions & {
|
||||
export interface LLMConfigResult {
|
||||
llmConfig: ClientOptions;
|
||||
configOptions: OpenAIConfiguration;
|
||||
tools?: BindToolsInput[];
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import type { Providers } from '@librechat/agents';
|
||||
import type { Providers, ClientOptions } from '@librechat/agents';
|
||||
import type { AgentModelParameters } from 'librechat-data-provider';
|
||||
import type { OpenAIConfiguration } from './openai';
|
||||
|
||||
@@ -8,4 +8,5 @@ export type RunLLMConfig = {
|
||||
streamUsage: boolean;
|
||||
usage?: boolean;
|
||||
configuration?: OpenAIConfiguration;
|
||||
} & AgentModelParameters;
|
||||
} & AgentModelParameters &
|
||||
ClientOptions;
|
||||
|
||||
@@ -5,6 +5,7 @@ export * from './env';
|
||||
export * from './events';
|
||||
export * from './files';
|
||||
export * from './generators';
|
||||
export * from './key';
|
||||
export * from './llm';
|
||||
export * from './math';
|
||||
export * from './openid';
|
||||
|
||||
70
packages/api/src/utils/key.ts
Normal file
70
packages/api/src/utils/key.ts
Normal file
@@ -0,0 +1,70 @@
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
import axios from 'axios';
|
||||
import { logger } from '@librechat/data-schemas';
|
||||
|
||||
export interface GoogleServiceKey {
|
||||
type?: string;
|
||||
project_id?: string;
|
||||
private_key_id?: string;
|
||||
private_key?: string;
|
||||
client_email?: string;
|
||||
client_id?: string;
|
||||
auth_uri?: string;
|
||||
token_uri?: string;
|
||||
auth_provider_x509_cert_url?: string;
|
||||
client_x509_cert_url?: string;
|
||||
[key: string]: unknown;
|
||||
}
|
||||
|
||||
/**
|
||||
* Load Google service key from file path or URL
|
||||
* @param keyPath - The path or URL to the service key file
|
||||
* @returns The parsed service key object or null if failed
|
||||
*/
|
||||
export async function loadServiceKey(keyPath: string): Promise<GoogleServiceKey | null> {
|
||||
if (!keyPath) {
|
||||
return null;
|
||||
}
|
||||
|
||||
let serviceKey: unknown;
|
||||
|
||||
// Check if it's a URL
|
||||
if (/^https?:\/\//.test(keyPath)) {
|
||||
try {
|
||||
const response = await axios.get(keyPath);
|
||||
serviceKey = response.data;
|
||||
} catch (error) {
|
||||
logger.error(`Failed to fetch the service key from URL: ${keyPath}`, error);
|
||||
return null;
|
||||
}
|
||||
} else {
|
||||
// It's a file path
|
||||
try {
|
||||
const absolutePath = path.isAbsolute(keyPath) ? keyPath : path.resolve(keyPath);
|
||||
const fileContent = fs.readFileSync(absolutePath, 'utf8');
|
||||
serviceKey = JSON.parse(fileContent);
|
||||
} catch (error) {
|
||||
logger.error(`Failed to load service key from file: ${keyPath}`, error);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
// If the response is a string (e.g., from a URL that returns JSON as text), parse it
|
||||
if (typeof serviceKey === 'string') {
|
||||
try {
|
||||
serviceKey = JSON.parse(serviceKey);
|
||||
} catch (parseError) {
|
||||
logger.error(`Failed to parse service key JSON from ${keyPath}`, parseError);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
// Validate the service key has required fields
|
||||
if (!serviceKey || typeof serviceKey !== 'object') {
|
||||
logger.error(`Invalid service key format from ${keyPath}`);
|
||||
return null;
|
||||
}
|
||||
|
||||
return serviceKey as GoogleServiceKey;
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "librechat-data-provider",
|
||||
"version": "0.7.89",
|
||||
"version": "0.7.899",
|
||||
"description": "data services for librechat apps",
|
||||
"main": "dist/index.js",
|
||||
"module": "dist/index.es.js",
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
/* eslint-disable jest/no-conditional-expect */
|
||||
import { ZodError, z } from 'zod';
|
||||
import { generateDynamicSchema, validateSettingDefinitions, OptionTypes } from '../src/generate';
|
||||
import type { SettingsConfiguration } from '../src/generate';
|
||||
@@ -97,6 +96,37 @@ describe('generateDynamicSchema', () => {
|
||||
expect(result['data']).toEqual({ testEnum: 'option2' });
|
||||
});
|
||||
|
||||
it('should generate a schema for enum settings with empty string option', () => {
|
||||
const settings: SettingsConfiguration = [
|
||||
{
|
||||
key: 'testEnumWithEmpty',
|
||||
description: 'A test enum setting with empty string',
|
||||
type: 'enum',
|
||||
default: '',
|
||||
options: ['', 'option1', 'option2'],
|
||||
enumMappings: {
|
||||
'': 'None',
|
||||
option1: 'First Option',
|
||||
option2: 'Second Option',
|
||||
},
|
||||
component: 'slider',
|
||||
columnSpan: 2,
|
||||
label: 'Test Enum with Empty String',
|
||||
},
|
||||
];
|
||||
|
||||
const schema = generateDynamicSchema(settings);
|
||||
const result = schema.safeParse({ testEnumWithEmpty: '' });
|
||||
|
||||
expect(result.success).toBeTruthy();
|
||||
expect(result['data']).toEqual({ testEnumWithEmpty: '' });
|
||||
|
||||
// Test with non-empty option
|
||||
const result2 = schema.safeParse({ testEnumWithEmpty: 'option1' });
|
||||
expect(result2.success).toBeTruthy();
|
||||
expect(result2['data']).toEqual({ testEnumWithEmpty: 'option1' });
|
||||
});
|
||||
|
||||
it('should fail for incorrect enum value', () => {
|
||||
const settings: SettingsConfiguration = [
|
||||
{
|
||||
@@ -481,6 +511,47 @@ describe('validateSettingDefinitions', () => {
|
||||
|
||||
expect(() => validateSettingDefinitions(settingsExceedingMaxTags)).toThrow(ZodError);
|
||||
});
|
||||
|
||||
// Test for incomplete enumMappings
|
||||
test('should throw error for incomplete enumMappings', () => {
|
||||
const settingsWithIncompleteEnumMappings: SettingsConfiguration = [
|
||||
{
|
||||
key: 'displayMode',
|
||||
type: 'enum',
|
||||
component: 'dropdown',
|
||||
options: ['light', 'dark', 'auto'],
|
||||
enumMappings: {
|
||||
light: 'Light Mode',
|
||||
dark: 'Dark Mode',
|
||||
// Missing mapping for 'auto'
|
||||
},
|
||||
optionType: OptionTypes.Custom,
|
||||
},
|
||||
];
|
||||
|
||||
expect(() => validateSettingDefinitions(settingsWithIncompleteEnumMappings)).toThrow(ZodError);
|
||||
});
|
||||
|
||||
// Test for complete enumMappings including empty string
|
||||
test('should not throw error for complete enumMappings including empty string', () => {
|
||||
const settingsWithCompleteEnumMappings: SettingsConfiguration = [
|
||||
{
|
||||
key: 'selectionMode',
|
||||
type: 'enum',
|
||||
component: 'slider',
|
||||
options: ['', 'single', 'multiple'],
|
||||
enumMappings: {
|
||||
'': 'None',
|
||||
single: 'Single Selection',
|
||||
multiple: 'Multiple Selection',
|
||||
},
|
||||
default: '',
|
||||
optionType: OptionTypes.Custom,
|
||||
},
|
||||
];
|
||||
|
||||
expect(() => validateSettingDefinitions(settingsWithCompleteEnumMappings)).not.toThrow();
|
||||
});
|
||||
});
|
||||
|
||||
const settingsConfiguration: SettingsConfiguration = [
|
||||
@@ -515,7 +586,7 @@ const settingsConfiguration: SettingsConfiguration = [
|
||||
{
|
||||
key: 'presence_penalty',
|
||||
description:
|
||||
'Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model\'s likelihood to talk about new topics.',
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
|
||||
type: 'number',
|
||||
default: 0,
|
||||
range: {
|
||||
@@ -529,7 +600,7 @@ const settingsConfiguration: SettingsConfiguration = [
|
||||
{
|
||||
key: 'frequency_penalty',
|
||||
description:
|
||||
'Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model\'s likelihood to repeat the same line verbatim.',
|
||||
"Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
|
||||
type: 'number',
|
||||
default: 0,
|
||||
range: {
|
||||
|
||||
@@ -477,7 +477,6 @@ const termsOfServiceSchema = z.object({
|
||||
openNewTab: z.boolean().optional(),
|
||||
modalAcceptance: z.boolean().optional(),
|
||||
modalTitle: z.string().optional(),
|
||||
modalContent: z.string().or(z.array(z.string())).optional(),
|
||||
});
|
||||
|
||||
export type TTermsOfService = z.infer<typeof termsOfServiceSchema>;
|
||||
@@ -1257,6 +1256,10 @@ export enum ErrorTypes {
|
||||
* Google provider returned an error
|
||||
*/
|
||||
GOOGLE_ERROR = 'google_error',
|
||||
/**
|
||||
* Google provider does not allow custom tools with built-in tools
|
||||
*/
|
||||
GOOGLE_TOOL_CONFLICT = 'google_tool_conflict',
|
||||
/**
|
||||
* Invalid Agent Provider (excluded by Admin)
|
||||
*/
|
||||
@@ -1379,7 +1382,7 @@ export enum TTSProviders {
|
||||
/** Enum for app-wide constants */
|
||||
export enum Constants {
|
||||
/** Key for the app's version. */
|
||||
VERSION = 'v0.7.8',
|
||||
VERSION = 'v0.7.9-rc1',
|
||||
/** Key for the Custom Config's version (librechat.yaml). */
|
||||
CONFIG_VERSION = '1.2.8',
|
||||
/** Standard value for the first message's `parentMessageId` value, to indicate no parent exists. */
|
||||
|
||||
@@ -11,6 +11,7 @@ export default function createPayload(submission: t.TSubmission) {
|
||||
isContinued,
|
||||
isTemporary,
|
||||
ephemeralAgent,
|
||||
editedContent,
|
||||
} = submission;
|
||||
const { conversationId } = s.tConvoUpdateSchema.parse(conversation);
|
||||
const { endpoint: _e, endpointType } = endpointOption as {
|
||||
@@ -34,6 +35,7 @@ export default function createPayload(submission: t.TSubmission) {
|
||||
isContinued: !!(isEdited && isContinued),
|
||||
conversationId,
|
||||
isTemporary,
|
||||
editedContent,
|
||||
};
|
||||
|
||||
return { server, payload };
|
||||
|
||||
@@ -467,7 +467,11 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): voi
|
||||
}
|
||||
|
||||
/* Default value checks */
|
||||
if (setting.type === SettingTypes.Number && isNaN(setting.default as number) && setting.default != null) {
|
||||
if (
|
||||
setting.type === SettingTypes.Number &&
|
||||
isNaN(setting.default as number) &&
|
||||
setting.default != null
|
||||
) {
|
||||
errors.push({
|
||||
code: ZodIssueCode.custom,
|
||||
message: `Invalid default value for setting ${setting.key}. Must be a number.`,
|
||||
@@ -475,7 +479,11 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): voi
|
||||
});
|
||||
}
|
||||
|
||||
if (setting.type === SettingTypes.Boolean && typeof setting.default !== 'boolean' && setting.default != null) {
|
||||
if (
|
||||
setting.type === SettingTypes.Boolean &&
|
||||
typeof setting.default !== 'boolean' &&
|
||||
setting.default != null
|
||||
) {
|
||||
errors.push({
|
||||
code: ZodIssueCode.custom,
|
||||
message: `Invalid default value for setting ${setting.key}. Must be a boolean.`,
|
||||
@@ -485,7 +493,8 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): voi
|
||||
|
||||
if (
|
||||
(setting.type === SettingTypes.String || setting.type === SettingTypes.Enum) &&
|
||||
typeof setting.default !== 'string' && setting.default != null
|
||||
typeof setting.default !== 'string' &&
|
||||
setting.default != null
|
||||
) {
|
||||
errors.push({
|
||||
code: ZodIssueCode.custom,
|
||||
@@ -520,6 +529,19 @@ export function validateSettingDefinitions(settings: SettingsConfiguration): voi
|
||||
path: ['default'],
|
||||
});
|
||||
}
|
||||
|
||||
// Validate enumMappings
|
||||
if (setting.enumMappings && setting.type === SettingTypes.Enum && setting.options) {
|
||||
for (const option of setting.options) {
|
||||
if (!(option in setting.enumMappings)) {
|
||||
errors.push({
|
||||
code: ZodIssueCode.custom,
|
||||
message: `Missing enumMapping for option "${option}" in setting ${setting.key}.`,
|
||||
path: ['enumMappings'],
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (errors.length > 0) {
|
||||
|
||||
@@ -4,6 +4,7 @@ import {
|
||||
openAISettings,
|
||||
googleSettings,
|
||||
ReasoningEffort,
|
||||
ReasoningSummary,
|
||||
BedrockProviders,
|
||||
anthropicSettings,
|
||||
} from './types';
|
||||
@@ -71,6 +72,11 @@ const baseDefinitions: Record<string, SettingDefinition> = {
|
||||
default: ImageDetail.auto,
|
||||
component: 'slider',
|
||||
options: [ImageDetail.low, ImageDetail.auto, ImageDetail.high],
|
||||
enumMappings: {
|
||||
[ImageDetail.low]: 'com_ui_low',
|
||||
[ImageDetail.auto]: 'com_ui_auto',
|
||||
[ImageDetail.high]: 'com_ui_high',
|
||||
},
|
||||
optionType: 'conversation',
|
||||
columnSpan: 2,
|
||||
},
|
||||
@@ -211,9 +217,70 @@ const openAIParams: Record<string, SettingDefinition> = {
|
||||
description: 'com_endpoint_openai_reasoning_effort',
|
||||
descriptionCode: true,
|
||||
type: 'enum',
|
||||
default: ReasoningEffort.medium,
|
||||
default: ReasoningEffort.none,
|
||||
component: 'slider',
|
||||
options: [ReasoningEffort.low, ReasoningEffort.medium, ReasoningEffort.high],
|
||||
options: [
|
||||
ReasoningEffort.none,
|
||||
ReasoningEffort.low,
|
||||
ReasoningEffort.medium,
|
||||
ReasoningEffort.high,
|
||||
],
|
||||
enumMappings: {
|
||||
[ReasoningEffort.none]: 'com_ui_none',
|
||||
[ReasoningEffort.low]: 'com_ui_low',
|
||||
[ReasoningEffort.medium]: 'com_ui_medium',
|
||||
[ReasoningEffort.high]: 'com_ui_high',
|
||||
},
|
||||
optionType: 'model',
|
||||
columnSpan: 4,
|
||||
},
|
||||
useResponsesApi: {
|
||||
key: 'useResponsesApi',
|
||||
label: 'com_endpoint_use_responses_api',
|
||||
labelCode: true,
|
||||
description: 'com_endpoint_openai_use_responses_api',
|
||||
descriptionCode: true,
|
||||
type: 'boolean',
|
||||
default: false,
|
||||
component: 'switch',
|
||||
optionType: 'model',
|
||||
showDefault: false,
|
||||
columnSpan: 2,
|
||||
},
|
||||
web_search: {
|
||||
key: 'web_search',
|
||||
label: 'com_ui_web_search',
|
||||
labelCode: true,
|
||||
description: 'com_endpoint_openai_use_web_search',
|
||||
descriptionCode: true,
|
||||
type: 'boolean',
|
||||
default: false,
|
||||
component: 'switch',
|
||||
optionType: 'model',
|
||||
showDefault: false,
|
||||
columnSpan: 2,
|
||||
},
|
||||
reasoning_summary: {
|
||||
key: 'reasoning_summary',
|
||||
label: 'com_endpoint_reasoning_summary',
|
||||
labelCode: true,
|
||||
description: 'com_endpoint_openai_reasoning_summary',
|
||||
descriptionCode: true,
|
||||
type: 'enum',
|
||||
default: ReasoningSummary.none,
|
||||
component: 'slider',
|
||||
options: [
|
||||
ReasoningSummary.none,
|
||||
ReasoningSummary.auto,
|
||||
ReasoningSummary.concise,
|
||||
ReasoningSummary.detailed,
|
||||
],
|
||||
enumMappings: {
|
||||
[ReasoningSummary.none]: 'com_ui_none',
|
||||
[ReasoningSummary.auto]: 'com_ui_auto',
|
||||
[ReasoningSummary.concise]: 'com_ui_concise',
|
||||
[ReasoningSummary.detailed]: 'com_ui_detailed',
|
||||
},
|
||||
optionType: 'model',
|
||||
columnSpan: 4,
|
||||
},
|
||||
@@ -347,7 +414,9 @@ const bedrock: Record<string, SettingDefinition> = {
|
||||
labelCode: true,
|
||||
type: 'number',
|
||||
component: 'input',
|
||||
placeholder: 'com_endpoint_anthropic_maxoutputtokens',
|
||||
description: 'com_endpoint_anthropic_maxoutputtokens',
|
||||
descriptionCode: true,
|
||||
placeholder: 'com_nav_theme_system',
|
||||
placeholderCode: true,
|
||||
optionType: 'model',
|
||||
columnSpan: 2,
|
||||
@@ -481,6 +550,19 @@ const google: Record<string, SettingDefinition> = {
|
||||
optionType: 'conversation',
|
||||
columnSpan: 2,
|
||||
},
|
||||
grounding: {
|
||||
key: 'grounding',
|
||||
label: 'com_endpoint_use_search_grounding',
|
||||
labelCode: true,
|
||||
description: 'com_endpoint_google_use_search_grounding',
|
||||
descriptionCode: true,
|
||||
type: 'boolean',
|
||||
default: false,
|
||||
component: 'switch',
|
||||
optionType: 'model',
|
||||
showDefault: false,
|
||||
columnSpan: 2,
|
||||
},
|
||||
};
|
||||
|
||||
const googleConfig: SettingsConfiguration = [
|
||||
@@ -494,6 +576,7 @@ const googleConfig: SettingsConfiguration = [
|
||||
librechat.resendFiles,
|
||||
google.thinking,
|
||||
google.thinkingBudget,
|
||||
google.grounding,
|
||||
];
|
||||
|
||||
const googleCol1: SettingsConfiguration = [
|
||||
@@ -511,6 +594,7 @@ const googleCol2: SettingsConfiguration = [
|
||||
librechat.resendFiles,
|
||||
google.thinking,
|
||||
google.thinkingBudget,
|
||||
google.grounding,
|
||||
];
|
||||
|
||||
const openAI: SettingsConfiguration = [
|
||||
@@ -525,7 +609,10 @@ const openAI: SettingsConfiguration = [
|
||||
baseDefinitions.stop,
|
||||
librechat.resendFiles,
|
||||
baseDefinitions.imageDetail,
|
||||
openAIParams.web_search,
|
||||
openAIParams.reasoning_effort,
|
||||
openAIParams.useResponsesApi,
|
||||
openAIParams.reasoning_summary,
|
||||
];
|
||||
|
||||
const openAICol1: SettingsConfiguration = [
|
||||
@@ -542,9 +629,12 @@ const openAICol2: SettingsConfiguration = [
|
||||
openAIParams.frequency_penalty,
|
||||
openAIParams.presence_penalty,
|
||||
baseDefinitions.stop,
|
||||
openAIParams.reasoning_effort,
|
||||
librechat.resendFiles,
|
||||
baseDefinitions.imageDetail,
|
||||
openAIParams.reasoning_effort,
|
||||
openAIParams.reasoning_summary,
|
||||
openAIParams.useResponsesApi,
|
||||
openAIParams.web_search,
|
||||
];
|
||||
|
||||
const anthropicConfig: SettingsConfiguration = [
|
||||
|
||||
@@ -112,11 +112,19 @@ export enum ImageDetail {
|
||||
}
|
||||
|
||||
export enum ReasoningEffort {
|
||||
none = '',
|
||||
low = 'low',
|
||||
medium = 'medium',
|
||||
high = 'high',
|
||||
}
|
||||
|
||||
export enum ReasoningSummary {
|
||||
none = '',
|
||||
auto = 'auto',
|
||||
concise = 'concise',
|
||||
detailed = 'detailed',
|
||||
}
|
||||
|
||||
export const imageDetailNumeric = {
|
||||
[ImageDetail.low]: 0,
|
||||
[ImageDetail.auto]: 1,
|
||||
@@ -131,6 +139,7 @@ export const imageDetailValue = {
|
||||
|
||||
export const eImageDetailSchema = z.nativeEnum(ImageDetail);
|
||||
export const eReasoningEffortSchema = z.nativeEnum(ReasoningEffort);
|
||||
export const eReasoningSummarySchema = z.nativeEnum(ReasoningSummary);
|
||||
|
||||
export const defaultAssistantFormValues = {
|
||||
assistant: '',
|
||||
@@ -494,6 +503,7 @@ export const tMessageSchema = z.object({
|
||||
title: z.string().nullable().or(z.literal('New Chat')).default('New Chat'),
|
||||
sender: z.string().optional(),
|
||||
text: z.string(),
|
||||
/** @deprecated */
|
||||
generation: z.string().nullable().optional(),
|
||||
isCreatedByUser: z.boolean(),
|
||||
error: z.boolean().optional(),
|
||||
@@ -619,8 +629,15 @@ export const tConversationSchema = z.object({
|
||||
file_ids: z.array(z.string()).optional(),
|
||||
/* vision */
|
||||
imageDetail: eImageDetailSchema.optional(),
|
||||
/* OpenAI: o1 only */
|
||||
reasoning_effort: eReasoningEffortSchema.optional(),
|
||||
/* OpenAI: Reasoning models only */
|
||||
reasoning_effort: eReasoningEffortSchema.optional().nullable(),
|
||||
reasoning_summary: eReasoningSummarySchema.optional().nullable(),
|
||||
/* OpenAI: use Responses API */
|
||||
useResponsesApi: z.boolean().optional(),
|
||||
/* OpenAI: use Responses API with Web Search */
|
||||
web_search: z.boolean().optional(),
|
||||
/* Google: use Search Grounding */
|
||||
grounding: z.boolean().optional(),
|
||||
/* assistant */
|
||||
assistant_id: z.string().optional(),
|
||||
/* agents */
|
||||
@@ -717,6 +734,14 @@ export const tQueryParamsSchema = tConversationSchema
|
||||
top_p: true,
|
||||
/** @endpoints openAI, custom, azureOpenAI */
|
||||
max_tokens: true,
|
||||
/** @endpoints openAI, custom, azureOpenAI */
|
||||
reasoning_effort: true,
|
||||
/** @endpoints openAI, custom, azureOpenAI */
|
||||
reasoning_summary: true,
|
||||
/** @endpoints openAI, custom, azureOpenAI */
|
||||
useResponsesApi: true,
|
||||
/** @endpoints google */
|
||||
grounding: true,
|
||||
/** @endpoints google, anthropic, bedrock */
|
||||
topP: true,
|
||||
/** @endpoints google, anthropic */
|
||||
@@ -799,6 +824,7 @@ export const googleBaseSchema = tConversationSchema.pick({
|
||||
topK: true,
|
||||
thinking: true,
|
||||
thinkingBudget: true,
|
||||
grounding: true,
|
||||
iconURL: true,
|
||||
greeting: true,
|
||||
spec: true,
|
||||
@@ -830,6 +856,7 @@ export const googleGenConfigSchema = z
|
||||
thinkingBudget: coerceNumber.optional(),
|
||||
})
|
||||
.optional(),
|
||||
grounding: z.boolean().optional(),
|
||||
})
|
||||
.strip()
|
||||
.optional();
|
||||
@@ -1044,10 +1071,13 @@ export const openAIBaseSchema = tConversationSchema.pick({
|
||||
maxContextTokens: true,
|
||||
max_tokens: true,
|
||||
reasoning_effort: true,
|
||||
reasoning_summary: true,
|
||||
useResponsesApi: true,
|
||||
web_search: true,
|
||||
});
|
||||
|
||||
export const openAISchema = openAIBaseSchema
|
||||
.transform((obj: Partial<TConversation>) => removeNullishValues(obj))
|
||||
.transform((obj: Partial<TConversation>) => removeNullishValues(obj, true))
|
||||
.catch(() => ({}));
|
||||
|
||||
export const compactGoogleSchema = googleBaseSchema
|
||||
|
||||
@@ -109,6 +109,11 @@ export type TPayload = Partial<TMessage> &
|
||||
messages?: TMessages;
|
||||
isTemporary: boolean;
|
||||
ephemeralAgent?: TEphemeralAgent | null;
|
||||
editedContent?: {
|
||||
index: number;
|
||||
text: string;
|
||||
type: 'text' | 'think';
|
||||
} | null;
|
||||
};
|
||||
|
||||
export type TSubmission = {
|
||||
@@ -127,6 +132,11 @@ export type TSubmission = {
|
||||
endpointOption: TEndpointOption;
|
||||
clientTimestamp?: string;
|
||||
ephemeralAgent?: TEphemeralAgent | null;
|
||||
editedContent?: {
|
||||
index: number;
|
||||
text: string;
|
||||
type: 'text' | 'think';
|
||||
} | null;
|
||||
};
|
||||
|
||||
export type EventSubmission = Omit<TSubmission, 'initialResponse'> & { initialResponse: TMessage };
|
||||
|
||||
@@ -131,8 +131,14 @@ export const conversationPreset = {
|
||||
max_tokens: {
|
||||
type: Number,
|
||||
},
|
||||
/** omni models only */
|
||||
useResponsesApi: {
|
||||
type: Boolean,
|
||||
},
|
||||
/** Reasoning models only */
|
||||
reasoning_effort: {
|
||||
type: String,
|
||||
},
|
||||
reasoning_summary: {
|
||||
type: String,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -46,6 +46,8 @@ export interface IPreset extends Document {
|
||||
maxContextTokens?: number;
|
||||
max_tokens?: number;
|
||||
reasoning_effort?: string;
|
||||
reasoning_summary?: string;
|
||||
useResponsesApi?: boolean;
|
||||
// end of additional fields
|
||||
agentOptions?: unknown;
|
||||
}
|
||||
|
||||
@@ -45,6 +45,10 @@ export interface IConversation extends Document {
|
||||
maxContextTokens?: number;
|
||||
max_tokens?: number;
|
||||
reasoning_effort?: string;
|
||||
reasoning_summary?: string;
|
||||
useResponsesApi?: boolean;
|
||||
web_search?: boolean;
|
||||
grounding?: boolean;
|
||||
// Additional fields
|
||||
files?: string[];
|
||||
expiredAt?: Date;
|
||||
|
||||
60
terms/terms_de.md
Normal file
60
terms/terms_de.md
Normal file
@@ -0,0 +1,60 @@
|
||||
# Allgemeine Geschäftsbedingungen für [Ihr Unternehmen/Produktname]
|
||||
|
||||
*Gültigkeitsdatum: [Datum einfügen]*
|
||||
|
||||
Willkommen bei [Ihre Webseite], erreichbar unter [URL einfügen]. Diese Allgemeinen Geschäftsbedingungen („AGB“) regeln Ihre Nutzung unserer Website und Dienstleistungen. Durch den Zugriff auf oder die Nutzung unserer Website stimmen Sie diesen Bedingungen und unserer Datenschutzerklärung zu, die unter [URL zur Datenschutzerklärung einfügen] verfügbar ist.
|
||||
|
||||
## Inhaltsverzeichnis
|
||||
|
||||
1. **Akzeptanz der Bedingungen**
|
||||
- Zustimmung durch Nutzung
|
||||
|
||||
2. **Eigentum und Lizenz**
|
||||
- Gewährte Rechte
|
||||
- Einschränkungen bei Weitergabe oder Weiterverkauf
|
||||
|
||||
3. **Benutzerkonten** (falls zutreffend)
|
||||
- Erstellung und Sicherheit des Kontos
|
||||
- Verantwortlichkeiten des Benutzers
|
||||
|
||||
4. **Benutzerdaten**
|
||||
- Erhebung und Nutzung personenbezogener Daten
|
||||
- Hinweis auf Datenschutzerklärung
|
||||
|
||||
5. **Cookies und nicht-personenbezogene Daten**
|
||||
- Nutzung und Zustimmung
|
||||
|
||||
6. **Richtlinien zur akzeptablen Nutzung**
|
||||
- Zulässige und verbotene Aktivitäten
|
||||
- Konsequenzen bei Verstößen
|
||||
|
||||
7. **Links und Dienste Dritter**
|
||||
- Haftungsausschluss für Inhalte Dritter
|
||||
|
||||
8. **Rechte am geistigen Eigentum**
|
||||
- Eigentum am Inhalt
|
||||
- Einschränkungen der Nutzung geistigen Eigentums
|
||||
|
||||
9. **Gewährleistungsausschluss**
|
||||
- Haftungsbeschränkungen
|
||||
|
||||
10. **Freistellung**
|
||||
- Verpflichtungen des Nutzers zur Freistellung des Unternehmens
|
||||
|
||||
11. **Haftungsbeschränkung**
|
||||
- Begrenzung der Haftung und Ausschlüsse
|
||||
|
||||
12. **Kündigung**
|
||||
- Gründe für eine Kündigung
|
||||
- Auswirkungen einer Kündigung
|
||||
|
||||
13. **Geltendes Recht und Gerichtsstand**
|
||||
- Anwendbares Recht und Gerichtsstand
|
||||
|
||||
14. **Änderungen dieser Bedingungen**
|
||||
- Benachrichtigung und Zustimmung zu Änderungen
|
||||
|
||||
15. **Kontaktinformationen**
|
||||
- Kontaktmöglichkeiten bei Fragen
|
||||
|
||||
Durch den Zugriff auf und die Nutzung von [Ihre Webseite/Dienstleistung] erkennen Sie an, dass Sie diese Allgemeinen Geschäftsbedingungen gelesen und verstanden haben und ihnen zustimmen.
|
||||
60
terms/terms_en.md
Normal file
60
terms/terms_en.md
Normal file
@@ -0,0 +1,60 @@
|
||||
# Terms and Conditions for [Your Company/Product Name]
|
||||
|
||||
*Effective Date: [Insert Effective Date]*
|
||||
|
||||
Welcome to [Your Website], accessible at [Insert URL]. These Terms and Conditions ("Terms") govern your use of our website and services. By accessing or using our Website, you agree to comply with these Terms and our Privacy Policy, accessible at [Insert URL to Privacy Policy].
|
||||
|
||||
## Table of Contents
|
||||
|
||||
1. **Acceptance of Terms**
|
||||
- Agreement to terms upon use
|
||||
|
||||
2. **Ownership and License**
|
||||
- Rights granted
|
||||
- Restrictions on redistribution or resale
|
||||
|
||||
3. **User Accounts** (if applicable)
|
||||
- Account creation and security
|
||||
- Responsibilities of the user
|
||||
|
||||
4. **User Data**
|
||||
- Collection and use of personal data
|
||||
- Reference to Privacy Policy
|
||||
|
||||
5. **Cookies and Non-Personal Data**
|
||||
- Usage and consent
|
||||
|
||||
6. **Acceptable Use Policy**
|
||||
- Permitted and prohibited activities
|
||||
- Consequences of violations
|
||||
|
||||
7. **Third-Party Links and Services**
|
||||
- Disclaimer for third-party content
|
||||
|
||||
8. **Intellectual Property Rights**
|
||||
- Ownership of content
|
||||
- Restrictions on use of intellectual property
|
||||
|
||||
9. **Disclaimer of Warranties**
|
||||
- Limitations on liability
|
||||
|
||||
10. **Indemnification**
|
||||
- User obligations to indemnify the company
|
||||
|
||||
11. **Limitation of Liability**
|
||||
- Cap on liability and exclusions
|
||||
|
||||
12. **Termination**
|
||||
- Grounds for termination
|
||||
- Effects of termination
|
||||
|
||||
13. **Governing Law and Jurisdiction**
|
||||
- Applicable laws and legal jurisdiction
|
||||
|
||||
14. **Changes to These Terms**
|
||||
- Notification and acceptance of changes
|
||||
|
||||
15. **Contact Information**
|
||||
- How users can contact us with questions
|
||||
|
||||
By accessing and using [Your Website/Service], you acknowledge that you have read, understood, and agree to be bound by these Terms and Conditions.
|
||||
60
terms/terms_fr.md
Normal file
60
terms/terms_fr.md
Normal file
@@ -0,0 +1,60 @@
|
||||
# Conditions Générales pour [Votre Entreprise/Nom du Produit]
|
||||
|
||||
*Date d'entrée en vigueur : [Insérer la date]*
|
||||
|
||||
Bienvenue sur [Votre site web], accessible à l'adresse [Insérer URL]. Ces Conditions Générales (« CG ») régissent votre utilisation de notre site web et de nos services. En accédant ou en utilisant notre site web, vous acceptez ces conditions ainsi que notre Politique de confidentialité disponible à l'adresse [Insérer URL vers la Politique de confidentialité].
|
||||
|
||||
## Table des matières
|
||||
|
||||
1. **Acceptation des conditions**
|
||||
- Acceptation des conditions par l'utilisation
|
||||
|
||||
2. **Propriété et Licence**
|
||||
- Droits accordés
|
||||
- Restrictions sur la redistribution ou la revente
|
||||
|
||||
3. **Comptes utilisateur** (le cas échéant)
|
||||
- Création et sécurité du compte
|
||||
- Responsabilités de l'utilisateur
|
||||
|
||||
4. **Données Utilisateur**
|
||||
- Collecte et utilisation des données personnelles
|
||||
- Référence à la Politique de confidentialité
|
||||
|
||||
5. **Cookies et données non personnelles**
|
||||
- Utilisation et consentement
|
||||
|
||||
6. **Politique d'utilisation acceptable**
|
||||
- Activités autorisées et interdites
|
||||
- Conséquences en cas de violations
|
||||
|
||||
7. **Liens et services tiers**
|
||||
- Exclusion de responsabilité pour les contenus tiers
|
||||
|
||||
8. **Droits de propriété intellectuelle**
|
||||
- Propriété du contenu
|
||||
- Restrictions sur l'utilisation de la propriété intellectuelle
|
||||
|
||||
9. **Exclusion de garanties**
|
||||
- Limitations de responsabilité
|
||||
|
||||
10. **Indemnisation**
|
||||
- Obligations de l'utilisateur à indemniser l'entreprise
|
||||
|
||||
11. **Limitation de responsabilité**
|
||||
- Plafond de responsabilité et exclusions
|
||||
|
||||
12. **Résiliation**
|
||||
- Motifs de résiliation
|
||||
- Effets de la résiliation
|
||||
|
||||
13. **Loi applicable et juridiction compétente**
|
||||
- Lois applicables et juridiction
|
||||
|
||||
14. **Modifications des présentes conditions**
|
||||
- Notification et acceptation des modifications
|
||||
|
||||
15. **Informations de contact**
|
||||
- Comment nous contacter en cas de questions
|
||||
|
||||
En accédant et en utilisant [Votre site web/service], vous reconnaissez avoir lu, compris et accepté d'être lié par ces Conditions Générales.
|
||||
Reference in New Issue
Block a user