Compare commits

feat/issue...chart-1.9.

69 Commits
1e53ffa7ea
65281464fc
658921af88
ce6456c39f
d904b281f1
5e35b7d09d
6adb425780
e6aeec9f25
861ef98d29
05c706137e
9fbc2afe40
8adef91cf5
70ff6e94f2
0e05ff484f
250209858a
9e77f835a6
7973cb42ef
0446d0e190
33d6b337bc
64df54528d
d46dde4e01
13b784a3e6
90e610ceda
cbbbde3681
05c9195197
9495520f6f
87d7ee4b0e
d8d5d59d92
e3d33fed8d
cbf52eabe3
36f0365fd4
589f119310
d41b07c0af
114deecc4e
f59daaeecc
bc77bbd1ba
c602088178
3d1cedb85b
bf2567bc8f
5ce67b5b71
cbd217efae
d990fe1d5f
7c9a868d34
e9a85d5c65
f61afc1124
5566cc499e
ded3f2e998
7792fcee17
f931731ef8
07d0abc9fd
fbe341a171
20282f32c8
6fa3db2969
ff027e8243
e9b678dd6a
bb7a0274fa
a5189052ec
bcd97aad2f
9c77f53454
31a283a4fe
857c054a9a
c9103a1708
7288449011
7897801fbc
838fb53208
9ff608e6af
1b8a0bfaee
c0ed738aed
0e5bb6f98c
.env.example (11 changes)

```diff
@@ -196,7 +196,7 @@ GOOGLE_KEY=user_provided
 #============#
 
 OPENAI_API_KEY=user_provided
-# OPENAI_MODELS=o1,o1-mini,o1-preview,gpt-4o,gpt-4.5-preview,chatgpt-4o-latest,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
+# OPENAI_MODELS=gpt-5,gpt-5-codex,gpt-5-mini,gpt-5-nano,o3-pro,o3,o4-mini,gpt-4.1,gpt-4.1-mini,gpt-4.1-nano,o3-mini,o1-pro,o1,gpt-4o,gpt-4o-mini
 
 DEBUG_OPENAI=false
@@ -459,6 +459,9 @@ OPENID_CALLBACK_URL=/oauth/openid/callback
 OPENID_REQUIRED_ROLE=
 OPENID_REQUIRED_ROLE_TOKEN_KIND=
 OPENID_REQUIRED_ROLE_PARAMETER_PATH=
+OPENID_ADMIN_ROLE=
+OPENID_ADMIN_ROLE_PARAMETER_PATH=
+OPENID_ADMIN_ROLE_TOKEN_KIND=
 # Set to determine which user info property returned from OpenID Provider to store as the User's username
 OPENID_USERNAME_CLAIM=
 # Set to determine which user info property returned from OpenID Provider to store as the User's name
@@ -650,6 +653,12 @@ HELP_AND_FAQ_URL=https://librechat.ai
 # Google tag manager id
 #ANALYTICS_GTM_ID=user provided google tag manager id
 
+# limit conversation file imports to a certain number of bytes in size to avoid the container
+# maxing out memory limitations by unremarking this line and supplying a file size in bytes
+# such as the below example of 250 mib
+# CONVERSATION_IMPORT_MAX_FILE_SIZE_BYTES=262144000
+
 
 #===============#
 # REDIS Options #
 #===============#
```
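As a quick check of the example value in the new comment: 250 MiB is 250 * 1024 * 1024 bytes, which is exactly the 262144000 shown. A one-line sketch:

```js
// 250 MiB in bytes matches the example value above
const MIB = 1024 * 1024;
console.log(250 * MIB); // 262144000
```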
.github/workflows/cache-integration-tests.yml (new file, 78 lines)

```diff
@@ -0,0 +1,78 @@
+name: Cache Integration Tests
+
+on:
+  pull_request:
+    branches:
+      - main
+      - dev
+      - release/*
+    paths:
+      - 'packages/api/src/cache/**'
+      - 'redis-config/**'
+      - '.github/workflows/cache-integration-tests.yml'
+
+jobs:
+  cache_integration_tests:
+    name: Run Cache Integration Tests
+    timeout-minutes: 30
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Use Node.js 20.x
+        uses: actions/setup-node@v4
+        with:
+          node-version: 20
+          cache: 'npm'
+
+      - name: Install Redis tools
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y redis-server redis-tools
+
+      - name: Start Single Redis Instance
+        run: |
+          redis-server --daemonize yes --port 6379
+          sleep 2
+          # Verify single Redis is running
+          redis-cli -p 6379 ping || exit 1
+
+      - name: Start Redis Cluster
+        working-directory: redis-config
+        run: |
+          chmod +x start-cluster.sh stop-cluster.sh
+          ./start-cluster.sh
+          sleep 10
+          # Verify cluster is running
+          redis-cli -p 7001 cluster info || exit 1
+          redis-cli -p 7002 cluster info || exit 1
+          redis-cli -p 7003 cluster info || exit 1
+
+      - name: Install dependencies
+        run: npm ci
+
+      - name: Build packages
+        run: |
+          npm run build:data-provider
+          npm run build:data-schemas
+          npm run build:api
+
+      - name: Run cache integration tests
+        working-directory: packages/api
+        env:
+          NODE_ENV: test
+          USE_REDIS: true
+          REDIS_URI: redis://127.0.0.1:6379
+          REDIS_CLUSTER_URI: redis://127.0.0.1:7001,redis://127.0.0.1:7002,redis://127.0.0.1:7003
+        run: npm run test:cache:integration
+
+      - name: Stop Redis Cluster
+        if: always()
+        working-directory: redis-config
+        run: ./stop-cluster.sh || true
+
+      - name: Stop Single Redis Instance
+        if: always()
+        run: redis-cli -p 6379 shutdown || true
```
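The tests reach Redis through the two URIs the workflow exports. A minimal connection sketch, assuming ioredis (the client behind the cache layer's ioredisClient) and the same ports:

```js
const Redis = require('ioredis');

// Single-node client, matching REDIS_URI in the workflow.
const single = new Redis('redis://127.0.0.1:6379');

// Cluster client, matching the three nodes behind REDIS_CLUSTER_URI.
const cluster = new Redis.Cluster([
  { host: '127.0.0.1', port: 7001 },
  { host: '127.0.0.1', port: 7002 },
  { host: '127.0.0.1', port: 7003 },
]);

async function smokeTest() {
  console.log(await single.ping()); // 'PONG'
  await cluster.set('smoke:key', 'ok');
  console.log(await cluster.get('smoke:key')); // 'ok'
  single.disconnect();
  cluster.disconnect();
}

smokeTest().catch(console.error);
```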
```diff
@@ -1,5 +1,2 @@
-#!/usr/bin/env sh
+set -e
-. "$(dirname -- "$0")/_/husky.sh"
-
-[ -n "$CI" ] && exit 0
 npx lint-staged --config ./.husky/lint-staged.config.js
```
```diff
@@ -1,4 +1,4 @@
-# v0.8.0
+# v0.8.1-rc1
 
 # Base node image
 FROM node:20-alpine AS node
```
```diff
@@ -1,5 +1,5 @@
 # Dockerfile.multi
-# v0.8.0
+# v0.8.1-rc1
 
 # Base for all builds
 FROM node:20-alpine AS base-min
```
```diff
@@ -1,20 +1,29 @@
 const crypto = require('crypto');
 const fetch = require('node-fetch');
 const { logger } = require('@librechat/data-schemas');
-const { getBalanceConfig } = require('@librechat/api');
+const {
+  supportsBalanceCheck,
+  isAgentsEndpoint,
+  isParamEndpoint,
+  EModelEndpoint,
+  getBalanceConfig,
+  extractFileContext,
+  encodeAndFormatAudios,
+  encodeAndFormatVideos,
+  encodeAndFormatDocuments,
+} = require('@librechat/api');
 const {
+  Constants,
+  ErrorTypes,
+  FileSources,
+  ContentTypes,
   excludedKeys,
-  ErrorTypes,
-  Constants,
-  EModelEndpoint,
-  isParamEndpoint,
-  isAgentsEndpoint,
-  supportsBalanceCheck,
 } = require('librechat-data-provider');
 const { getMessages, saveMessage, updateMessage, saveConvo, getConvo } = require('~/models');
 const { getStrategyFunctions } = require('~/server/services/Files/strategies');
 const { checkBalance } = require('~/models/balanceMethods');
 const { truncateToolCallOutputs } = require('./prompts');
 const countTokens = require('~/server/utils/countTokens');
 const { getFiles } = require('~/models/File');
 const TextStream = require('./TextStream');
@@ -1198,8 +1207,135 @@ class BaseClient {
     return await this.sendCompletion(payload, opts);
   }
 
+  async addDocuments(message, attachments) {
+    const documentResult = await encodeAndFormatDocuments(
+      this.options.req,
+      attachments,
+      {
+        provider: this.options.agent?.provider,
+        useResponsesApi: this.options.agent?.model_parameters?.useResponsesApi,
+      },
+      getStrategyFunctions,
+    );
+    message.documents =
+      documentResult.documents && documentResult.documents.length
+        ? documentResult.documents
+        : undefined;
+    return documentResult.files;
+  }
+
+  async addVideos(message, attachments) {
+    const videoResult = await encodeAndFormatVideos(
+      this.options.req,
+      attachments,
+      this.options.agent.provider,
+      getStrategyFunctions,
+    );
+    message.videos =
+      videoResult.videos && videoResult.videos.length ? videoResult.videos : undefined;
+    return videoResult.files;
+  }
+
+  async addAudios(message, attachments) {
+    const audioResult = await encodeAndFormatAudios(
+      this.options.req,
+      attachments,
+      this.options.agent.provider,
+      getStrategyFunctions,
+    );
+    message.audios =
+      audioResult.audios && audioResult.audios.length ? audioResult.audios : undefined;
+    return audioResult.files;
+  }
+
+  /**
+   * Extracts text context from attachments and sets it on the message.
+   * This handles text that was already extracted from files (OCR, transcriptions, document text, etc.)
+   * @param {TMessage} message - The message to add context to
+   * @param {MongoFile[]} attachments - Array of file attachments
+   * @returns {Promise<void>}
+   */
+  async addFileContextToMessage(message, attachments) {
+    const fileContext = await extractFileContext({
+      attachments,
+      req: this.options?.req,
+      tokenCountFn: (text) => countTokens(text),
+    });
+
+    if (fileContext) {
+      message.fileContext = fileContext;
+    }
+  }
+
+  async processAttachments(message, attachments) {
+    const categorizedAttachments = {
+      images: [],
+      videos: [],
+      audios: [],
+      documents: [],
+    };
+
+    const allFiles = [];
+
+    for (const file of attachments) {
+      /** @type {FileSources} */
+      const source = file.source ?? FileSources.local;
+      if (source === FileSources.text) {
+        allFiles.push(file);
+        continue;
+      }
+      if (file.embedded === true || file.metadata?.fileIdentifier != null) {
+        allFiles.push(file);
+        continue;
+      }
+
+      if (file.type.startsWith('image/')) {
+        categorizedAttachments.images.push(file);
+      } else if (file.type === 'application/pdf') {
+        categorizedAttachments.documents.push(file);
+        allFiles.push(file);
+      } else if (file.type.startsWith('video/')) {
+        categorizedAttachments.videos.push(file);
+        allFiles.push(file);
+      } else if (file.type.startsWith('audio/')) {
+        categorizedAttachments.audios.push(file);
+        allFiles.push(file);
+      }
+    }
+
+    const [imageFiles] = await Promise.all([
+      categorizedAttachments.images.length > 0
+        ? this.addImageURLs(message, categorizedAttachments.images)
+        : Promise.resolve([]),
+      categorizedAttachments.documents.length > 0
+        ? this.addDocuments(message, categorizedAttachments.documents)
+        : Promise.resolve([]),
+      categorizedAttachments.videos.length > 0
+        ? this.addVideos(message, categorizedAttachments.videos)
+        : Promise.resolve([]),
+      categorizedAttachments.audios.length > 0
+        ? this.addAudios(message, categorizedAttachments.audios)
+        : Promise.resolve([]),
+    ]);
+
+    allFiles.push(...imageFiles);
+
+    const seenFileIds = new Set();
+    const uniqueFiles = [];
+
+    for (const file of allFiles) {
+      if (file.file_id && !seenFileIds.has(file.file_id)) {
+        seenFileIds.add(file.file_id);
+        uniqueFiles.push(file);
+      } else if (!file.file_id) {
+        uniqueFiles.push(file);
+      }
+    }
+
+    return uniqueFiles;
+  }
+
   /**
    *
   * @param {TMessage[]} _messages
   * @returns {Promise<TMessage[]>}
   */
@@ -1248,7 +1384,8 @@ class BaseClient {
       {},
     );
 
-    await this.addImageURLs(message, files, this.visionMode);
+    await this.addFileContextToMessage(message, files);
+    await this.processAttachments(message, files);
 
     this.message_file_map[message.messageId] = files;
     return message;
```
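A rough sketch of how the new BaseClient helpers compose; the wrapper function here is purely illustrative and not part of the diff:

```js
// Illustrative wrapper, not part of the diff: text context is extracted first,
// then processAttachments routes each file by MIME type (image/PDF/video/audio)
// and returns a list deduplicated by file_id.
async function attachFiles(client, message, attachments) {
  await client.addFileContextToMessage(message, attachments); // sets message.fileContext
  const files = await client.processAttachments(message, attachments);
  // message.documents / message.videos / message.audios are now arrays or undefined
  return files;
}
```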
```diff
@@ -2,7 +2,7 @@ const { z } = require('zod');
 const axios = require('axios');
 const { Ollama } = require('ollama');
 const { sleep } = require('@librechat/agents');
-const { logAxiosError } = require('@librechat/api');
+const { resolveHeaders } = require('@librechat/api');
 const { logger } = require('@librechat/data-schemas');
 const { Constants } = require('librechat-data-provider');
 const { deriveBaseURL } = require('~/utils');
@@ -44,6 +44,7 @@ class OllamaClient {
   constructor(options = {}) {
     const host = deriveBaseURL(options.baseURL ?? 'http://localhost:11434');
     this.streamRate = options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
+    this.headers = options.headers ?? {};
     /** @type {Ollama} */
     this.client = new Ollama({ host });
   }
@@ -51,27 +52,32 @@ class OllamaClient {
   /**
    * Fetches Ollama models from the specified base API path.
    * @param {string} baseURL
+   * @param {Object} [options] - Optional configuration
+   * @param {Partial<IUser>} [options.user] - User object for header resolution
+   * @param {Record<string, string>} [options.headers] - Headers to include in the request
    * @returns {Promise<string[]>} The Ollama models.
+   * @throws {Error} Throws if the Ollama API request fails
    */
-  static async fetchModels(baseURL) {
-    let models = [];
+  static async fetchModels(baseURL, options = {}) {
     if (!baseURL) {
-      return models;
+      return [];
     }
-    try {
-      const ollamaEndpoint = deriveBaseURL(baseURL);
-      /** @type {Promise<AxiosResponse<OllamaListResponse>>} */
-      const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
-        timeout: 5000,
-      });
-      models = response.data.models.map((tag) => tag.name);
-      return models;
-    } catch (error) {
-      const logMessage =
-        "Failed to fetch models from Ollama API. If you are not using Ollama directly, and instead, through some aggregator or reverse proxy that handles fetching via OpenAI spec, ensure the name of the endpoint doesn't start with `ollama` (case-insensitive).";
-      logAxiosError({ message: logMessage, error });
-      return [];
-    }
+    const ollamaEndpoint = deriveBaseURL(baseURL);
+
+    const resolvedHeaders = resolveHeaders({
+      headers: options.headers,
+      user: options.user,
+    });
+
+    /** @type {Promise<AxiosResponse<OllamaListResponse>>} */
+    const response = await axios.get(`${ollamaEndpoint}/api/tags`, {
+      headers: resolvedHeaders,
+      timeout: 5000,
+    });
+
+    const models = response.data.models.map((tag) => tag.name);
+    return models;
   }
 
   /**
```
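A sketch of the updated call; the import path, user object, and header values are placeholders, and the header templating is an assumption about how resolveHeaders is typically used:

```js
// Hypothetical caller inside the repo; values below are illustrative only.
const { OllamaClient } = require('~/app/clients/OllamaClient');

async function listModels() {
  const models = await OllamaClient.fetchModels('http://localhost:11434', {
    user: { id: 'user-123' },
    headers: { 'X-User-Id': '{{LIBRECHAT_USER_ID}}' }, // resolved per user
  });
  console.log(models); // e.g. ['llama3:latest', 'mistral:latest']
}
```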
```diff
@@ -43,7 +43,6 @@ const { runTitleChain } = require('./chains');
 const { extractBaseURL } = require('~/utils');
 const { tokenSplit } = require('./document');
 const BaseClient = require('./BaseClient');
-const { createLLM } = require('./llm');
 
 class OpenAIClient extends BaseClient {
   constructor(apiKey, options = {}) {
@@ -614,65 +613,8 @@ class OpenAIClient extends BaseClient {
     return (reply ?? '').trim();
   }
 
-  initializeLLM({
-    model = openAISettings.model.default,
-    modelName,
-    temperature = 0.2,
-    max_tokens,
-    streaming,
-  }) {
-    const modelOptions = {
-      modelName: modelName ?? model,
-      temperature,
-      user: this.user,
-    };
-
-    if (max_tokens) {
-      modelOptions.max_tokens = max_tokens;
-    }
-
-    const configOptions = {};
-
-    if (this.langchainProxy) {
-      configOptions.basePath = this.langchainProxy;
-    }
-
-    if (this.useOpenRouter) {
-      configOptions.basePath = 'https://openrouter.ai/api/v1';
-      configOptions.baseOptions = {
-        headers: {
-          'HTTP-Referer': 'https://librechat.ai',
-          'X-Title': 'LibreChat',
-        },
-      };
-    }
-
-    const { headers } = this.options;
-    if (headers && typeof headers === 'object' && !Array.isArray(headers)) {
-      configOptions.baseOptions = {
-        headers: resolveHeaders({
-          headers: {
-            ...headers,
-            ...configOptions?.baseOptions?.headers,
-          },
-        }),
-      };
-    }
-
-    if (this.options.proxy) {
-      configOptions.httpAgent = new HttpsProxyAgent(this.options.proxy);
-      configOptions.httpsAgent = new HttpsProxyAgent(this.options.proxy);
-    }
-
-    const llm = createLLM({
-      modelOptions,
-      configOptions,
-      openAIApiKey: this.apiKey,
-      azure: this.azure,
-      streaming,
-    });
-
-    return llm;
+  initializeLLM() {
+    throw new Error('Deprecated');
   }
 
   /**
```
```diff
@@ -1,81 +0,0 @@
-const { ChatOpenAI } = require('@langchain/openai');
-const { isEnabled, sanitizeModelName, constructAzureURL } = require('@librechat/api');
-
-/**
- * Creates a new instance of a language model (LLM) for chat interactions.
- *
- * @param {Object} options - The options for creating the LLM.
- * @param {ModelOptions} options.modelOptions - The options specific to the model, including modelName, temperature, presence_penalty, frequency_penalty, and other model-related settings.
- * @param {ConfigOptions} options.configOptions - Configuration options for the API requests, including proxy settings and custom headers.
- * @param {Callbacks} [options.callbacks] - Callback functions for managing the lifecycle of the LLM, including token buffers, context, and initial message count.
- * @param {boolean} [options.streaming=false] - Determines if the LLM should operate in streaming mode.
- * @param {string} options.openAIApiKey - The API key for OpenAI, used for authentication.
- * @param {AzureOptions} [options.azure={}] - Optional Azure-specific configurations. If provided, Azure configurations take precedence over OpenAI configurations.
- *
- * @returns {ChatOpenAI} An instance of the ChatOpenAI class, configured with the provided options.
- *
- * @example
- * const llm = createLLM({
- *   modelOptions: { modelName: 'gpt-4o-mini', temperature: 0.2 },
- *   configOptions: { basePath: 'https://example.api/path' },
- *   callbacks: { onMessage: handleMessage },
- *   openAIApiKey: 'your-api-key'
- * });
- */
-function createLLM({
-  modelOptions,
-  configOptions,
-  callbacks,
-  streaming = false,
-  openAIApiKey,
-  azure = {},
-}) {
-  let credentials = { openAIApiKey };
-  let configuration = {
-    apiKey: openAIApiKey,
-    ...(configOptions.basePath && { baseURL: configOptions.basePath }),
-  };
-
-  /** @type {AzureOptions} */
-  let azureOptions = {};
-  if (azure) {
-    const useModelName = isEnabled(process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME);
-
-    credentials = {};
-    configuration = {};
-    azureOptions = azure;
-
-    azureOptions.azureOpenAIApiDeploymentName = useModelName
-      ? sanitizeModelName(modelOptions.modelName)
-      : azureOptions.azureOpenAIApiDeploymentName;
-  }
-
-  if (azure && process.env.AZURE_OPENAI_DEFAULT_MODEL) {
-    modelOptions.modelName = process.env.AZURE_OPENAI_DEFAULT_MODEL;
-  }
-
-  if (azure && configOptions.basePath) {
-    const azureURL = constructAzureURL({
-      baseURL: configOptions.basePath,
-      azureOptions,
-    });
-    azureOptions.azureOpenAIBasePath = azureURL.split(
-      `/${azureOptions.azureOpenAIApiDeploymentName}`,
-    )[0];
-  }
-
-  return new ChatOpenAI(
-    {
-      streaming,
-      credentials,
-      configuration,
-      ...azureOptions,
-      ...modelOptions,
-      ...credentials,
-      callbacks,
-    },
-    configOptions,
-  );
-}
-
-module.exports = createLLM;
```
```diff
@@ -1,7 +1,5 @@
-const createLLM = require('./createLLM');
 const createCoherePayload = require('./createCoherePayload');
 
 module.exports = {
-  createLLM,
   createCoherePayload,
 };
```
```diff
@@ -1,31 +0,0 @@
-require('dotenv').config();
-const { ChatOpenAI } = require('@langchain/openai');
-const { getBufferString, ConversationSummaryBufferMemory } = require('langchain/memory');
-
-const chatPromptMemory = new ConversationSummaryBufferMemory({
-  llm: new ChatOpenAI({ modelName: 'gpt-4o-mini', temperature: 0 }),
-  maxTokenLimit: 10,
-  returnMessages: true,
-});
-
-(async () => {
-  await chatPromptMemory.saveContext({ input: 'hi my name\'s Danny' }, { output: 'whats up' });
-  await chatPromptMemory.saveContext({ input: 'not much you' }, { output: 'not much' });
-  await chatPromptMemory.saveContext(
-    { input: 'are you excited for the olympics?' },
-    { output: 'not really' },
-  );
-
-  // We can also utilize the predict_new_summary method directly.
-  const messages = await chatPromptMemory.chatHistory.getMessages();
-  console.log('MESSAGES\n\n');
-  console.log(JSON.stringify(messages));
-  const previous_summary = '';
-  const predictSummary = await chatPromptMemory.predictNewSummary(messages, previous_summary);
-  console.log('SUMMARY\n\n');
-  console.log(JSON.stringify(getBufferString([{ role: 'system', content: predictSummary }])));
-
-  // const { history } = await chatPromptMemory.loadMemoryVariables({});
-  // console.log('HISTORY\n\n');
-  // console.log(JSON.stringify(history));
-})();
```
```diff
@@ -3,6 +3,7 @@ const { EModelEndpoint, ArtifactModes } = require('librechat-data-provider');
 const { generateShadcnPrompt } = require('~/app/clients/prompts/shadcn-docs/generate');
 const { components } = require('~/app/clients/prompts/shadcn-docs/components');
 
+/** @deprecated */
 // eslint-disable-next-line no-unused-vars
 const artifactsPromptV1 = dedent`The assistant can create and reference artifacts during conversations.
 
@@ -115,6 +116,7 @@ Here are some examples of correct usage of artifacts:
 </assistant_response>
 </example>
 </examples>`;
 
 const artifactsPrompt = dedent`The assistant can create and reference artifacts during conversations.
 
 Artifacts are for substantial, self-contained content that users might modify or reuse, displayed in a separate UI window for clarity.
@@ -165,6 +167,10 @@ Artifacts are for substantial, self-contained content that users might modify or
   - SVG: "image/svg+xml"
     - The user interface will render the Scalable Vector Graphics (SVG) image within the artifact tags.
     - The assistant should specify the viewbox of the SVG rather than defining a width/height
+  - Markdown: "text/markdown" or "text/md"
+    - The user interface will render Markdown content placed within the artifact tags.
+    - Supports standard Markdown syntax including headers, lists, links, images, code blocks, tables, and more.
+    - Both "text/markdown" and "text/md" are accepted as valid MIME types for Markdown content.
   - Mermaid Diagrams: "application/vnd.mermaid"
     - The user interface will render Mermaid diagrams placed within the artifact tags.
   - React Components: "application/vnd.react"
@@ -366,6 +372,10 @@ Artifacts are for substantial, self-contained content that users might modify or
   - SVG: "image/svg+xml"
     - The user interface will render the Scalable Vector Graphics (SVG) image within the artifact tags.
     - The assistant should specify the viewbox of the SVG rather than defining a width/height
+  - Markdown: "text/markdown" or "text/md"
+    - The user interface will render Markdown content placed within the artifact tags.
+    - Supports standard Markdown syntax including headers, lists, links, images, code blocks, tables, and more.
+    - Both "text/markdown" and "text/md" are accepted as valid MIME types for Markdown content.
   - Mermaid Diagrams: "application/vnd.mermaid"
     - The user interface will render Mermaid diagrams placed within the artifact tags.
   - React Components: "application/vnd.react"
```
```diff
@@ -1,9 +1,8 @@
 const { z } = require('zod');
 const path = require('path');
 const OpenAI = require('openai');
-const fetch = require('node-fetch');
 const { v4: uuidv4 } = require('uuid');
-const { ProxyAgent } = require('undici');
+const { ProxyAgent, fetch } = require('undici');
 const { Tool } = require('@langchain/core/tools');
 const { logger } = require('@librechat/data-schemas');
 const { getImageBasename } = require('@librechat/api');
```
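Importing fetch from undici alongside ProxyAgent keeps requests and proxying on one HTTP stack. A minimal sketch of how the two combine (the URL handling is illustrative):

```js
const { ProxyAgent, fetch } = require('undici');

// One HTTP stack for both cases: pass a dispatcher only when a proxy is set.
async function download(url) {
  const dispatcher = process.env.PROXY ? new ProxyAgent(process.env.PROXY) : undefined;
  const res = await fetch(url, { dispatcher });
  return Buffer.from(await res.arrayBuffer());
}
```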
```diff
@@ -5,6 +5,7 @@ const FormData = require('form-data');
 const { ProxyAgent } = require('undici');
 const { tool } = require('@langchain/core/tools');
 const { logger } = require('@librechat/data-schemas');
+const { HttpsProxyAgent } = require('https-proxy-agent');
 const { logAxiosError, oaiToolkit } = require('@librechat/api');
 const { ContentTypes, EImageOutputType } = require('librechat-data-provider');
 const { getStrategyFunctions } = require('~/server/services/Files/strategies');
@@ -348,16 +349,7 @@ Error Message: ${error.message}`);
   };
 
   if (process.env.PROXY) {
-    try {
-      const url = new URL(process.env.PROXY);
-      axiosConfig.proxy = {
-        host: url.hostname.replace(/^\[|\]$/g, ''),
-        port: url.port ? parseInt(url.port, 10) : undefined,
-        protocol: url.protocol.replace(':', ''),
-      };
-    } catch (error) {
-      logger.error('Error parsing proxy URL:', error);
-    }
+    axiosConfig.httpsAgent = new HttpsProxyAgent(process.env.PROXY);
   }
 
   if (process.env.IMAGE_GEN_OAI_AZURE_API_VERSION && process.env.IMAGE_GEN_OAI_BASEURL) {
```
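The change swaps axios's parsed host/port proxy fields for a tunneling agent. A small sketch of the resulting configuration, with a placeholder proxy URL (the `proxy: false` line is a common companion setting, not something this diff adds):

```js
const axios = require('axios');
const { HttpsProxyAgent } = require('https-proxy-agent');

// The agent tunnels HTTPS through a CONNECT request, so the proxy URL can be
// passed whole instead of being split into host/port/protocol by hand.
const client = axios.create({
  httpsAgent: new HttpsProxyAgent('http://proxy.example:8080'),
  proxy: false, // defer entirely to the agent
});
```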
```diff
@@ -30,7 +30,6 @@ jest.mock('~/server/services/Config', () => ({
   }),
 }));
 
-const { BaseLLM } = require('@langchain/openai');
 const { Calculator } = require('@langchain/community/tools/calculator');
 
 const { User } = require('~/db/models');
@@ -172,7 +171,6 @@ describe('Tool Handlers', () => {
   beforeAll(async () => {
     const toolMap = await loadTools({
       user: fakeUser._id,
-      model: BaseLLM,
       tools: sampleTools,
       returnMap: true,
       useSpecs: true,
@@ -266,7 +264,6 @@ describe('Tool Handlers', () => {
   it('returns an empty object when no tools are requested', async () => {
     toolFunctions = await loadTools({
       user: fakeUser._id,
-      model: BaseLLM,
       returnMap: true,
       useSpecs: true,
     });
@@ -276,7 +273,6 @@ describe('Tool Handlers', () => {
     process.env.SD_WEBUI_URL = mockCredential;
     toolFunctions = await loadTools({
       user: fakeUser._id,
-      model: BaseLLM,
      tools: ['stable-diffusion'],
       functions: true,
       returnMap: true,
```
api/cache/cacheFactory.js (deleted, 108 lines)

```diff
@@ -1,108 +0,0 @@
-const KeyvRedis = require('@keyv/redis').default;
-const { Keyv } = require('keyv');
-const { RedisStore } = require('rate-limit-redis');
-const { Time } = require('librechat-data-provider');
-const { logger } = require('@librechat/data-schemas');
-const { RedisStore: ConnectRedis } = require('connect-redis');
-const MemoryStore = require('memorystore')(require('express-session'));
-const { keyvRedisClient, ioredisClient, GLOBAL_PREFIX_SEPARATOR } = require('./redisClients');
-const { cacheConfig } = require('./cacheConfig');
-const { violationFile } = require('./keyvFiles');
-
-/**
- * Creates a cache instance using Redis or a fallback store. Suitable for general caching needs.
- * @param {string} namespace - The cache namespace.
- * @param {number} [ttl] - Time to live for cache entries.
- * @param {object} [fallbackStore] - Optional fallback store if Redis is not used.
- * @returns {Keyv} Cache instance.
- */
-const standardCache = (namespace, ttl = undefined, fallbackStore = undefined) => {
-  if (
-    cacheConfig.USE_REDIS &&
-    !cacheConfig.FORCED_IN_MEMORY_CACHE_NAMESPACES?.includes(namespace)
-  ) {
-    try {
-      const keyvRedis = new KeyvRedis(keyvRedisClient);
-      const cache = new Keyv(keyvRedis, { namespace, ttl });
-      keyvRedis.namespace = cacheConfig.REDIS_KEY_PREFIX;
-      keyvRedis.keyPrefixSeparator = GLOBAL_PREFIX_SEPARATOR;
-
-      cache.on('error', (err) => {
-        logger.error(`Cache error in namespace ${namespace}:`, err);
-      });
-
-      return cache;
-    } catch (err) {
-      logger.error(`Failed to create Redis cache for namespace ${namespace}:`, err);
-      throw err;
-    }
-  }
-  if (fallbackStore) return new Keyv({ store: fallbackStore, namespace, ttl });
-  return new Keyv({ namespace, ttl });
-};
-
-/**
- * Creates a cache instance for storing violation data.
- * Uses a file-based fallback store if Redis is not enabled.
- * @param {string} namespace - The cache namespace for violations.
- * @param {number} [ttl] - Time to live for cache entries.
- * @returns {Keyv} Cache instance for violations.
- */
-const violationCache = (namespace, ttl = undefined) => {
-  return standardCache(`violations:${namespace}`, ttl, violationFile);
-};
-
-/**
- * Creates a session cache instance using Redis or in-memory store.
- * @param {string} namespace - The session namespace.
- * @param {number} [ttl] - Time to live for session entries.
- * @returns {MemoryStore | ConnectRedis} Session store instance.
- */
-const sessionCache = (namespace, ttl = undefined) => {
-  namespace = namespace.endsWith(':') ? namespace : `${namespace}:`;
-  if (!cacheConfig.USE_REDIS) return new MemoryStore({ ttl, checkPeriod: Time.ONE_DAY });
-  const store = new ConnectRedis({ client: ioredisClient, ttl, prefix: namespace });
-  if (ioredisClient) {
-    ioredisClient.on('error', (err) => {
-      logger.error(`Session store Redis error for namespace ${namespace}:`, err);
-    });
-  }
-  return store;
-};
-
-/**
- * Creates a rate limiter cache using Redis.
- * @param {string} prefix - The key prefix for rate limiting.
- * @returns {RedisStore|undefined} RedisStore instance or undefined if Redis is not used.
- */
-const limiterCache = (prefix) => {
-  if (!prefix) throw new Error('prefix is required');
-  if (!cacheConfig.USE_REDIS) return undefined;
-  prefix = prefix.endsWith(':') ? prefix : `${prefix}:`;
-
-  try {
-    if (!ioredisClient) {
-      logger.warn(`Redis client not available for rate limiter with prefix ${prefix}`);
-      return undefined;
-    }
-
-    return new RedisStore({ sendCommand, prefix });
-  } catch (err) {
-    logger.error(`Failed to create Redis rate limiter for prefix ${prefix}:`, err);
-    return undefined;
-  }
-};
-
-const sendCommand = (...args) => {
-  if (!ioredisClient) {
-    logger.warn('Redis client not available for command execution');
-    return Promise.reject(new Error('Redis client not available'));
-  }
-
-  return ioredisClient.call(...args).catch((err) => {
-    logger.error('Redis command execution failed:', err);
-    throw err;
-  });
-};
-
-module.exports = { standardCache, sessionCache, violationCache, limiterCache };
```
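The factory is deleted here, but the same API surfaces from @librechat/api in the getLogStores.js change below, which suggests a move into packages/api rather than a removal. A hedged usage sketch of that API:

```js
// Usage sketch; the @librechat/api import path follows the getLogStores.js
// change below and is otherwise an assumption about where the factory moved.
const { standardCache, violationCache, limiterCache } = require('@librechat/api');

const tokenCache = standardCache('tokens', 60_000); // Keyv instance with a 60s TTL
const loginViolations = violationCache('logins'); // file-backed unless USE_REDIS=true
const loginLimiter = limiterCache('login_limit'); // RedisStore, or undefined without Redis
```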
api/cache/cacheFactory.spec.js (deleted, 432 lines)

```diff
@@ -1,432 +0,0 @@
-const { Time } = require('librechat-data-provider');
-
-// Mock dependencies first
-const mockKeyvRedis = {
-  namespace: '',
-  keyPrefixSeparator: '',
-};
-
-const mockKeyv = jest.fn().mockReturnValue({
-  mock: 'keyv',
-  on: jest.fn(),
-});
-const mockConnectRedis = jest.fn().mockReturnValue({ mock: 'connectRedis' });
-const mockMemoryStore = jest.fn().mockReturnValue({ mock: 'memoryStore' });
-const mockRedisStore = jest.fn().mockReturnValue({ mock: 'redisStore' });
-
-const mockIoredisClient = {
-  call: jest.fn(),
-  on: jest.fn(),
-};
-
-const mockKeyvRedisClient = {};
-const mockViolationFile = {};
-
-// Mock modules before requiring the main module
-jest.mock('@keyv/redis', () => ({
-  default: jest.fn().mockImplementation(() => mockKeyvRedis),
-}));
-
-jest.mock('keyv', () => ({
-  Keyv: mockKeyv,
-}));
-
-jest.mock('./cacheConfig', () => ({
-  cacheConfig: {
-    USE_REDIS: false,
-    REDIS_KEY_PREFIX: 'test',
-    FORCED_IN_MEMORY_CACHE_NAMESPACES: [],
-  },
-}));
-
-jest.mock('./redisClients', () => ({
-  keyvRedisClient: mockKeyvRedisClient,
-  ioredisClient: mockIoredisClient,
-  GLOBAL_PREFIX_SEPARATOR: '::',
-}));
-
-jest.mock('./keyvFiles', () => ({
-  violationFile: mockViolationFile,
-}));
-
-jest.mock('connect-redis', () => ({ RedisStore: mockConnectRedis }));
-
-jest.mock('memorystore', () => jest.fn(() => mockMemoryStore));
-
-jest.mock('rate-limit-redis', () => ({
-  RedisStore: mockRedisStore,
-}));
-
-jest.mock('@librechat/data-schemas', () => ({
-  logger: {
-    error: jest.fn(),
-    warn: jest.fn(),
-    info: jest.fn(),
-  },
-}));
-
-// Import after mocking
-const { standardCache, sessionCache, violationCache, limiterCache } = require('./cacheFactory');
-const { cacheConfig } = require('./cacheConfig');
-
-describe('cacheFactory', () => {
-  beforeEach(() => {
-    jest.clearAllMocks();
-
-    // Reset cache config mock
-    cacheConfig.USE_REDIS = false;
-    cacheConfig.REDIS_KEY_PREFIX = 'test';
-    cacheConfig.FORCED_IN_MEMORY_CACHE_NAMESPACES = [];
-  });
-
-  describe('redisCache', () => {
-    it('should create Redis cache when USE_REDIS is true', () => {
-      cacheConfig.USE_REDIS = true;
-      const namespace = 'test-namespace';
-      const ttl = 3600;
-
-      standardCache(namespace, ttl);
-
-      expect(require('@keyv/redis').default).toHaveBeenCalledWith(mockKeyvRedisClient);
-      expect(mockKeyv).toHaveBeenCalledWith(mockKeyvRedis, { namespace, ttl });
-      expect(mockKeyvRedis.namespace).toBe(cacheConfig.REDIS_KEY_PREFIX);
-      expect(mockKeyvRedis.keyPrefixSeparator).toBe('::');
-    });
-
-    it('should create Redis cache with undefined ttl when not provided', () => {
-      cacheConfig.USE_REDIS = true;
-      const namespace = 'test-namespace';
-
-      standardCache(namespace);
-
-      expect(mockKeyv).toHaveBeenCalledWith(mockKeyvRedis, { namespace, ttl: undefined });
-    });
-
-    it('should use fallback store when USE_REDIS is false and fallbackStore is provided', () => {
-      cacheConfig.USE_REDIS = false;
-      const namespace = 'test-namespace';
-      const ttl = 3600;
-      const fallbackStore = { some: 'store' };
-
-      standardCache(namespace, ttl, fallbackStore);
-
-      expect(mockKeyv).toHaveBeenCalledWith({ store: fallbackStore, namespace, ttl });
-    });
-
-    it('should create default Keyv instance when USE_REDIS is false and no fallbackStore', () => {
-      cacheConfig.USE_REDIS = false;
-      const namespace = 'test-namespace';
-      const ttl = 3600;
-
-      standardCache(namespace, ttl);
-
-      expect(mockKeyv).toHaveBeenCalledWith({ namespace, ttl });
-    });
-
-    it('should handle namespace and ttl as undefined', () => {
-      cacheConfig.USE_REDIS = false;
-
-      standardCache();
-
-      expect(mockKeyv).toHaveBeenCalledWith({ namespace: undefined, ttl: undefined });
-    });
-
-    it('should use fallback when namespace is in FORCED_IN_MEMORY_CACHE_NAMESPACES', () => {
-      cacheConfig.USE_REDIS = true;
-      cacheConfig.FORCED_IN_MEMORY_CACHE_NAMESPACES = ['forced-memory'];
-      const namespace = 'forced-memory';
-      const ttl = 3600;
-
-      standardCache(namespace, ttl);
-
-      expect(require('@keyv/redis').default).not.toHaveBeenCalled();
-      expect(mockKeyv).toHaveBeenCalledWith({ namespace, ttl });
-    });
-
-    it('should use Redis when namespace is not in FORCED_IN_MEMORY_CACHE_NAMESPACES', () => {
-      cacheConfig.USE_REDIS = true;
-      cacheConfig.FORCED_IN_MEMORY_CACHE_NAMESPACES = ['other-namespace'];
-      const namespace = 'test-namespace';
-      const ttl = 3600;
-
-      standardCache(namespace, ttl);
-
-      expect(require('@keyv/redis').default).toHaveBeenCalledWith(mockKeyvRedisClient);
-      expect(mockKeyv).toHaveBeenCalledWith(mockKeyvRedis, { namespace, ttl });
-    });
-
-    it('should throw error when Redis cache creation fails', () => {
-      cacheConfig.USE_REDIS = true;
-      const namespace = 'test-namespace';
-      const ttl = 3600;
-      const testError = new Error('Redis connection failed');
-
-      const KeyvRedis = require('@keyv/redis').default;
-      KeyvRedis.mockImplementationOnce(() => {
-        throw testError;
-      });
-
-      expect(() => standardCache(namespace, ttl)).toThrow('Redis connection failed');
-
-      const { logger } = require('@librechat/data-schemas');
-      expect(logger.error).toHaveBeenCalledWith(
-        `Failed to create Redis cache for namespace ${namespace}:`,
-        testError,
-      );
-
-      expect(mockKeyv).not.toHaveBeenCalled();
-    });
-  });
-
-  describe('violationCache', () => {
-    it('should create violation cache with prefixed namespace', () => {
-      const namespace = 'test-violations';
-      const ttl = 7200;
-
-      // We can't easily mock the internal redisCache call since it's in the same module
-      // But we can test that the function executes without throwing
-      expect(() => violationCache(namespace, ttl)).not.toThrow();
-    });
-
-    it('should create violation cache with undefined ttl', () => {
-      const namespace = 'test-violations';
-
-      violationCache(namespace);
-
-      // The function should call redisCache with violations: prefixed namespace
-      // Since we can't easily mock the internal redisCache call, we test the behavior
-      expect(() => violationCache(namespace)).not.toThrow();
-    });
-
-    it('should handle undefined namespace', () => {
-      expect(() => violationCache(undefined)).not.toThrow();
-    });
-  });
-
-  describe('sessionCache', () => {
-    it('should return MemoryStore when USE_REDIS is false', () => {
-      cacheConfig.USE_REDIS = false;
-      const namespace = 'sessions';
-      const ttl = 86400;
-
-      const result = sessionCache(namespace, ttl);
-
-      expect(mockMemoryStore).toHaveBeenCalledWith({ ttl, checkPeriod: Time.ONE_DAY });
-      expect(result).toBe(mockMemoryStore());
-    });
-
-    it('should return ConnectRedis when USE_REDIS is true', () => {
-      cacheConfig.USE_REDIS = true;
-      const namespace = 'sessions';
-      const ttl = 86400;
-
-      const result = sessionCache(namespace, ttl);
-
-      expect(mockConnectRedis).toHaveBeenCalledWith({
-        client: mockIoredisClient,
-        ttl,
-        prefix: `${namespace}:`,
-      });
-      expect(result).toBe(mockConnectRedis());
-    });
-
-    it('should add colon to namespace if not present', () => {
-      cacheConfig.USE_REDIS = true;
-      const namespace = 'sessions';
-
-      sessionCache(namespace);
-
-      expect(mockConnectRedis).toHaveBeenCalledWith({
-        client: mockIoredisClient,
-        ttl: undefined,
-        prefix: 'sessions:',
-      });
-    });
-
-    it('should not add colon to namespace if already present', () => {
-      cacheConfig.USE_REDIS = true;
-      const namespace = 'sessions:';
-
-      sessionCache(namespace);
-
-      expect(mockConnectRedis).toHaveBeenCalledWith({
-        client: mockIoredisClient,
-        ttl: undefined,
-        prefix: 'sessions:',
-      });
-    });
-
-    it('should handle undefined ttl', () => {
-      cacheConfig.USE_REDIS = false;
-      const namespace = 'sessions';
-
-      sessionCache(namespace);
-
-      expect(mockMemoryStore).toHaveBeenCalledWith({
-        ttl: undefined,
-        checkPeriod: Time.ONE_DAY,
-      });
-    });
-
-    it('should throw error when ConnectRedis constructor fails', () => {
-      cacheConfig.USE_REDIS = true;
-      const namespace = 'sessions';
-      const ttl = 86400;
-
-      // Mock ConnectRedis to throw an error during construction
-      const redisError = new Error('Redis connection failed');
-      mockConnectRedis.mockImplementationOnce(() => {
-        throw redisError;
-      });
-
-      // The error should propagate up, not be caught
-      expect(() => sessionCache(namespace, ttl)).toThrow('Redis connection failed');
-
-      // Verify that MemoryStore was NOT used as fallback
-      expect(mockMemoryStore).not.toHaveBeenCalled();
-    });
-
-    it('should register error handler but let errors propagate to Express', () => {
-      cacheConfig.USE_REDIS = true;
-      const namespace = 'sessions';
-
-      // Create a mock session store with middleware methods
-      const mockSessionStore = {
-        get: jest.fn(),
-        set: jest.fn(),
-        destroy: jest.fn(),
-      };
-      mockConnectRedis.mockReturnValue(mockSessionStore);
-
-      const store = sessionCache(namespace);
-
-      // Verify error handler was registered
-      expect(mockIoredisClient.on).toHaveBeenCalledWith('error', expect.any(Function));
-
-      // Get the error handler
-      const errorHandler = mockIoredisClient.on.mock.calls.find((call) => call[0] === 'error')[1];
-
-      // Simulate an error from Redis during a session operation
-      const redisError = new Error('Socket closed unexpectedly');
-
-      // The error handler should log but not swallow the error
-      const { logger } = require('@librechat/data-schemas');
-      errorHandler(redisError);
-
-      expect(logger.error).toHaveBeenCalledWith(
-        `Session store Redis error for namespace ${namespace}::`,
-        redisError,
-      );
-
-      // Now simulate what happens when session middleware tries to use the store
-      const callback = jest.fn();
-      mockSessionStore.get.mockImplementation((sid, cb) => {
-        cb(new Error('Redis connection lost'));
-      });
-
-      // Call the store's get method (as Express session would)
-      store.get('test-session-id', callback);
-
-      // The error should be passed to the callback, not swallowed
-      expect(callback).toHaveBeenCalledWith(new Error('Redis connection lost'));
-    });
-
-    it('should handle null ioredisClient gracefully', () => {
-      cacheConfig.USE_REDIS = true;
-      const namespace = 'sessions';
-
-      // Temporarily set ioredisClient to null (simulating connection not established)
-      const originalClient = require('./redisClients').ioredisClient;
-      require('./redisClients').ioredisClient = null;
-
-      // ConnectRedis might accept null client but would fail on first use
-      // The important thing is it doesn't throw uncaught exceptions during construction
-      const store = sessionCache(namespace);
-      expect(store).toBeDefined();
-
-      // Restore original client
-      require('./redisClients').ioredisClient = originalClient;
-    });
-  });
-
-  describe('limiterCache', () => {
-    it('should return undefined when USE_REDIS is false', () => {
-      cacheConfig.USE_REDIS = false;
-      const result = limiterCache('prefix');
-
-      expect(result).toBeUndefined();
-    });
-
-    it('should return RedisStore when USE_REDIS is true', () => {
-      cacheConfig.USE_REDIS = true;
-      const result = limiterCache('rate-limit');
-
-      expect(mockRedisStore).toHaveBeenCalledWith({
-        sendCommand: expect.any(Function),
-        prefix: `rate-limit:`,
-      });
-      expect(result).toBe(mockRedisStore());
-    });
-
-    it('should add colon to prefix if not present', () => {
-      cacheConfig.USE_REDIS = true;
-      limiterCache('rate-limit');
-
-      expect(mockRedisStore).toHaveBeenCalledWith({
-        sendCommand: expect.any(Function),
-        prefix: 'rate-limit:',
-      });
-    });
-
-    it('should not add colon to prefix if already present', () => {
-      cacheConfig.USE_REDIS = true;
-      limiterCache('rate-limit:');
-
-      expect(mockRedisStore).toHaveBeenCalledWith({
-        sendCommand: expect.any(Function),
-        prefix: 'rate-limit:',
-      });
-    });
-
-    it('should pass sendCommand function that calls ioredisClient.call', async () => {
-      cacheConfig.USE_REDIS = true;
-      mockIoredisClient.call.mockResolvedValue('test-value');
-
-      limiterCache('rate-limit');
-
-      const sendCommandCall = mockRedisStore.mock.calls[0][0];
-      const sendCommand = sendCommandCall.sendCommand;
-
-      // Test that sendCommand properly delegates to ioredisClient.call
-      const args = ['GET', 'test-key'];
-      const result = await sendCommand(...args);
-
-      expect(mockIoredisClient.call).toHaveBeenCalledWith(...args);
-      expect(result).toBe('test-value');
-    });
-
-    it('should handle sendCommand errors properly', async () => {
-      cacheConfig.USE_REDIS = true;
-
-      // Mock the call method to reject with an error
-      const testError = new Error('Redis error');
-      mockIoredisClient.call.mockRejectedValue(testError);
-
-      limiterCache('rate-limit');
-
-      const sendCommandCall = mockRedisStore.mock.calls[0][0];
-      const sendCommand = sendCommandCall.sendCommand;
-
-      // Test that sendCommand properly handles errors
-      const args = ['GET', 'test-key'];
-
-      await expect(sendCommand(...args)).rejects.toThrow('Redis error');
-      expect(mockIoredisClient.call).toHaveBeenCalledWith(...args);
-    });
-
-    it('should handle undefined prefix', () => {
-      cacheConfig.USE_REDIS = true;
-      expect(() => limiterCache()).toThrow('prefix is required');
-    });
-  });
-});
```
api/cache/getLogStores.js (14 changes)

```diff
@@ -1,9 +1,13 @@
-const { cacheConfig } = require('./cacheConfig');
 const { Keyv } = require('keyv');
-const { CacheKeys, ViolationTypes, Time } = require('librechat-data-provider');
-const { logFile } = require('./keyvFiles');
-const keyvMongo = require('./keyvMongo');
-const { standardCache, sessionCache, violationCache } = require('./cacheFactory');
+const { Time, CacheKeys, ViolationTypes } = require('librechat-data-provider');
+const {
+  logFile,
+  keyvMongo,
+  cacheConfig,
+  sessionCache,
+  standardCache,
+  violationCache,
+} = require('@librechat/api');
 
 const namespaces = {
   [ViolationTypes.GENERAL]: new Keyv({ store: logFile, namespace: 'violations' }),
```
api/cache/index.js (3 changes)

```diff
@@ -1,5 +1,4 @@
-const keyvFiles = require('./keyvFiles');
 const getLogStores = require('./getLogStores');
 const logViolation = require('./logViolation');
 
-module.exports = { ...keyvFiles, getLogStores, logViolation };
+module.exports = { getLogStores, logViolation };
```
api/cache/keyvFiles.js (deleted, 9 lines)

```diff
@@ -1,9 +0,0 @@
-const { KeyvFile } = require('keyv-file');
-
-const logFile = new KeyvFile({ filename: './data/logs.json' }).setMaxListeners(20);
-const violationFile = new KeyvFile({ filename: './data/violations.json' }).setMaxListeners(20);
-
-module.exports = {
-  logFile,
-  violationFile,
-};
```
@@ -29,12 +29,64 @@ class MeiliSearchClient {
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes documents from MeiliSearch index that are missing the user field
|
||||
* @param {import('meilisearch').Index} index - MeiliSearch index instance
|
||||
* @param {string} indexName - Name of the index for logging
|
||||
* @returns {Promise<number>} - Number of documents deleted
|
||||
*/
|
||||
async function deleteDocumentsWithoutUserField(index, indexName) {
|
||||
let deletedCount = 0;
|
||||
let offset = 0;
|
||||
const batchSize = 1000;
|
||||
|
||||
try {
|
||||
while (true) {
|
||||
const searchResult = await index.search('', {
|
||||
limit: batchSize,
|
||||
offset: offset,
|
||||
});
|
||||
|
||||
if (searchResult.hits.length === 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
const idsToDelete = searchResult.hits.filter((hit) => !hit.user).map((hit) => hit.id);
|
||||
|
||||
if (idsToDelete.length > 0) {
|
||||
logger.info(
|
||||
`[indexSync] Deleting ${idsToDelete.length} documents without user field from ${indexName} index`,
|
||||
);
|
||||
await index.deleteDocuments(idsToDelete);
|
||||
deletedCount += idsToDelete.length;
|
||||
}
|
||||
|
||||
if (searchResult.hits.length < batchSize) {
|
||||
break;
|
||||
}
|
||||
|
||||
offset += batchSize;
|
||||
}
|
||||
|
||||
if (deletedCount > 0) {
|
||||
logger.info(`[indexSync] Deleted ${deletedCount} orphaned documents from ${indexName} index`);
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error(`[indexSync] Error deleting documents from ${indexName}:`, error);
|
||||
}
|
||||
|
||||
return deletedCount;
|
||||
}
|
||||
|
||||
/**
|
||||
* Ensures indexes have proper filterable attributes configured and checks if documents have user field
|
||||
* @param {MeiliSearch} client - MeiliSearch client instance
|
||||
* @returns {Promise<boolean>} - true if configuration was updated or re-sync is needed
|
||||
* @returns {Promise<{settingsUpdated: boolean, orphanedDocsFound: boolean}>} - Status of what was done
|
||||
*/
|
||||
async function ensureFilterableAttributes(client) {
|
||||
let settingsUpdated = false;
|
||||
let hasOrphanedDocs = false;
|
||||
|
||||
try {
|
||||
// Check and update messages index
|
||||
try {
|
||||
@@ -47,16 +99,17 @@ async function ensureFilterableAttributes(client) {
|
||||
filterableAttributes: ['user'],
|
||||
});
|
||||
logger.info('[indexSync] Messages index configured for user filtering');
|
||||
logger.info('[indexSync] Index configuration updated. Full re-sync will be triggered.');
|
||||
return true;
|
||||
settingsUpdated = true;
|
||||
}
|
||||
|
||||
// Check if existing documents have user field indexed
|
||||
try {
|
||||
const searchResult = await messagesIndex.search('', { limit: 1 });
|
||||
if (searchResult.hits.length > 0 && !searchResult.hits[0].user) {
|
||||
logger.info('[indexSync] Existing messages missing user field, re-sync needed');
|
||||
return true;
|
||||
logger.info(
|
||||
'[indexSync] Existing messages missing user field, will clean up orphaned documents...',
|
||||
);
|
||||
hasOrphanedDocs = true;
|
||||
}
|
||||
} catch (searchError) {
|
||||
logger.debug('[indexSync] Could not check message documents:', searchError.message);
|
||||
@@ -78,16 +131,17 @@ async function ensureFilterableAttributes(client) {
|
||||
filterableAttributes: ['user'],
|
||||
});
|
||||
logger.info('[indexSync] Convos index configured for user filtering');
|
||||
logger.info('[indexSync] Index configuration updated. Full re-sync will be triggered.');
|
||||
return true;
|
||||
settingsUpdated = true;
|
||||
}
|
||||
|
||||
// Check if existing documents have user field indexed
|
||||
try {
|
||||
const searchResult = await convosIndex.search('', { limit: 1 });
|
||||
if (searchResult.hits.length > 0 && !searchResult.hits[0].user) {
|
||||
logger.info('[indexSync] Existing conversations missing user field, re-sync needed');
|
||||
return true;
|
||||
logger.info(
|
||||
'[indexSync] Existing conversations missing user field, will clean up orphaned documents...',
|
||||
);
|
||||
hasOrphanedDocs = true;
|
||||
}
|
||||
} catch (searchError) {
|
||||
logger.debug('[indexSync] Could not check conversation documents:', searchError.message);
|
||||
@@ -97,101 +151,143 @@ async function ensureFilterableAttributes(client) {
|
||||
logger.warn('[indexSync] Could not check/update convos index settings:', error.message);
|
||||
}
|
||||
}
|
||||
|
||||
// If either index has orphaned documents, clean them up (but don't force resync)
|
||||
if (hasOrphanedDocs) {
|
||||
try {
|
||||
const messagesIndex = client.index('messages');
|
||||
await deleteDocumentsWithoutUserField(messagesIndex, 'messages');
|
||||
} catch (error) {
|
||||
logger.debug('[indexSync] Could not clean up messages:', error.message);
|
||||
}
|
||||
|
||||
try {
|
||||
const convosIndex = client.index('convos');
|
||||
await deleteDocumentsWithoutUserField(convosIndex, 'convos');
|
||||
} catch (error) {
|
||||
logger.debug('[indexSync] Could not clean up convos:', error.message);
|
||||
}
|
||||
|
||||
logger.info('[indexSync] Orphaned documents cleaned up without forcing resync.');
|
||||
}
|
||||
|
||||
if (settingsUpdated) {
|
||||
logger.info('[indexSync] Index settings updated. Full re-sync will be triggered.');
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('[indexSync] Error ensuring filterable attributes:', error);
|
||||
}
|
||||
|
||||
return false;
|
||||
return { settingsUpdated, orphanedDocsFound: hasOrphanedDocs };
|
||||
}
|
||||
|
||||
/**
 * Performs the actual sync operations for messages and conversations
 * @param {FlowStateManager} flowManager - Flow state manager instance
 * @param {string} flowId - Flow identifier
 * @param {string} flowType - Flow type
 */
async function performSync() {
  const client = MeiliSearchClient.getInstance();
async function performSync(flowManager, flowId, flowType) {
  try {
    const client = MeiliSearchClient.getInstance();

    const { status } = await client.health();
    if (status !== 'available') {
      throw new Error('Meilisearch not available');
    }

    if (indexingDisabled === true) {
      logger.info('[indexSync] Indexing is disabled, skipping...');
      return { messagesSync: false, convosSync: false };
    }

    /** Ensures indexes have proper filterable attributes configured */
    const configUpdated = await ensureFilterableAttributes(client);

    let messagesSync = false;
    let convosSync = false;

    // If configuration was just updated or documents are missing user field, force a full re-sync
    if (configUpdated) {
      logger.info('[indexSync] Forcing full re-sync to ensure user field is properly indexed...');

      // Reset sync flags to force full re-sync
      await Message.collection.updateMany({ _meiliIndex: true }, { $set: { _meiliIndex: false } });
      await Conversation.collection.updateMany(
        { _meiliIndex: true },
        { $set: { _meiliIndex: false } },
      );
    }

    // Check if we need to sync messages
    const messageProgress = await Message.getSyncProgress();
    if (!messageProgress.isComplete || configUpdated) {
      logger.info(
        `[indexSync] Messages need syncing: ${messageProgress.totalProcessed}/${messageProgress.totalDocuments} indexed`,
      );

      // Check if we should do a full sync or incremental
      const messageCount = await Message.countDocuments();
      const messagesIndexed = messageProgress.totalProcessed;
      const syncThreshold = parseInt(process.env.MEILI_SYNC_THRESHOLD || '1000', 10);

      if (messageCount - messagesIndexed > syncThreshold) {
        logger.info('[indexSync] Starting full message sync due to large difference');
        await Message.syncWithMeili();
        messagesSync = true;
      } else if (messageCount !== messagesIndexed) {
        logger.warn('[indexSync] Messages out of sync, performing incremental sync');
        await Message.syncWithMeili();
        messagesSync = true;
        const { status } = await client.health();
        if (status !== 'available') {
          throw new Error('Meilisearch not available');
        }
      } else {
        logger.info(
          `[indexSync] Messages are fully synced: ${messageProgress.totalProcessed}/${messageProgress.totalDocuments}`,
        );
      }

      // Check if we need to sync conversations
      const convoProgress = await Conversation.getSyncProgress();
      if (!convoProgress.isComplete || configUpdated) {
        logger.info(
          `[indexSync] Conversations need syncing: ${convoProgress.totalProcessed}/${convoProgress.totalDocuments} indexed`,
        );

        const convoCount = await Conversation.countDocuments();
        const convosIndexed = convoProgress.totalProcessed;
        const syncThreshold = parseInt(process.env.MEILI_SYNC_THRESHOLD || '1000', 10);

        if (convoCount - convosIndexed > syncThreshold) {
          logger.info('[indexSync] Starting full conversation sync due to large difference');
          await Conversation.syncWithMeili();
          convosSync = true;
        } else if (convoCount !== convosIndexed) {
          logger.warn('[indexSync] Convos out of sync, performing incremental sync');
          await Conversation.syncWithMeili();
          convosSync = true;
          if (indexingDisabled === true) {
            logger.info('[indexSync] Indexing is disabled, skipping...');
            return { messagesSync: false, convosSync: false };
          }
        } else {
          logger.info(
            `[indexSync] Conversations are fully synced: ${convoProgress.totalProcessed}/${convoProgress.totalDocuments}`,
          );
        }

        return { messagesSync, convosSync };
    /** Ensures indexes have proper filterable attributes configured */
    const { settingsUpdated, orphanedDocsFound: _orphanedDocsFound } =
      await ensureFilterableAttributes(client);

    let messagesSync = false;
    let convosSync = false;

    // Only reset flags if settings were actually updated (not just for orphaned doc cleanup)
    if (settingsUpdated) {
      logger.info(
        '[indexSync] Settings updated. Forcing full re-sync to reindex with new configuration...',
      );

      // Reset sync flags to force full re-sync
      await Message.collection.updateMany({ _meiliIndex: true }, { $set: { _meiliIndex: false } });
      await Conversation.collection.updateMany(
        { _meiliIndex: true },
        { $set: { _meiliIndex: false } },
      );
    }

    // Check if we need to sync messages
    const messageProgress = await Message.getSyncProgress();
    if (!messageProgress.isComplete || settingsUpdated) {
      logger.info(
        `[indexSync] Messages need syncing: ${messageProgress.totalProcessed}/${messageProgress.totalDocuments} indexed`,
      );

      // Check if we should do a full sync or incremental
      const messageCount = await Message.countDocuments();
      const messagesIndexed = messageProgress.totalProcessed;
      const syncThreshold = parseInt(process.env.MEILI_SYNC_THRESHOLD || '1000', 10);

      if (messageCount - messagesIndexed > syncThreshold) {
        logger.info('[indexSync] Starting full message sync due to large difference');
        await Message.syncWithMeili();
        messagesSync = true;
      } else if (messageCount !== messagesIndexed) {
        logger.warn('[indexSync] Messages out of sync, performing incremental sync');
        await Message.syncWithMeili();
        messagesSync = true;
      }
    } else {
      logger.info(
        `[indexSync] Messages are fully synced: ${messageProgress.totalProcessed}/${messageProgress.totalDocuments}`,
      );
    }

    // Check if we need to sync conversations
    const convoProgress = await Conversation.getSyncProgress();
    if (!convoProgress.isComplete || settingsUpdated) {
      logger.info(
        `[indexSync] Conversations need syncing: ${convoProgress.totalProcessed}/${convoProgress.totalDocuments} indexed`,
      );

      const convoCount = await Conversation.countDocuments();
      const convosIndexed = convoProgress.totalProcessed;
      const syncThreshold = parseInt(process.env.MEILI_SYNC_THRESHOLD || '1000', 10);

      if (convoCount - convosIndexed > syncThreshold) {
        logger.info('[indexSync] Starting full conversation sync due to large difference');
        await Conversation.syncWithMeili();
        convosSync = true;
      } else if (convoCount !== convosIndexed) {
        logger.warn('[indexSync] Convos out of sync, performing incremental sync');
        await Conversation.syncWithMeili();
        convosSync = true;
      }
    } else {
      logger.info(
        `[indexSync] Conversations are fully synced: ${convoProgress.totalProcessed}/${convoProgress.totalDocuments}`,
      );
    }

    return { messagesSync, convosSync };
  } finally {
    if (indexingDisabled === true) {
      logger.info('[indexSync] Indexing is disabled, skipping cleanup...');
    } else if (flowManager && flowId && flowType) {
      try {
        await flowManager.deleteFlow(flowId, flowType);
        logger.debug('[indexSync] Flow state cleaned up');
      } catch (cleanupErr) {
        logger.debug('[indexSync] Could not clean up flow state:', cleanupErr.message);
      }
    }
  }
}
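The same threshold rule drives both the message and the conversation branches above. Here is a minimal sketch of that shared rule, assuming the `MEILI_SYNC_THRESHOLD` default of 1000 shown in the diff; `total` and `indexed` are hypothetical stand-ins for `countDocuments()` and `getSyncProgress().totalProcessed`:

```js
// Sketch of performSync's full-vs-incremental decision. In the actual diff,
// both non-empty branches call the same syncWithMeili(); the distinction
// mainly selects the log message and how a large backlog is reported.
function chooseSyncStrategy(total, indexed) {
  const threshold = parseInt(process.env.MEILI_SYNC_THRESHOLD || '1000', 10);
  if (total - indexed > threshold) {
    return 'full'; // backlog larger than the threshold: full re-sync
  }
  if (total !== indexed) {
    return 'incremental'; // small drift: incremental catch-up
  }
  return 'none'; // counts match: nothing to do
}
```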

/**
@@ -204,24 +300,26 @@ async function indexSync() {

  logger.info('[indexSync] Starting index synchronization check...');

  // Get or create FlowStateManager instance
  const flowsCache = getLogStores(CacheKeys.FLOWS);
  if (!flowsCache) {
    logger.warn('[indexSync] Flows cache not available, falling back to direct sync');
    return await performSync(null, null, null);
  }

  const flowManager = new FlowStateManager(flowsCache, {
    ttl: 60000 * 10, // 10 minutes TTL for sync operations
  });

  // Use a unique flow ID for the sync operation
  const flowId = 'meili-index-sync';
  const flowType = 'MEILI_SYNC';

  try {
    // Get or create FlowStateManager instance
    const flowsCache = getLogStores(CacheKeys.FLOWS);
    if (!flowsCache) {
      logger.warn('[indexSync] Flows cache not available, falling back to direct sync');
      return await performSync();
    }

    const flowManager = new FlowStateManager(flowsCache, {
      ttl: 60000 * 10, // 10 minutes TTL for sync operations
    });

    // Use a unique flow ID for the sync operation
    const flowId = 'meili-index-sync';
    const flowType = 'MEILI_SYNC';

    // This will only execute the handler if no other instance is running the sync
    const result = await flowManager.createFlowWithHandler(flowId, flowType, performSync);
    const result = await flowManager.createFlowWithHandler(flowId, flowType, () =>
      performSync(flowManager, flowId, flowType),
    );

    if (result.messagesSync || result.convosSync) {
      logger.info('[indexSync] Sync completed successfully');

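Condensed, the locking pattern above works as follows. This sketch uses only the calls visible in the diff (`FlowStateManager`, `createFlowWithHandler`, and the `deleteFlow` cleanup in `performSync`'s `finally` block):

```js
// Only one instance's handler runs per flow id at a time; concurrent callers
// wait on the shared flow state instead of starting a duplicate re-index.
const flowManager = new FlowStateManager(flowsCache, { ttl: 60000 * 10 });
const result = await flowManager.createFlowWithHandler(
  'meili-index-sync', // flowId shared across all instances
  'MEILI_SYNC', // flowType
  () => performSync(flowManager, 'meili-index-sync', 'MEILI_SYNC'),
);
// performSync() deletes the flow state in its finally block, so a failed run
// cannot hold the lock longer than the 10-minute TTL.
```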
@@ -62,25 +62,37 @@ const getAgents = async (searchParameter) => await Agent.find(searchParameter).l
 *
 * @param {Object} params
 * @param {ServerRequest} params.req
 * @param {string} params.spec
 * @param {string} params.agent_id
 * @param {string} params.endpoint
 * @param {import('@librechat/agents').ClientOptions} [params.model_parameters]
 * @returns {Promise<Agent|null>} The agent document as a plain object, or null if not found.
 */
const loadEphemeralAgent = async ({ req, agent_id, endpoint, model_parameters: _m }) => {
const loadEphemeralAgent = async ({ req, spec, agent_id, endpoint, model_parameters: _m }) => {
  const { model, ...model_parameters } = _m;
  const modelSpecs = req.config?.modelSpecs?.list;
  /** @type {TModelSpec | null} */
  let modelSpec = null;
  if (spec != null && spec !== '') {
    modelSpec = modelSpecs?.find((s) => s.name === spec) || null;
  }
  /** @type {TEphemeralAgent | null} */
  const ephemeralAgent = req.body.ephemeralAgent;
  const mcpServers = new Set(ephemeralAgent?.mcp);
  if (modelSpec?.mcpServers) {
    for (const mcpServer of modelSpec.mcpServers) {
      mcpServers.add(mcpServer);
    }
  }
  /** @type {string[]} */
  const tools = [];
  if (ephemeralAgent?.execute_code === true) {
  if (ephemeralAgent?.execute_code === true || modelSpec?.executeCode === true) {
    tools.push(Tools.execute_code);
  }
  if (ephemeralAgent?.file_search === true) {
  if (ephemeralAgent?.file_search === true || modelSpec?.fileSearch === true) {
    tools.push(Tools.file_search);
  }
  if (ephemeralAgent?.web_search === true) {
  if (ephemeralAgent?.web_search === true || modelSpec?.webSearch === true) {
    tools.push(Tools.web_search);
  }
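The three toggles above follow a single rule: a tool is enabled when either the per-request ephemeral agent or the matched model spec opts in. A table-driven sketch of that same rule, for illustration only (the PR itself keeps the three explicit `if` blocks):

```js
// Each entry: [tool name, request-level flag, model-spec flag]; names taken
// from the diff above. A tool is pushed when either flag is strictly true.
const toggles = [
  [Tools.execute_code, ephemeralAgent?.execute_code, modelSpec?.executeCode],
  [Tools.file_search, ephemeralAgent?.file_search, modelSpec?.fileSearch],
  [Tools.web_search, ephemeralAgent?.web_search, modelSpec?.webSearch],
];
for (const [tool, fromRequest, fromSpec] of toggles) {
  if (fromRequest === true || fromSpec === true) {
    tools.push(tool);
  }
}
```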

@@ -122,17 +134,18 @@ const loadEphemeralAgent = async ({ req, agent_id, endpoint, model_parameters: _
 *
 * @param {Object} params
 * @param {ServerRequest} params.req
 * @param {string} params.spec
 * @param {string} params.agent_id
 * @param {string} params.endpoint
 * @param {import('@librechat/agents').ClientOptions} [params.model_parameters]
 * @returns {Promise<Agent|null>} The agent document as a plain object, or null if not found.
 */
const loadAgent = async ({ req, agent_id, endpoint, model_parameters }) => {
const loadAgent = async ({ req, spec, agent_id, endpoint, model_parameters }) => {
  if (!agent_id) {
    return null;
  }
  if (agent_id === EPHEMERAL_AGENT_ID) {
    return await loadEphemeralAgent({ req, agent_id, endpoint, model_parameters });
    return await loadEphemeralAgent({ req, spec, agent_id, endpoint, model_parameters });
  }
  const agent = await getAgent({
    id: agent_id,

267
api/models/tx.js
@@ -1,4 +1,4 @@
const { matchModelName } = require('@librechat/api');
const { matchModelName, findMatchingPattern } = require('@librechat/api');
const defaultRate = 6;

/**
@@ -6,44 +6,58 @@ const defaultRate = 6;
 * source: https://aws.amazon.com/bedrock/pricing/
 * */
const bedrockValues = {
  // Basic llama2 patterns
  // Basic llama2 patterns (base defaults to smallest variant)
  llama2: { prompt: 0.75, completion: 1.0 },
  'llama-2': { prompt: 0.75, completion: 1.0 },
  'llama2-13b': { prompt: 0.75, completion: 1.0 },
  'llama2:13b': { prompt: 0.75, completion: 1.0 },
  'llama2:70b': { prompt: 1.95, completion: 2.56 },
  'llama2-70b': { prompt: 1.95, completion: 2.56 },

  // Basic llama3 patterns
  // Basic llama3 patterns (base defaults to smallest variant)
  llama3: { prompt: 0.3, completion: 0.6 },
  'llama-3': { prompt: 0.3, completion: 0.6 },
  'llama3-8b': { prompt: 0.3, completion: 0.6 },
  'llama3:8b': { prompt: 0.3, completion: 0.6 },
  'llama3-70b': { prompt: 2.65, completion: 3.5 },
  'llama3:70b': { prompt: 2.65, completion: 3.5 },

  // llama3-x-Nb pattern
  // llama3-x-Nb pattern (base defaults to smallest variant)
  'llama3-1': { prompt: 0.22, completion: 0.22 },
  'llama3-1-8b': { prompt: 0.22, completion: 0.22 },
  'llama3-1-70b': { prompt: 0.72, completion: 0.72 },
  'llama3-1-405b': { prompt: 2.4, completion: 2.4 },
  'llama3-2': { prompt: 0.1, completion: 0.1 },
  'llama3-2-1b': { prompt: 0.1, completion: 0.1 },
  'llama3-2-3b': { prompt: 0.15, completion: 0.15 },
  'llama3-2-11b': { prompt: 0.16, completion: 0.16 },
  'llama3-2-90b': { prompt: 0.72, completion: 0.72 },
  'llama3-3': { prompt: 2.65, completion: 3.5 },
  'llama3-3-70b': { prompt: 2.65, completion: 3.5 },

  // llama3.x:Nb pattern
  // llama3.x:Nb pattern (base defaults to smallest variant)
  'llama3.1': { prompt: 0.22, completion: 0.22 },
  'llama3.1:8b': { prompt: 0.22, completion: 0.22 },
  'llama3.1:70b': { prompt: 0.72, completion: 0.72 },
  'llama3.1:405b': { prompt: 2.4, completion: 2.4 },
  'llama3.2': { prompt: 0.1, completion: 0.1 },
  'llama3.2:1b': { prompt: 0.1, completion: 0.1 },
  'llama3.2:3b': { prompt: 0.15, completion: 0.15 },
  'llama3.2:11b': { prompt: 0.16, completion: 0.16 },
  'llama3.2:90b': { prompt: 0.72, completion: 0.72 },
  'llama3.3': { prompt: 2.65, completion: 3.5 },
  'llama3.3:70b': { prompt: 2.65, completion: 3.5 },

  // llama-3.x-Nb pattern
  // llama-3.x-Nb pattern (base defaults to smallest variant)
  'llama-3.1': { prompt: 0.22, completion: 0.22 },
  'llama-3.1-8b': { prompt: 0.22, completion: 0.22 },
  'llama-3.1-70b': { prompt: 0.72, completion: 0.72 },
  'llama-3.1-405b': { prompt: 2.4, completion: 2.4 },
  'llama-3.2': { prompt: 0.1, completion: 0.1 },
  'llama-3.2-1b': { prompt: 0.1, completion: 0.1 },
  'llama-3.2-3b': { prompt: 0.15, completion: 0.15 },
  'llama-3.2-11b': { prompt: 0.16, completion: 0.16 },
  'llama-3.2-90b': { prompt: 0.72, completion: 0.72 },
  'llama-3.3': { prompt: 2.65, completion: 3.5 },
  'llama-3.3-70b': { prompt: 2.65, completion: 3.5 },
  'mistral-7b': { prompt: 0.15, completion: 0.2 },
  'mistral-small': { prompt: 0.15, completion: 0.2 },
@@ -52,15 +66,19 @@ const bedrockValues = {
  'mistral-large-2407': { prompt: 3.0, completion: 9.0 },
  'command-text': { prompt: 1.5, completion: 2.0 },
  'command-light': { prompt: 0.3, completion: 0.6 },
  'ai21.j2-mid-v1': { prompt: 12.5, completion: 12.5 },
  'ai21.j2-ultra-v1': { prompt: 18.8, completion: 18.8 },
  'ai21.jamba-instruct-v1:0': { prompt: 0.5, completion: 0.7 },
  'amazon.titan-text-lite-v1': { prompt: 0.15, completion: 0.2 },
  'amazon.titan-text-express-v1': { prompt: 0.2, completion: 0.6 },
  'amazon.titan-text-premier-v1:0': { prompt: 0.5, completion: 1.5 },
  'amazon.nova-micro-v1:0': { prompt: 0.035, completion: 0.14 },
  'amazon.nova-lite-v1:0': { prompt: 0.06, completion: 0.24 },
  'amazon.nova-pro-v1:0': { prompt: 0.8, completion: 3.2 },
  // AI21 models
  'j2-mid': { prompt: 12.5, completion: 12.5 },
  'j2-ultra': { prompt: 18.8, completion: 18.8 },
  'jamba-instruct': { prompt: 0.5, completion: 0.7 },
  // Amazon Titan models
  'titan-text-lite': { prompt: 0.15, completion: 0.2 },
  'titan-text-express': { prompt: 0.2, completion: 0.6 },
  'titan-text-premier': { prompt: 0.5, completion: 1.5 },
  // Amazon Nova models
  'nova-micro': { prompt: 0.035, completion: 0.14 },
  'nova-lite': { prompt: 0.06, completion: 0.24 },
  'nova-pro': { prompt: 0.8, completion: 3.2 },
  'nova-premier': { prompt: 2.5, completion: 12.5 },
  'deepseek.r1': { prompt: 1.35, completion: 5.4 },
};

@@ -71,89 +89,136 @@ const bedrockValues = {
 */
const tokenValues = Object.assign(
  {
    // Legacy token size mappings (generic patterns - check LAST)
    '8k': { prompt: 30, completion: 60 },
    '32k': { prompt: 60, completion: 120 },
    '4k': { prompt: 1.5, completion: 2 },
    '16k': { prompt: 3, completion: 4 },
    // Generic fallback patterns (check LAST)
    'claude-': { prompt: 0.8, completion: 2.4 },
    deepseek: { prompt: 0.28, completion: 0.42 },
    command: { prompt: 0.38, completion: 0.38 },
    gemma: { prompt: 0.02, completion: 0.04 }, // Base pattern (using gemma-3n-e4b pricing)
    gemini: { prompt: 0.5, completion: 1.5 },
    'gpt-oss': { prompt: 0.05, completion: 0.2 },
    // Specific model variants (check FIRST - more specific patterns at end)
    'gpt-3.5-turbo-1106': { prompt: 1, completion: 2 },
    'o4-mini': { prompt: 1.1, completion: 4.4 },
    'o3-mini': { prompt: 1.1, completion: 4.4 },
    o3: { prompt: 2, completion: 8 },
    'o1-mini': { prompt: 1.1, completion: 4.4 },
    'o1-preview': { prompt: 15, completion: 60 },
    o1: { prompt: 15, completion: 60 },
    'gpt-3.5-turbo-0125': { prompt: 0.5, completion: 1.5 },
    'gpt-4-1106': { prompt: 10, completion: 30 },
    'gpt-4.1': { prompt: 2, completion: 8 },
    'gpt-4.1-nano': { prompt: 0.1, completion: 0.4 },
    'gpt-4.1-mini': { prompt: 0.4, completion: 1.6 },
    'gpt-4.1': { prompt: 2, completion: 8 },
    'gpt-4.5': { prompt: 75, completion: 150 },
    'gpt-4o-mini': { prompt: 0.15, completion: 0.6 },
    'gpt-5': { prompt: 1.25, completion: 10 },
    'gpt-5-mini': { prompt: 0.25, completion: 2 },
    'gpt-5-nano': { prompt: 0.05, completion: 0.4 },
    'gpt-4o': { prompt: 2.5, completion: 10 },
    'gpt-4o-2024-05-13': { prompt: 5, completion: 15 },
    'gpt-4-1106': { prompt: 10, completion: 30 },
    'gpt-3.5-turbo-0125': { prompt: 0.5, completion: 1.5 },
    'claude-3-opus': { prompt: 15, completion: 75 },
    'gpt-4o-mini': { prompt: 0.15, completion: 0.6 },
    'gpt-5': { prompt: 1.25, completion: 10 },
    'gpt-5-nano': { prompt: 0.05, completion: 0.4 },
    'gpt-5-mini': { prompt: 0.25, completion: 2 },
    'gpt-5-pro': { prompt: 15, completion: 120 },
    o1: { prompt: 15, completion: 60 },
    'o1-mini': { prompt: 1.1, completion: 4.4 },
    'o1-preview': { prompt: 15, completion: 60 },
    o3: { prompt: 2, completion: 8 },
    'o3-mini': { prompt: 1.1, completion: 4.4 },
    'o4-mini': { prompt: 1.1, completion: 4.4 },
    'claude-instant': { prompt: 0.8, completion: 2.4 },
    'claude-2': { prompt: 8, completion: 24 },
    'claude-2.1': { prompt: 8, completion: 24 },
    'claude-3-haiku': { prompt: 0.25, completion: 1.25 },
    'claude-3-sonnet': { prompt: 3, completion: 15 },
    'claude-3-opus': { prompt: 15, completion: 75 },
    'claude-3-5-haiku': { prompt: 0.8, completion: 4 },
    'claude-3.5-haiku': { prompt: 0.8, completion: 4 },
    'claude-3-5-sonnet': { prompt: 3, completion: 15 },
    'claude-3.5-sonnet': { prompt: 3, completion: 15 },
    'claude-3-7-sonnet': { prompt: 3, completion: 15 },
    'claude-3.7-sonnet': { prompt: 3, completion: 15 },
    'claude-3-5-haiku': { prompt: 0.8, completion: 4 },
    'claude-3.5-haiku': { prompt: 0.8, completion: 4 },
    'claude-3-haiku': { prompt: 0.25, completion: 1.25 },
    'claude-sonnet-4': { prompt: 3, completion: 15 },
    'claude-haiku-4-5': { prompt: 1, completion: 5 },
    'claude-opus-4': { prompt: 15, completion: 75 },
    'claude-2.1': { prompt: 8, completion: 24 },
    'claude-2': { prompt: 8, completion: 24 },
    'claude-instant': { prompt: 0.8, completion: 2.4 },
    'claude-': { prompt: 0.8, completion: 2.4 },
    'command-r-plus': { prompt: 3, completion: 15 },
    'claude-sonnet-4': { prompt: 3, completion: 15 },
    'command-r': { prompt: 0.5, completion: 1.5 },
    'command-r-plus': { prompt: 3, completion: 15 },
    'command-text': { prompt: 1.5, completion: 2.0 },
    'deepseek-reasoner': { prompt: 0.28, completion: 0.42 },
    deepseek: { prompt: 0.28, completion: 0.42 },
    /* cohere doesn't have rates for the older command models,
    so this was from https://artificialanalysis.ai/models/command-light/providers */
    command: { prompt: 0.38, completion: 0.38 },
    gemma: { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
    'gemma-2': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
    'gemma-3': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
    'gemma-3-27b': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
    'gemini-2.0-flash-lite': { prompt: 0.075, completion: 0.3 },
    'gemini-2.0-flash': { prompt: 0.1, completion: 0.4 },
    'gemini-2.0': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
    'gemini-2.5-pro': { prompt: 1.25, completion: 10 },
    'gemini-2.5-flash': { prompt: 0.3, completion: 2.5 },
    'gemini-2.5-flash-lite': { prompt: 0.075, completion: 0.4 },
    'gemini-2.5': { prompt: 0, completion: 0 }, // Free for a period of time
    'gemini-1.5-flash-8b': { prompt: 0.075, completion: 0.3 },
    'gemini-1.5-flash': { prompt: 0.15, completion: 0.6 },
    'deepseek-r1': { prompt: 0.4, completion: 2.0 },
    'deepseek-v3': { prompt: 0.2, completion: 0.8 },
    'gemma-2': { prompt: 0.01, completion: 0.03 }, // Base pattern (using gemma-2-9b pricing)
    'gemma-3': { prompt: 0.02, completion: 0.04 }, // Base pattern (using gemma-3n-e4b pricing)
    'gemma-3-27b': { prompt: 0.09, completion: 0.16 },
    'gemini-1.5': { prompt: 2.5, completion: 10 },
    'gemini-1.5-flash': { prompt: 0.15, completion: 0.6 },
    'gemini-1.5-flash-8b': { prompt: 0.075, completion: 0.3 },
    'gemini-2.0': { prompt: 0.1, completion: 0.4 }, // Base pattern (using 2.0-flash pricing)
    'gemini-2.0-flash': { prompt: 0.1, completion: 0.4 },
    'gemini-2.0-flash-lite': { prompt: 0.075, completion: 0.3 },
    'gemini-2.5': { prompt: 0.3, completion: 2.5 }, // Base pattern (using 2.5-flash pricing)
    'gemini-2.5-flash': { prompt: 0.3, completion: 2.5 },
    'gemini-2.5-flash-lite': { prompt: 0.1, completion: 0.4 },
    'gemini-2.5-pro': { prompt: 1.25, completion: 10 },
    'gemini-pro-vision': { prompt: 0.5, completion: 1.5 },
    gemini: { prompt: 0.5, completion: 1.5 },
    'grok-2-vision-1212': { prompt: 2.0, completion: 10.0 },
    'grok-2-vision-latest': { prompt: 2.0, completion: 10.0 },
    'grok-2-vision': { prompt: 2.0, completion: 10.0 },
    grok: { prompt: 2.0, completion: 10.0 }, // Base pattern defaults to grok-2
    'grok-beta': { prompt: 5.0, completion: 15.0 },
    'grok-vision-beta': { prompt: 5.0, completion: 15.0 },
    'grok-2': { prompt: 2.0, completion: 10.0 },
    'grok-2-1212': { prompt: 2.0, completion: 10.0 },
    'grok-2-latest': { prompt: 2.0, completion: 10.0 },
    'grok-2': { prompt: 2.0, completion: 10.0 },
    'grok-3-mini-fast': { prompt: 0.6, completion: 4 },
    'grok-3-mini': { prompt: 0.3, completion: 0.5 },
    'grok-3-fast': { prompt: 5.0, completion: 25.0 },
    'grok-2-vision': { prompt: 2.0, completion: 10.0 },
    'grok-2-vision-1212': { prompt: 2.0, completion: 10.0 },
    'grok-2-vision-latest': { prompt: 2.0, completion: 10.0 },
    'grok-3': { prompt: 3.0, completion: 15.0 },
    'grok-3-fast': { prompt: 5.0, completion: 25.0 },
    'grok-3-mini': { prompt: 0.3, completion: 0.5 },
    'grok-3-mini-fast': { prompt: 0.6, completion: 4 },
    'grok-4': { prompt: 3.0, completion: 15.0 },
    'grok-beta': { prompt: 5.0, completion: 15.0 },
    'mistral-large': { prompt: 2.0, completion: 6.0 },
    'pixtral-large': { prompt: 2.0, completion: 6.0 },
    'mistral-saba': { prompt: 0.2, completion: 0.6 },
    codestral: { prompt: 0.3, completion: 0.9 },
    'ministral-8b': { prompt: 0.1, completion: 0.1 },
    'ministral-3b': { prompt: 0.04, completion: 0.04 },
    // GPT-OSS models
    'ministral-8b': { prompt: 0.1, completion: 0.1 },
    'mistral-nemo': { prompt: 0.15, completion: 0.15 },
    'mistral-saba': { prompt: 0.2, completion: 0.6 },
    'pixtral-large': { prompt: 2.0, completion: 6.0 },
    'mistral-large': { prompt: 2.0, completion: 6.0 },
    'mixtral-8x22b': { prompt: 0.65, completion: 0.65 },
    kimi: { prompt: 0.14, completion: 2.49 }, // Base pattern (using kimi-k2 pricing)
    // GPT-OSS models (specific sizes)
    'gpt-oss:20b': { prompt: 0.05, completion: 0.2 },
    'gpt-oss-20b': { prompt: 0.05, completion: 0.2 },
    'gpt-oss:120b': { prompt: 0.15, completion: 0.6 },
    'gpt-oss-120b': { prompt: 0.15, completion: 0.6 },
    // GLM models (Zhipu AI) - general to specific
    glm4: { prompt: 0.1, completion: 0.1 },
    'glm-4': { prompt: 0.1, completion: 0.1 },
    'glm-4-32b': { prompt: 0.1, completion: 0.1 },
    'glm-4.5': { prompt: 0.35, completion: 1.55 },
    'glm-4.5-air': { prompt: 0.14, completion: 0.86 },
    'glm-4.5v': { prompt: 0.6, completion: 1.8 },
    'glm-4.6': { prompt: 0.5, completion: 1.75 },
    // Qwen models
    qwen: { prompt: 0.08, completion: 0.33 }, // Qwen base pattern (using qwen2.5-72b pricing)
    'qwen2.5': { prompt: 0.08, completion: 0.33 }, // Qwen 2.5 base pattern
    'qwen-turbo': { prompt: 0.05, completion: 0.2 },
    'qwen-plus': { prompt: 0.4, completion: 1.2 },
    'qwen-max': { prompt: 1.6, completion: 6.4 },
    'qwq-32b': { prompt: 0.15, completion: 0.4 },
    // Qwen3 models
    qwen3: { prompt: 0.035, completion: 0.138 }, // Qwen3 base pattern (using qwen3-4b pricing)
    'qwen3-8b': { prompt: 0.035, completion: 0.138 },
    'qwen3-14b': { prompt: 0.05, completion: 0.22 },
    'qwen3-30b-a3b': { prompt: 0.06, completion: 0.22 },
    'qwen3-32b': { prompt: 0.05, completion: 0.2 },
    'qwen3-235b-a22b': { prompt: 0.08, completion: 0.55 },
    // Qwen3 VL (Vision-Language) models
    'qwen3-vl-8b-thinking': { prompt: 0.18, completion: 2.1 },
    'qwen3-vl-8b-instruct': { prompt: 0.18, completion: 0.69 },
    'qwen3-vl-30b-a3b': { prompt: 0.29, completion: 1.0 },
    'qwen3-vl-235b-a22b': { prompt: 0.3, completion: 1.2 },
    // Qwen3 specialized models
    'qwen3-max': { prompt: 1.2, completion: 6 },
    'qwen3-coder': { prompt: 0.22, completion: 0.95 },
    'qwen3-coder-30b-a3b': { prompt: 0.06, completion: 0.25 },
    'qwen3-coder-plus': { prompt: 1, completion: 5 },
    'qwen3-coder-flash': { prompt: 0.3, completion: 1.5 },
    'qwen3-next-80b-a3b': { prompt: 0.1, completion: 0.8 },
  },
  bedrockValues,
);
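For orientation, each entry's `prompt`/`completion` pair is a rate multiplier; the Bedrock pricing source cited above lines up with USD per 1M tokens, though that unit is an assumption worth checking against `getMultiplier`'s callers. A worked example:

```js
// Hypothetical cost estimate for one gpt-4o request, assuming rates are USD
// per 1M tokens: (1200 * 2.5 + 300 * 10) / 1e6 = 0.006.
const rate = tokenValues['gpt-4o']; // { prompt: 2.5, completion: 10 }
const usd = (1200 * rate.prompt + 300 * rate.completion) / 1e6;
console.log(usd.toFixed(3)); // '0.006'
```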
@@ -184,67 +249,39 @@ const cacheTokenValues = {
 * @returns {string|undefined} The key corresponding to the model name, or undefined if no match is found.
 */
const getValueKey = (model, endpoint) => {
  if (!model || typeof model !== 'string') {
    return undefined;
  }

  // Use findMatchingPattern directly against tokenValues for efficient lookup
  if (!endpoint || (typeof endpoint === 'string' && !tokenValues[endpoint])) {
    const matchedKey = findMatchingPattern(model, tokenValues);
    if (matchedKey) {
      return matchedKey;
    }
  }

  // Fallback: use matchModelName for edge cases and legacy handling
  const modelName = matchModelName(model, endpoint);
  if (!modelName) {
    return undefined;
  }

  // Legacy token size mappings and aliases for older models
  if (modelName.includes('gpt-3.5-turbo-16k')) {
    return '16k';
  } else if (modelName.includes('gpt-3.5-turbo-0125')) {
    return 'gpt-3.5-turbo-0125';
  } else if (modelName.includes('gpt-3.5-turbo-1106')) {
    return 'gpt-3.5-turbo-1106';
  } else if (modelName.includes('gpt-3.5')) {
    return '4k';
  } else if (modelName.includes('o4-mini')) {
    return 'o4-mini';
  } else if (modelName.includes('o4')) {
    return 'o4';
  } else if (modelName.includes('o3-mini')) {
    return 'o3-mini';
  } else if (modelName.includes('o3')) {
    return 'o3';
  } else if (modelName.includes('o1-preview')) {
    return 'o1-preview';
  } else if (modelName.includes('o1-mini')) {
    return 'o1-mini';
  } else if (modelName.includes('o1')) {
    return 'o1';
  } else if (modelName.includes('gpt-4.5')) {
    return 'gpt-4.5';
  } else if (modelName.includes('gpt-4.1-nano')) {
    return 'gpt-4.1-nano';
  } else if (modelName.includes('gpt-4.1-mini')) {
    return 'gpt-4.1-mini';
  } else if (modelName.includes('gpt-4.1')) {
    return 'gpt-4.1';
  } else if (modelName.includes('gpt-4o-2024-05-13')) {
    return 'gpt-4o-2024-05-13';
  } else if (modelName.includes('gpt-5-nano')) {
    return 'gpt-5-nano';
  } else if (modelName.includes('gpt-5-mini')) {
    return 'gpt-5-mini';
  } else if (modelName.includes('gpt-5')) {
    return 'gpt-5';
  } else if (modelName.includes('gpt-4o-mini')) {
    return 'gpt-4o-mini';
  } else if (modelName.includes('gpt-4o')) {
    return 'gpt-4o';
  } else if (modelName.includes('gpt-4-vision')) {
    return 'gpt-4-1106';
  } else if (modelName.includes('gpt-4-1106')) {
    return 'gpt-4-1106';
    return 'gpt-4-1106'; // Alias for gpt-4-vision
  } else if (modelName.includes('gpt-4-0125')) {
    return 'gpt-4-1106';
    return 'gpt-4-1106'; // Alias for gpt-4-0125
  } else if (modelName.includes('gpt-4-turbo')) {
    return 'gpt-4-1106';
    return 'gpt-4-1106'; // Alias for gpt-4-turbo
  } else if (modelName.includes('gpt-4-32k')) {
    return '32k';
  } else if (modelName.includes('gpt-4')) {
    return '8k';
  } else if (tokenValues[modelName]) {
    return modelName;
  }

  return undefined;

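`findMatchingPattern` itself lives in `@librechat/api` and its implementation is not part of this diff. A hypothetical sketch consistent with the table's comments ("generic patterns - check LAST", more specific entries declared later), assuming keys are scanned in reverse insertion order so specific entries win:

```js
// Assumed behavior only: return the last-declared key the model name contains.
function findMatchingPattern(model, values) {
  const keys = Object.keys(values);
  for (let i = keys.length - 1; i >= 0; i--) {
    if (model.includes(keys[i])) {
      return keys[i];
    }
  }
  return undefined;
}

// e.g. findMatchingPattern('anthropic/claude-3-5-sonnet-latest', tokenValues)
// would return 'claude-3-5-sonnet' rather than the generic 'claude-' fallback.
```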
@@ -1,3 +1,4 @@
const { maxTokensMap } = require('@librechat/api');
const { EModelEndpoint } = require('librechat-data-provider');
const {
  defaultRate,
@@ -113,6 +114,14 @@ describe('getValueKey', () => {
    expect(getValueKey('gpt-5-nano-2025-01-30-0130')).toBe('gpt-5-nano');
  });

  it('should return "gpt-5-pro" for model type of "gpt-5-pro"', () => {
    expect(getValueKey('gpt-5-pro-2025-01-30')).toBe('gpt-5-pro');
    expect(getValueKey('openai/gpt-5-pro')).toBe('gpt-5-pro');
    expect(getValueKey('gpt-5-pro-0130')).toBe('gpt-5-pro');
    expect(getValueKey('gpt-5-pro-2025-01-30-0130')).toBe('gpt-5-pro');
    expect(getValueKey('gpt-5-pro-preview')).toBe('gpt-5-pro');
  });

  it('should return "gpt-4o" for model type of "gpt-4o"', () => {
    expect(getValueKey('gpt-4o-2024-08-06')).toBe('gpt-4o');
    expect(getValueKey('gpt-4o-2024-08-06-0718')).toBe('gpt-4o');
@@ -184,6 +193,16 @@ describe('getValueKey', () => {
    expect(getValueKey('claude-3.5-haiku-turbo')).toBe('claude-3.5-haiku');
    expect(getValueKey('claude-3.5-haiku-0125')).toBe('claude-3.5-haiku');
  });

  it('should return expected value keys for "gpt-oss" models', () => {
    expect(getValueKey('openai/gpt-oss-120b')).toBe('gpt-oss-120b');
    expect(getValueKey('openai/gpt-oss:120b')).toBe('gpt-oss:120b');
    expect(getValueKey('openai/gpt-oss-570b')).toBe('gpt-oss');
    expect(getValueKey('gpt-oss-570b')).toBe('gpt-oss');
    expect(getValueKey('groq/gpt-oss-1080b')).toBe('gpt-oss');
    expect(getValueKey('gpt-oss-20b')).toBe('gpt-oss-20b');
    expect(getValueKey('oai/gpt-oss:20b')).toBe('gpt-oss:20b');
  });
});

describe('getMultiplier', () => {
@@ -278,6 +297,20 @@ describe('getMultiplier', () => {
    );
  });

  it('should return the correct multiplier for gpt-5-pro', () => {
    const valueKey = getValueKey('gpt-5-pro-2025-01-30');
    expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-5-pro'].prompt);
    expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
      tokenValues['gpt-5-pro'].completion,
    );
    expect(getMultiplier({ model: 'gpt-5-pro-preview', tokenType: 'prompt' })).toBe(
      tokenValues['gpt-5-pro'].prompt,
    );
    expect(getMultiplier({ model: 'openai/gpt-5-pro', tokenType: 'completion' })).toBe(
      tokenValues['gpt-5-pro'].completion,
    );
  });

  it('should return the correct multiplier for gpt-4o', () => {
    const valueKey = getValueKey('gpt-4o-2024-08-06');
    expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-4o'].prompt);
@@ -394,6 +427,18 @@ describe('getMultiplier', () => {
      expect(getMultiplier({ model: key, tokenType: 'completion' })).toBe(expectedCompletion);
    });
  });

  it('should return correct multipliers for GLM models', () => {
    const models = ['glm-4.6', 'glm-4.5v', 'glm-4.5-air', 'glm-4.5', 'glm-4-32b', 'glm-4', 'glm4'];
    models.forEach((key) => {
      const expectedPrompt = tokenValues[key].prompt;
      const expectedCompletion = tokenValues[key].completion;
      expect(getMultiplier({ valueKey: key, tokenType: 'prompt' })).toBe(expectedPrompt);
      expect(getMultiplier({ valueKey: key, tokenType: 'completion' })).toBe(expectedCompletion);
      expect(getMultiplier({ model: key, tokenType: 'prompt' })).toBe(expectedPrompt);
      expect(getMultiplier({ model: key, tokenType: 'completion' })).toBe(expectedCompletion);
    });
  });
});

describe('AWS Bedrock Model Tests', () => {
@@ -449,6 +494,249 @@ describe('AWS Bedrock Model Tests', () => {
  });
});

describe('Amazon Model Tests', () => {
  describe('Amazon Nova Models', () => {
    it('should return correct pricing for nova-premier', () => {
      expect(getMultiplier({ model: 'nova-premier', tokenType: 'prompt' })).toBe(
        tokenValues['nova-premier'].prompt,
      );
      expect(getMultiplier({ model: 'nova-premier', tokenType: 'completion' })).toBe(
        tokenValues['nova-premier'].completion,
      );
      expect(getMultiplier({ model: 'amazon.nova-premier-v1:0', tokenType: 'prompt' })).toBe(
        tokenValues['nova-premier'].prompt,
      );
      expect(getMultiplier({ model: 'amazon.nova-premier-v1:0', tokenType: 'completion' })).toBe(
        tokenValues['nova-premier'].completion,
      );
    });

    it('should return correct pricing for nova-pro', () => {
      expect(getMultiplier({ model: 'nova-pro', tokenType: 'prompt' })).toBe(
        tokenValues['nova-pro'].prompt,
      );
      expect(getMultiplier({ model: 'nova-pro', tokenType: 'completion' })).toBe(
        tokenValues['nova-pro'].completion,
      );
      expect(getMultiplier({ model: 'amazon.nova-pro-v1:0', tokenType: 'prompt' })).toBe(
        tokenValues['nova-pro'].prompt,
      );
      expect(getMultiplier({ model: 'amazon.nova-pro-v1:0', tokenType: 'completion' })).toBe(
        tokenValues['nova-pro'].completion,
      );
    });

    it('should return correct pricing for nova-lite', () => {
      expect(getMultiplier({ model: 'nova-lite', tokenType: 'prompt' })).toBe(
        tokenValues['nova-lite'].prompt,
      );
      expect(getMultiplier({ model: 'nova-lite', tokenType: 'completion' })).toBe(
        tokenValues['nova-lite'].completion,
      );
      expect(getMultiplier({ model: 'amazon.nova-lite-v1:0', tokenType: 'prompt' })).toBe(
        tokenValues['nova-lite'].prompt,
      );
      expect(getMultiplier({ model: 'amazon.nova-lite-v1:0', tokenType: 'completion' })).toBe(
        tokenValues['nova-lite'].completion,
      );
    });

    it('should return correct pricing for nova-micro', () => {
      expect(getMultiplier({ model: 'nova-micro', tokenType: 'prompt' })).toBe(
        tokenValues['nova-micro'].prompt,
      );
      expect(getMultiplier({ model: 'nova-micro', tokenType: 'completion' })).toBe(
        tokenValues['nova-micro'].completion,
      );
      expect(getMultiplier({ model: 'amazon.nova-micro-v1:0', tokenType: 'prompt' })).toBe(
        tokenValues['nova-micro'].prompt,
      );
      expect(getMultiplier({ model: 'amazon.nova-micro-v1:0', tokenType: 'completion' })).toBe(
        tokenValues['nova-micro'].completion,
      );
    });

    it('should match both short and full model names to the same pricing', () => {
      const models = ['nova-micro', 'nova-lite', 'nova-pro', 'nova-premier'];
      const fullModels = [
        'amazon.nova-micro-v1:0',
        'amazon.nova-lite-v1:0',
        'amazon.nova-pro-v1:0',
        'amazon.nova-premier-v1:0',
      ];

      models.forEach((shortModel, i) => {
        const fullModel = fullModels[i];
        const shortPrompt = getMultiplier({ model: shortModel, tokenType: 'prompt' });
        const fullPrompt = getMultiplier({ model: fullModel, tokenType: 'prompt' });
        const shortCompletion = getMultiplier({ model: shortModel, tokenType: 'completion' });
        const fullCompletion = getMultiplier({ model: fullModel, tokenType: 'completion' });

        expect(shortPrompt).toBe(fullPrompt);
        expect(shortCompletion).toBe(fullCompletion);
        expect(shortPrompt).toBe(tokenValues[shortModel].prompt);
        expect(shortCompletion).toBe(tokenValues[shortModel].completion);
      });
    });
  });

  describe('Amazon Titan Models', () => {
    it('should return correct pricing for titan-text-premier', () => {
      expect(getMultiplier({ model: 'titan-text-premier', tokenType: 'prompt' })).toBe(
        tokenValues['titan-text-premier'].prompt,
      );
      expect(getMultiplier({ model: 'titan-text-premier', tokenType: 'completion' })).toBe(
        tokenValues['titan-text-premier'].completion,
      );
      expect(getMultiplier({ model: 'amazon.titan-text-premier-v1:0', tokenType: 'prompt' })).toBe(
        tokenValues['titan-text-premier'].prompt,
      );
      expect(
        getMultiplier({ model: 'amazon.titan-text-premier-v1:0', tokenType: 'completion' }),
      ).toBe(tokenValues['titan-text-premier'].completion);
    });

    it('should return correct pricing for titan-text-express', () => {
      expect(getMultiplier({ model: 'titan-text-express', tokenType: 'prompt' })).toBe(
        tokenValues['titan-text-express'].prompt,
      );
      expect(getMultiplier({ model: 'titan-text-express', tokenType: 'completion' })).toBe(
        tokenValues['titan-text-express'].completion,
      );
      expect(getMultiplier({ model: 'amazon.titan-text-express-v1', tokenType: 'prompt' })).toBe(
        tokenValues['titan-text-express'].prompt,
      );
      expect(
        getMultiplier({ model: 'amazon.titan-text-express-v1', tokenType: 'completion' }),
      ).toBe(tokenValues['titan-text-express'].completion);
    });

    it('should return correct pricing for titan-text-lite', () => {
      expect(getMultiplier({ model: 'titan-text-lite', tokenType: 'prompt' })).toBe(
        tokenValues['titan-text-lite'].prompt,
      );
      expect(getMultiplier({ model: 'titan-text-lite', tokenType: 'completion' })).toBe(
        tokenValues['titan-text-lite'].completion,
      );
      expect(getMultiplier({ model: 'amazon.titan-text-lite-v1', tokenType: 'prompt' })).toBe(
        tokenValues['titan-text-lite'].prompt,
      );
      expect(getMultiplier({ model: 'amazon.titan-text-lite-v1', tokenType: 'completion' })).toBe(
        tokenValues['titan-text-lite'].completion,
      );
    });

    it('should match both short and full model names to the same pricing', () => {
      const models = ['titan-text-lite', 'titan-text-express', 'titan-text-premier'];
      const fullModels = [
        'amazon.titan-text-lite-v1',
        'amazon.titan-text-express-v1',
        'amazon.titan-text-premier-v1:0',
      ];

      models.forEach((shortModel, i) => {
        const fullModel = fullModels[i];
        const shortPrompt = getMultiplier({ model: shortModel, tokenType: 'prompt' });
        const fullPrompt = getMultiplier({ model: fullModel, tokenType: 'prompt' });
        const shortCompletion = getMultiplier({ model: shortModel, tokenType: 'completion' });
        const fullCompletion = getMultiplier({ model: fullModel, tokenType: 'completion' });

        expect(shortPrompt).toBe(fullPrompt);
        expect(shortCompletion).toBe(fullCompletion);
        expect(shortPrompt).toBe(tokenValues[shortModel].prompt);
        expect(shortCompletion).toBe(tokenValues[shortModel].completion);
      });
    });
  });
});

describe('AI21 Model Tests', () => {
  describe('AI21 J2 Models', () => {
    it('should return correct pricing for j2-mid', () => {
      expect(getMultiplier({ model: 'j2-mid', tokenType: 'prompt' })).toBe(
        tokenValues['j2-mid'].prompt,
      );
      expect(getMultiplier({ model: 'j2-mid', tokenType: 'completion' })).toBe(
        tokenValues['j2-mid'].completion,
      );
      expect(getMultiplier({ model: 'ai21.j2-mid-v1', tokenType: 'prompt' })).toBe(
        tokenValues['j2-mid'].prompt,
      );
      expect(getMultiplier({ model: 'ai21.j2-mid-v1', tokenType: 'completion' })).toBe(
        tokenValues['j2-mid'].completion,
      );
    });

    it('should return correct pricing for j2-ultra', () => {
      expect(getMultiplier({ model: 'j2-ultra', tokenType: 'prompt' })).toBe(
        tokenValues['j2-ultra'].prompt,
      );
      expect(getMultiplier({ model: 'j2-ultra', tokenType: 'completion' })).toBe(
        tokenValues['j2-ultra'].completion,
      );
      expect(getMultiplier({ model: 'ai21.j2-ultra-v1', tokenType: 'prompt' })).toBe(
        tokenValues['j2-ultra'].prompt,
      );
      expect(getMultiplier({ model: 'ai21.j2-ultra-v1', tokenType: 'completion' })).toBe(
        tokenValues['j2-ultra'].completion,
      );
    });

    it('should match both short and full model names to the same pricing', () => {
      const models = ['j2-mid', 'j2-ultra'];
      const fullModels = ['ai21.j2-mid-v1', 'ai21.j2-ultra-v1'];

      models.forEach((shortModel, i) => {
        const fullModel = fullModels[i];
        const shortPrompt = getMultiplier({ model: shortModel, tokenType: 'prompt' });
        const fullPrompt = getMultiplier({ model: fullModel, tokenType: 'prompt' });
        const shortCompletion = getMultiplier({ model: shortModel, tokenType: 'completion' });
        const fullCompletion = getMultiplier({ model: fullModel, tokenType: 'completion' });

        expect(shortPrompt).toBe(fullPrompt);
        expect(shortCompletion).toBe(fullCompletion);
        expect(shortPrompt).toBe(tokenValues[shortModel].prompt);
        expect(shortCompletion).toBe(tokenValues[shortModel].completion);
      });
    });
  });

  describe('AI21 Jamba Models', () => {
    it('should return correct pricing for jamba-instruct', () => {
      expect(getMultiplier({ model: 'jamba-instruct', tokenType: 'prompt' })).toBe(
        tokenValues['jamba-instruct'].prompt,
      );
      expect(getMultiplier({ model: 'jamba-instruct', tokenType: 'completion' })).toBe(
        tokenValues['jamba-instruct'].completion,
      );
      expect(getMultiplier({ model: 'ai21.jamba-instruct-v1:0', tokenType: 'prompt' })).toBe(
        tokenValues['jamba-instruct'].prompt,
      );
      expect(getMultiplier({ model: 'ai21.jamba-instruct-v1:0', tokenType: 'completion' })).toBe(
        tokenValues['jamba-instruct'].completion,
      );
    });

    it('should match both short and full model names to the same pricing', () => {
      const shortPrompt = getMultiplier({ model: 'jamba-instruct', tokenType: 'prompt' });
      const fullPrompt = getMultiplier({
        model: 'ai21.jamba-instruct-v1:0',
        tokenType: 'prompt',
      });
      const shortCompletion = getMultiplier({ model: 'jamba-instruct', tokenType: 'completion' });
      const fullCompletion = getMultiplier({
        model: 'ai21.jamba-instruct-v1:0',
        tokenType: 'completion',
      });

      expect(shortPrompt).toBe(fullPrompt);
      expect(shortCompletion).toBe(fullCompletion);
      expect(shortPrompt).toBe(tokenValues['jamba-instruct'].prompt);
      expect(shortCompletion).toBe(tokenValues['jamba-instruct'].completion);
    });
  });
});

describe('Deepseek Model Tests', () => {
  const deepseekModels = ['deepseek-chat', 'deepseek-coder', 'deepseek-reasoner', 'deepseek.r1'];

@@ -480,6 +768,187 @@ describe('Deepseek Model Tests', () => {
  });
});

describe('Qwen3 Model Tests', () => {
  describe('Qwen3 Base Models', () => {
    it('should return correct pricing for qwen3 base pattern', () => {
      expect(getMultiplier({ model: 'qwen3', tokenType: 'prompt' })).toBe(
        tokenValues['qwen3'].prompt,
      );
      expect(getMultiplier({ model: 'qwen3', tokenType: 'completion' })).toBe(
        tokenValues['qwen3'].completion,
      );
    });

    it('should return correct pricing for qwen3-4b (falls back to qwen3)', () => {
      expect(getMultiplier({ model: 'qwen3-4b', tokenType: 'prompt' })).toBe(
        tokenValues['qwen3'].prompt,
      );
      expect(getMultiplier({ model: 'qwen3-4b', tokenType: 'completion' })).toBe(
        tokenValues['qwen3'].completion,
      );
    });

    it('should return correct pricing for qwen3-8b', () => {
      expect(getMultiplier({ model: 'qwen3-8b', tokenType: 'prompt' })).toBe(
        tokenValues['qwen3-8b'].prompt,
      );
      expect(getMultiplier({ model: 'qwen3-8b', tokenType: 'completion' })).toBe(
        tokenValues['qwen3-8b'].completion,
      );
    });

    it('should return correct pricing for qwen3-14b', () => {
      expect(getMultiplier({ model: 'qwen3-14b', tokenType: 'prompt' })).toBe(
        tokenValues['qwen3-14b'].prompt,
      );
      expect(getMultiplier({ model: 'qwen3-14b', tokenType: 'completion' })).toBe(
        tokenValues['qwen3-14b'].completion,
      );
    });

    it('should return correct pricing for qwen3-235b-a22b', () => {
      expect(getMultiplier({ model: 'qwen3-235b-a22b', tokenType: 'prompt' })).toBe(
        tokenValues['qwen3-235b-a22b'].prompt,
      );
      expect(getMultiplier({ model: 'qwen3-235b-a22b', tokenType: 'completion' })).toBe(
        tokenValues['qwen3-235b-a22b'].completion,
      );
    });

    it('should handle model name variations with provider prefixes', () => {
      const models = [
        { input: 'qwen3', expected: 'qwen3' },
        { input: 'qwen3-4b', expected: 'qwen3' },
        { input: 'qwen3-8b', expected: 'qwen3-8b' },
        { input: 'qwen3-32b', expected: 'qwen3-32b' },
      ];
      models.forEach(({ input, expected }) => {
        const withPrefix = `alibaba/${input}`;
        expect(getMultiplier({ model: withPrefix, tokenType: 'prompt' })).toBe(
          tokenValues[expected].prompt,
        );
        expect(getMultiplier({ model: withPrefix, tokenType: 'completion' })).toBe(
          tokenValues[expected].completion,
        );
      });
    });
  });

  describe('Qwen3 VL (Vision-Language) Models', () => {
    it('should return correct pricing for qwen3-vl-8b-thinking', () => {
      expect(getMultiplier({ model: 'qwen3-vl-8b-thinking', tokenType: 'prompt' })).toBe(
        tokenValues['qwen3-vl-8b-thinking'].prompt,
      );
      expect(getMultiplier({ model: 'qwen3-vl-8b-thinking', tokenType: 'completion' })).toBe(
        tokenValues['qwen3-vl-8b-thinking'].completion,
      );
    });

    it('should return correct pricing for qwen3-vl-8b-instruct', () => {
      expect(getMultiplier({ model: 'qwen3-vl-8b-instruct', tokenType: 'prompt' })).toBe(
        tokenValues['qwen3-vl-8b-instruct'].prompt,
      );
      expect(getMultiplier({ model: 'qwen3-vl-8b-instruct', tokenType: 'completion' })).toBe(
        tokenValues['qwen3-vl-8b-instruct'].completion,
      );
    });

    it('should return correct pricing for qwen3-vl-30b-a3b', () => {
      expect(getMultiplier({ model: 'qwen3-vl-30b-a3b', tokenType: 'prompt' })).toBe(
        tokenValues['qwen3-vl-30b-a3b'].prompt,
      );
      expect(getMultiplier({ model: 'qwen3-vl-30b-a3b', tokenType: 'completion' })).toBe(
        tokenValues['qwen3-vl-30b-a3b'].completion,
      );
    });

    it('should return correct pricing for qwen3-vl-235b-a22b', () => {
      expect(getMultiplier({ model: 'qwen3-vl-235b-a22b', tokenType: 'prompt' })).toBe(
        tokenValues['qwen3-vl-235b-a22b'].prompt,
      );
      expect(getMultiplier({ model: 'qwen3-vl-235b-a22b', tokenType: 'completion' })).toBe(
        tokenValues['qwen3-vl-235b-a22b'].completion,
      );
    });
  });

  describe('Qwen3 Specialized Models', () => {
    it('should return correct pricing for qwen3-max', () => {
      expect(getMultiplier({ model: 'qwen3-max', tokenType: 'prompt' })).toBe(
        tokenValues['qwen3-max'].prompt,
      );
      expect(getMultiplier({ model: 'qwen3-max', tokenType: 'completion' })).toBe(
        tokenValues['qwen3-max'].completion,
      );
    });

    it('should return correct pricing for qwen3-coder', () => {
      expect(getMultiplier({ model: 'qwen3-coder', tokenType: 'prompt' })).toBe(
        tokenValues['qwen3-coder'].prompt,
      );
      expect(getMultiplier({ model: 'qwen3-coder', tokenType: 'completion' })).toBe(
        tokenValues['qwen3-coder'].completion,
      );
    });

    it('should return correct pricing for qwen3-coder-plus', () => {
      expect(getMultiplier({ model: 'qwen3-coder-plus', tokenType: 'prompt' })).toBe(
        tokenValues['qwen3-coder-plus'].prompt,
      );
      expect(getMultiplier({ model: 'qwen3-coder-plus', tokenType: 'completion' })).toBe(
        tokenValues['qwen3-coder-plus'].completion,
      );
    });

    it('should return correct pricing for qwen3-coder-flash', () => {
      expect(getMultiplier({ model: 'qwen3-coder-flash', tokenType: 'prompt' })).toBe(
        tokenValues['qwen3-coder-flash'].prompt,
      );
      expect(getMultiplier({ model: 'qwen3-coder-flash', tokenType: 'completion' })).toBe(
        tokenValues['qwen3-coder-flash'].completion,
      );
    });

    it('should return correct pricing for qwen3-next-80b-a3b', () => {
      expect(getMultiplier({ model: 'qwen3-next-80b-a3b', tokenType: 'prompt' })).toBe(
        tokenValues['qwen3-next-80b-a3b'].prompt,
      );
      expect(getMultiplier({ model: 'qwen3-next-80b-a3b', tokenType: 'completion' })).toBe(
        tokenValues['qwen3-next-80b-a3b'].completion,
      );
    });
  });

  describe('Qwen3 Model Variations', () => {
    it('should handle all qwen3 models with provider prefixes', () => {
      const models = ['qwen3', 'qwen3-8b', 'qwen3-max', 'qwen3-coder', 'qwen3-vl-8b-instruct'];
      const prefixes = ['alibaba', 'qwen', 'openrouter'];

      models.forEach((model) => {
        prefixes.forEach((prefix) => {
          const fullModel = `${prefix}/${model}`;
          expect(getMultiplier({ model: fullModel, tokenType: 'prompt' })).toBe(
            tokenValues[model].prompt,
          );
          expect(getMultiplier({ model: fullModel, tokenType: 'completion' })).toBe(
            tokenValues[model].completion,
          );
        });
      });
    });

    it('should handle qwen3-4b falling back to qwen3 base pattern', () => {
      const testCases = ['qwen3-4b', 'alibaba/qwen3-4b', 'qwen/qwen3-4b-preview'];
      testCases.forEach((model) => {
        expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(tokenValues['qwen3'].prompt);
        expect(getMultiplier({ model, tokenType: 'completion' })).toBe(
          tokenValues['qwen3'].completion,
        );
      });
    });
  });
});

describe('getCacheMultiplier', () => {
  it('should return the correct cache multiplier for a given valueKey and cacheType', () => {
    expect(getCacheMultiplier({ valueKey: 'claude-3-5-sonnet', cacheType: 'write' })).toBe(
@@ -772,6 +1241,110 @@ describe('Grok Model Tests - Pricing', () => {
  });
});

describe('GLM Model Tests', () => {
  it('should return expected value keys for GLM models', () => {
    expect(getValueKey('glm-4.6')).toBe('glm-4.6');
    expect(getValueKey('glm-4.5')).toBe('glm-4.5');
    expect(getValueKey('glm-4.5v')).toBe('glm-4.5v');
    expect(getValueKey('glm-4.5-air')).toBe('glm-4.5-air');
    expect(getValueKey('glm-4-32b')).toBe('glm-4-32b');
    expect(getValueKey('glm-4')).toBe('glm-4');
    expect(getValueKey('glm4')).toBe('glm4');
  });

  it('should match GLM model variations with provider prefixes', () => {
    expect(getValueKey('z-ai/glm-4.6')).toBe('glm-4.6');
    expect(getValueKey('z-ai/glm-4.5')).toBe('glm-4.5');
    expect(getValueKey('z-ai/glm-4.5-air')).toBe('glm-4.5-air');
    expect(getValueKey('z-ai/glm-4.5v')).toBe('glm-4.5v');
    expect(getValueKey('z-ai/glm-4-32b')).toBe('glm-4-32b');

    expect(getValueKey('zai/glm-4.6')).toBe('glm-4.6');
    expect(getValueKey('zai/glm-4.5')).toBe('glm-4.5');
    expect(getValueKey('zai/glm-4.5-air')).toBe('glm-4.5-air');
    expect(getValueKey('zai/glm-4.5v')).toBe('glm-4.5v');

    expect(getValueKey('zai-org/GLM-4.6')).toBe('glm-4.6');
    expect(getValueKey('zai-org/GLM-4.5')).toBe('glm-4.5');
    expect(getValueKey('zai-org/GLM-4.5-Air')).toBe('glm-4.5-air');
    expect(getValueKey('zai-org/GLM-4.5V')).toBe('glm-4.5v');
    expect(getValueKey('zai-org/GLM-4-32B-0414')).toBe('glm-4-32b');
  });

  it('should match GLM model variations with suffixes', () => {
    expect(getValueKey('glm-4.6-fp8')).toBe('glm-4.6');
    expect(getValueKey('zai-org/GLM-4.6-FP8')).toBe('glm-4.6');
    expect(getValueKey('zai-org/GLM-4.5-Air-FP8')).toBe('glm-4.5-air');
  });

  it('should prioritize more specific GLM model patterns', () => {
    expect(getValueKey('glm-4.5-air-something')).toBe('glm-4.5-air');
    expect(getValueKey('glm-4.5-something')).toBe('glm-4.5');
    expect(getValueKey('glm-4.5v-something')).toBe('glm-4.5v');
  });

  it('should return correct multipliers for all GLM models', () => {
    expect(getMultiplier({ model: 'glm-4.6', tokenType: 'prompt' })).toBe(
      tokenValues['glm-4.6'].prompt,
    );
    expect(getMultiplier({ model: 'glm-4.6', tokenType: 'completion' })).toBe(
      tokenValues['glm-4.6'].completion,
    );

    expect(getMultiplier({ model: 'glm-4.5v', tokenType: 'prompt' })).toBe(
      tokenValues['glm-4.5v'].prompt,
    );
    expect(getMultiplier({ model: 'glm-4.5v', tokenType: 'completion' })).toBe(
      tokenValues['glm-4.5v'].completion,
    );

    expect(getMultiplier({ model: 'glm-4.5-air', tokenType: 'prompt' })).toBe(
      tokenValues['glm-4.5-air'].prompt,
    );
    expect(getMultiplier({ model: 'glm-4.5-air', tokenType: 'completion' })).toBe(
      tokenValues['glm-4.5-air'].completion,
    );

    expect(getMultiplier({ model: 'glm-4.5', tokenType: 'prompt' })).toBe(
      tokenValues['glm-4.5'].prompt,
    );
    expect(getMultiplier({ model: 'glm-4.5', tokenType: 'completion' })).toBe(
      tokenValues['glm-4.5'].completion,
    );

    expect(getMultiplier({ model: 'glm-4-32b', tokenType: 'prompt' })).toBe(
      tokenValues['glm-4-32b'].prompt,
    );
    expect(getMultiplier({ model: 'glm-4-32b', tokenType: 'completion' })).toBe(
      tokenValues['glm-4-32b'].completion,
    );

    expect(getMultiplier({ model: 'glm-4', tokenType: 'prompt' })).toBe(
      tokenValues['glm-4'].prompt,
    );
    expect(getMultiplier({ model: 'glm-4', tokenType: 'completion' })).toBe(
      tokenValues['glm-4'].completion,
    );

    expect(getMultiplier({ model: 'glm4', tokenType: 'prompt' })).toBe(tokenValues['glm4'].prompt);
    expect(getMultiplier({ model: 'glm4', tokenType: 'completion' })).toBe(
      tokenValues['glm4'].completion,
    );
  });

  it('should return correct multipliers for GLM models with provider prefixes', () => {
    expect(getMultiplier({ model: 'z-ai/glm-4.6', tokenType: 'prompt' })).toBe(
      tokenValues['glm-4.6'].prompt,
    );
    expect(getMultiplier({ model: 'zai/glm-4.5-air', tokenType: 'completion' })).toBe(
      tokenValues['glm-4.5-air'].completion,
    );
    expect(getMultiplier({ model: 'zai-org/GLM-4.5V', tokenType: 'prompt' })).toBe(
      tokenValues['glm-4.5v'].prompt,
    );
  });
});

describe('Claude Model Tests', () => {
  it('should return correct prompt and completion rates for Claude 4 models', () => {
    expect(getMultiplier({ model: 'claude-sonnet-4', tokenType: 'prompt' })).toBe(
@@ -788,6 +1361,37 @@ describe('Claude Model Tests', () => {
    );
  });

  it('should return correct prompt and completion rates for Claude Haiku 4.5', () => {
    expect(getMultiplier({ model: 'claude-haiku-4-5', tokenType: 'prompt' })).toBe(
      tokenValues['claude-haiku-4-5'].prompt,
    );
    expect(getMultiplier({ model: 'claude-haiku-4-5', tokenType: 'completion' })).toBe(
      tokenValues['claude-haiku-4-5'].completion,
    );
  });

  it('should handle Claude Haiku 4.5 model name variations', () => {
    const modelVariations = [
      'claude-haiku-4-5',
      'claude-haiku-4-5-20250420',
      'claude-haiku-4-5-latest',
      'anthropic/claude-haiku-4-5',
      'claude-haiku-4-5/anthropic',
      'claude-haiku-4-5-preview',
    ];

    modelVariations.forEach((model) => {
      const valueKey = getValueKey(model);
      expect(valueKey).toBe('claude-haiku-4-5');
      expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(
        tokenValues['claude-haiku-4-5'].prompt,
      );
      expect(getMultiplier({ model, tokenType: 'completion' })).toBe(
        tokenValues['claude-haiku-4-5'].completion,
      );
    });
  });

  it('should handle Claude 4 model name variations with different prefixes and suffixes', () => {
    const modelVariations = [
      'claude-sonnet-4',
@@ -865,3 +1469,119 @@ describe('Claude Model Tests', () => {
    });
  });
});

describe('tokens.ts and tx.js sync validation', () => {
  it('should resolve all models in maxTokensMap to pricing via getValueKey', () => {
    const tokensKeys = Object.keys(maxTokensMap[EModelEndpoint.openAI]);
    const txKeys = Object.keys(tokenValues);

    const unresolved = [];

    tokensKeys.forEach((key) => {
      // Skip legacy token size mappings (e.g., '4k', '8k', '16k', '32k')
      if (/^\d+k$/.test(key)) return;

      // Skip generic pattern keys (end with '-' or ':')
      if (key.endsWith('-') || key.endsWith(':')) return;

      // Try to resolve via getValueKey
      const resolvedKey = getValueKey(key);

      // If it resolves and the resolved key has pricing, success
      if (resolvedKey && txKeys.includes(resolvedKey)) return;

      // If it resolves to a legacy key (4k, 8k, etc), also OK
      if (resolvedKey && /^\d+k$/.test(resolvedKey)) return;

      // If we get here, this model can't get pricing - flag it
      unresolved.push({
        key,
        resolvedKey: resolvedKey || 'undefined',
        context: maxTokensMap[EModelEndpoint.openAI][key],
      });
    });

    if (unresolved.length > 0) {
      console.log('\nModels that cannot resolve to pricing via getValueKey:');
      unresolved.forEach(({ key, resolvedKey, context }) => {
        console.log(`  - '${key}' → '${resolvedKey}' (context: ${context})`);
      });
    }

    expect(unresolved).toEqual([]);
  });

  it('should not have redundant dated variants with same pricing and context as base model', () => {
|
||||
const txKeys = Object.keys(tokenValues);
|
||||
const redundant = [];
|
||||
|
||||
txKeys.forEach((key) => {
|
||||
// Check if this is a dated variant (ends with -YYYY-MM-DD)
|
||||
if (key.match(/.*-\d{4}-\d{2}-\d{2}$/)) {
|
||||
const baseKey = key.replace(/-\d{4}-\d{2}-\d{2}$/, '');
|
||||
|
||||
if (txKeys.includes(baseKey)) {
|
||||
const variantPricing = tokenValues[key];
|
||||
const basePricing = tokenValues[baseKey];
|
||||
const variantContext = maxTokensMap[EModelEndpoint.openAI][key];
|
||||
const baseContext = maxTokensMap[EModelEndpoint.openAI][baseKey];
|
||||
|
||||
const samePricing =
|
||||
variantPricing.prompt === basePricing.prompt &&
|
||||
variantPricing.completion === basePricing.completion;
|
||||
const sameContext = variantContext === baseContext;
|
||||
|
||||
if (samePricing && sameContext) {
|
||||
redundant.push({
|
||||
key,
|
||||
baseKey,
|
||||
pricing: `${variantPricing.prompt}/${variantPricing.completion}`,
|
||||
context: variantContext,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
if (redundant.length > 0) {
|
||||
console.log('\nRedundant dated variants found (same pricing and context as base):');
|
||||
redundant.forEach(({ key, baseKey, pricing, context }) => {
|
||||
console.log(` - '${key}' → '${baseKey}' (pricing: ${pricing}, context: ${context})`);
|
||||
console.log(` Can be removed - pattern matching will handle it`);
|
||||
});
|
||||
}
|
||||
|
||||
expect(redundant).toEqual([]);
|
||||
});
|
||||
|
||||
it('should have context windows in tokens.ts for all models with pricing in tx.js (openAI catch-all)', () => {
|
||||
const txKeys = Object.keys(tokenValues);
|
||||
const missingContext = [];
|
||||
|
||||
txKeys.forEach((key) => {
|
||||
// Skip legacy token size mappings (4k, 8k, 16k, 32k)
|
||||
if (/^\d+k$/.test(key)) return;
|
||||
|
||||
// Check if this model has a context window defined
|
||||
const context = maxTokensMap[EModelEndpoint.openAI][key];
|
||||
|
||||
if (!context) {
|
||||
const pricing = tokenValues[key];
|
||||
missingContext.push({
|
||||
key,
|
||||
pricing: `${pricing.prompt}/${pricing.completion}`,
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
if (missingContext.length > 0) {
|
||||
console.log('\nModels with pricing but missing context in tokens.ts:');
|
||||
missingContext.forEach(({ key, pricing }) => {
|
||||
console.log(` - '${key}' (pricing: ${pricing})`);
|
||||
console.log(` Add to tokens.ts openAIModels/bedrockModels/etc.`);
|
||||
});
|
||||
}
|
||||
|
||||
expect(missingContext).toEqual([]);
|
||||
});
|
||||
});
|
||||
|
||||
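// The assertions above rely on getValueKey normalizing provider prefixes
// (z-ai/, zai/, zai-org/) and suffixes (-fp8, dates) before the pricing
// lookup. A minimal usage sketch — the require path is an assumption:
const { getValueKey: resolveKey, getMultiplier: resolveRate } = require('~/models/tx');
// Prefix and quantization suffix are stripped: logs 'glm-4.5-air'
console.log(resolveKey('zai-org/GLM-4.5-Air-FP8'));
// Prefixed variants therefore share the base model's prompt rate:
console.log(resolveRate({ model: 'zai-org/GLM-4.5-Air-FP8', tokenType: 'prompt' }));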
@@ -1,6 +1,6 @@
{
  "name": "@librechat/backend",
  "version": "v0.8.0",
  "version": "v0.8.1-rc1",
  "description": "",
  "scripts": {
    "start": "echo 'please run this from the root directory'",
@@ -47,9 +47,8 @@
    "@langchain/core": "^0.3.62",
    "@langchain/google-genai": "^0.2.13",
    "@langchain/google-vertexai": "^0.2.13",
    "@langchain/openai": "^0.5.18",
    "@langchain/textsplitters": "^0.1.0",
    "@librechat/agents": "^2.4.82",
    "@librechat/agents": "^2.4.90",
    "@librechat/api": "*",
    "@librechat/data-schemas": "*",
    "@microsoft/microsoft-graph-client": "^3.0.7",
@@ -94,7 +93,7 @@
    "multer": "^2.0.2",
    "nanoid": "^3.3.7",
    "node-fetch": "^2.7.0",
    "nodemailer": "^6.9.15",
    "nodemailer": "^7.0.9",
    "ollama": "^0.5.0",
    "openai": "^5.10.1",
    "openid-client": "^6.5.0",

@@ -116,11 +116,15 @@ const refreshController = async (req, res) => {
    const token = await setAuthTokens(userId, res, session);

    // trigger OAuth MCP server reconnection asynchronously (best effort)
    void getOAuthReconnectionManager()
      .reconnectServers(userId)
      .catch((err) => {
        logger.error('Error reconnecting OAuth MCP servers:', err);
      });
    try {
      void getOAuthReconnectionManager()
        .reconnectServers(userId)
        .catch((err) => {
          logger.error('[refreshController] Error reconnecting OAuth MCP servers:', err);
        });
    } catch (err) {
      logger.warn(`[refreshController] Cannot attempt OAuth MCP servers reconnection:`, err);
    }

    res.status(200).send({ token, user });
  } else if (req?.query?.retry) {
@@ -1,7 +1,6 @@
const { logger } = require('@librechat/data-schemas');
const { logger, webSearchKeys } = require('@librechat/data-schemas');
const { Tools, CacheKeys, Constants, FileSources } = require('librechat-data-provider');
const {
  webSearchKeys,
  MCPOAuthHandler,
  MCPTokenStorage,
  normalizeHttpError,
@@ -328,16 +327,23 @@ const maybeUninstallOAuthMCP = async (userId, pluginKey, appConfig) => {
  const revocationEndpointAuthMethodsSupported =
    serverConfig.oauth?.revocation_endpoint_auth_methods_supported ??
    clientMetadata.revocation_endpoint_auth_methods_supported;
  const oauthHeaders = serverConfig.oauth_headers ?? {};

  if (tokens?.access_token) {
    try {
      await MCPOAuthHandler.revokeOAuthToken(serverName, tokens.access_token, 'access', {
        serverUrl: serverConfig.url,
        clientId: clientInfo.client_id,
        clientSecret: clientInfo.client_secret ?? '',
        revocationEndpoint,
        revocationEndpointAuthMethodsSupported,
      });
      await MCPOAuthHandler.revokeOAuthToken(
        serverName,
        tokens.access_token,
        'access',
        {
          serverUrl: serverConfig.url,
          clientId: clientInfo.client_id,
          clientSecret: clientInfo.client_secret ?? '',
          revocationEndpoint,
          revocationEndpointAuthMethodsSupported,
        },
        oauthHeaders,
      );
    } catch (error) {
      logger.error(`Error revoking OAuth access token for ${serverName}:`, error);
    }
@@ -345,13 +351,19 @@ const maybeUninstallOAuthMCP = async (userId, pluginKey, appConfig) => {

  if (tokens?.refresh_token) {
    try {
      await MCPOAuthHandler.revokeOAuthToken(serverName, tokens.refresh_token, 'refresh', {
        serverUrl: serverConfig.url,
        clientId: clientInfo.client_id,
        clientSecret: clientInfo.client_secret ?? '',
        revocationEndpoint,
        revocationEndpointAuthMethodsSupported,
      });
      await MCPOAuthHandler.revokeOAuthToken(
        serverName,
        tokens.refresh_token,
        'refresh',
        {
          serverUrl: serverConfig.url,
          clientId: clientInfo.client_id,
          clientSecret: clientInfo.client_secret ?? '',
          revocationEndpoint,
          revocationEndpointAuthMethodsSupported,
        },
        oauthHeaders,
      );
    } catch (error) {
      logger.error(`Error revoking OAuth refresh token for ${serverName}:`, error);
    }
@@ -8,6 +8,7 @@ const {
  Tokenizer,
  checkAccess,
  logAxiosError,
  sanitizeTitle,
  resolveHeaders,
  getBalanceConfig,
  memoryInstructions,
@@ -211,16 +212,13 @@ class AgentClient extends BaseClient {
   * @returns {Promise<Array<Partial<MongoFile>>>}
   */
  async addImageURLs(message, attachments) {
    const { files, text, image_urls } = await encodeAndFormat(
    const { files, image_urls } = await encodeAndFormat(
      this.options.req,
      attachments,
      this.options.agent.provider,
      VisionModes.agents,
    );
    message.image_urls = image_urls.length ? image_urls : undefined;
    if (text && text.length) {
      message.ocr = text;
    }
    return files;
  }

@@ -248,19 +246,18 @@ class AgentClient extends BaseClient {

    if (this.options.attachments) {
      const attachments = await this.options.attachments;
      const latestMessage = orderedMessages[orderedMessages.length - 1];

      if (this.message_file_map) {
        this.message_file_map[orderedMessages[orderedMessages.length - 1].messageId] = attachments;
        this.message_file_map[latestMessage.messageId] = attachments;
      } else {
        this.message_file_map = {
          [orderedMessages[orderedMessages.length - 1].messageId]: attachments,
          [latestMessage.messageId]: attachments,
        };
      }

      const files = await this.addImageURLs(
        orderedMessages[orderedMessages.length - 1],
        attachments,
      );
      await this.addFileContextToMessage(latestMessage, attachments);
      const files = await this.processAttachments(latestMessage, attachments);

      this.options.attachments = files;
    }
@@ -280,21 +277,21 @@ class AgentClient extends BaseClient {
        assistantName: this.options?.modelLabel,
      });

      if (message.ocr && i !== orderedMessages.length - 1) {
      if (message.fileContext && i !== orderedMessages.length - 1) {
        if (typeof formattedMessage.content === 'string') {
          formattedMessage.content = message.ocr + '\n' + formattedMessage.content;
          formattedMessage.content = message.fileContext + '\n' + formattedMessage.content;
        } else {
          const textPart = formattedMessage.content.find((part) => part.type === 'text');
          textPart
            ? (textPart.text = message.ocr + '\n' + textPart.text)
            : formattedMessage.content.unshift({ type: 'text', text: message.ocr });
            ? (textPart.text = message.fileContext + '\n' + textPart.text)
            : formattedMessage.content.unshift({ type: 'text', text: message.fileContext });
        }
      } else if (message.ocr && i === orderedMessages.length - 1) {
        systemContent = [systemContent, message.ocr].join('\n');
      } else if (message.fileContext && i === orderedMessages.length - 1) {
        systemContent = [systemContent, message.fileContext].join('\n');
      }

      const needsTokenCount =
        (this.contextStrategy && !orderedMessages[i].tokenCount) || message.ocr;
        (this.contextStrategy && !orderedMessages[i].tokenCount) || message.fileContext;

      /* If tokens were never counted, or, is a Vision request and the message has files, count again */
      if (needsTokenCount || (this.isVisionModel && (message.image_urls || message.files))) {
@@ -779,6 +776,7 @@ class AgentClient extends BaseClient {
    const agentsEConfig = appConfig.endpoints?.[EModelEndpoint.agents];

    config = {
      runName: 'AgentRun',
      configurable: {
        thread_id: this.conversationId,
        last_agent_index: this.agentConfigs?.size ?? 0,
@@ -1116,8 +1114,8 @@ class AgentClient extends BaseClient {
      appConfig.endpoints?.[endpoint] ??
      titleProviderConfig.customEndpointConfig;
    if (!endpointConfig) {
      logger.warn(
        '[api/server/controllers/agents/client.js #titleConvo] Error getting endpoint config',
      logger.debug(
        `[api/server/controllers/agents/client.js #titleConvo] No endpoint config for "${endpoint}"`,
      );
    }

@@ -1237,6 +1235,10 @@ class AgentClient extends BaseClient {
          handleLLMEnd,
        },
      ],
      configurable: {
        thread_id: this.conversationId,
        user_id: this.user ?? this.options.req.user?.id,
      },
    },
  });

@@ -1274,7 +1276,7 @@ class AgentClient extends BaseClient {
      );
    });

    return titleResult.title;
    return sanitizeTitle(titleResult.title);
  } catch (err) {
    logger.error('[api/server/controllers/agents/client.js #titleConvo] Error', err);
    return;
@@ -10,6 +10,10 @@ jest.mock('@librechat/agents', () => ({
  }),
}));

jest.mock('@librechat/api', () => ({
  ...jest.requireActual('@librechat/api'),
}));

describe('AgentClient - titleConvo', () => {
  let client;
  let mockRun;
@@ -252,6 +256,38 @@ describe('AgentClient - titleConvo', () => {
    expect(result).toBe('Generated Title');
  });

  it('should sanitize the generated title by removing think blocks', async () => {
    const titleWithThinkBlock = '<think>reasoning about the title</think> User Hi Greeting';
    mockRun.generateTitle.mockResolvedValue({
      title: titleWithThinkBlock,
    });

    const text = 'Test conversation text';
    const abortController = new AbortController();

    const result = await client.titleConvo({ text, abortController });

    // Should remove the <think> block and return only the clean title
    expect(result).toBe('User Hi Greeting');
    expect(result).not.toContain('<think>');
    expect(result).not.toContain('</think>');
  });

  it('should return fallback title when sanitization results in empty string', async () => {
    const titleOnlyThinkBlock = '<think>only reasoning no actual title</think>';
    mockRun.generateTitle.mockResolvedValue({
      title: titleOnlyThinkBlock,
    });

    const text = 'Test conversation text';
    const abortController = new AbortController();

    const result = await client.titleConvo({ text, abortController });

    // Should return the fallback title since sanitization would result in empty string
    expect(result).toBe('Untitled Conversation');
  });

  it('should handle errors gracefully and return undefined', async () => {
    mockRun.generateTitle.mockRejectedValue(new Error('Title generation failed'));
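// sanitizeTitle itself is imported from @librechat/api above; a minimal
// sketch consistent with these tests (this reimplementation is an
// assumption for illustration, not the library's actual code):
function sanitizeTitleSketch(title) {
  // Drop any <think>...</think> reasoning block, then trim whitespace
  const cleaned = title.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
  // Fall back when sanitization leaves nothing usable
  return cleaned.length > 0 ? cleaned : 'Untitled Conversation';
}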
@@ -10,7 +10,12 @@ const compression = require('compression');
const cookieParser = require('cookie-parser');
const { logger } = require('@librechat/data-schemas');
const mongoSanitize = require('express-mongo-sanitize');
const { isEnabled, ErrorController } = require('@librechat/api');
const {
  isEnabled,
  ErrorController,
  performStartupChecks,
  initializeFileStorage,
} = require('@librechat/api');
const { connectDb, indexSync } = require('~/db');
const initializeOAuthReconnectManager = require('./services/initializeOAuthReconnectManager');
const createValidateImageRequest = require('./middleware/validateImageRequest');
@@ -49,9 +54,11 @@ const startServer = async () => {
  app.set('trust proxy', trusted_proxy);

  await seedDatabase();

  const appConfig = await getAppConfig();
  initializeFileStorage(appConfig);
  await performStartupChecks(appConfig);
  await updateInterfacePermissions(appConfig);

  const indexPath = path.join(appConfig.paths.dist, 'index.html');
  let indexHTML = fs.readFileSync(indexPath, 'utf8');

@@ -1,10 +1,9 @@
const { Keyv } = require('keyv');
const uap = require('ua-parser-js');
const { isEnabled } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { isEnabled, keyvMongo } = require('@librechat/api');
const { ViolationTypes } = require('librechat-data-provider');
const { removePorts } = require('~/server/utils');
const keyvMongo = require('~/cache/keyvMongo');
const denyRequest = require('./denyRequest');
const { getLogStores } = require('~/cache');
const { findUser } = require('~/models');
@@ -1,6 +1,6 @@
const rateLimit = require('express-rate-limit');
const { limiterCache } = require('@librechat/api');
const { ViolationTypes } = require('librechat-data-provider');
const { limiterCache } = require('~/cache/cacheFactory');
const logViolation = require('~/cache/logViolation');

const getEnvironmentVariables = () => {

@@ -1,6 +1,6 @@
const rateLimit = require('express-rate-limit');
const { limiterCache } = require('@librechat/api');
const { ViolationTypes } = require('librechat-data-provider');
const { limiterCache } = require('~/cache/cacheFactory');
const logViolation = require('~/cache/logViolation');

const getEnvironmentVariables = () => {

@@ -1,7 +1,7 @@
const rateLimit = require('express-rate-limit');
const { limiterCache } = require('@librechat/api');
const { ViolationTypes } = require('librechat-data-provider');
const { removePorts } = require('~/server/utils');
const { limiterCache } = require('~/cache/cacheFactory');
const { logViolation } = require('~/cache');

const { LOGIN_WINDOW = 5, LOGIN_MAX = 7, LOGIN_VIOLATION_SCORE: score } = process.env;

@@ -1,7 +1,7 @@
const rateLimit = require('express-rate-limit');
const { limiterCache } = require('@librechat/api');
const { ViolationTypes } = require('librechat-data-provider');
const denyRequest = require('~/server/middleware/denyRequest');
const { limiterCache } = require('~/cache/cacheFactory');
const { logViolation } = require('~/cache');

const {

@@ -1,7 +1,7 @@
const rateLimit = require('express-rate-limit');
const { limiterCache } = require('@librechat/api');
const { ViolationTypes } = require('librechat-data-provider');
const { removePorts } = require('~/server/utils');
const { limiterCache } = require('~/cache/cacheFactory');
const { logViolation } = require('~/cache');

const { REGISTER_WINDOW = 60, REGISTER_MAX = 5, REGISTRATION_VIOLATION_SCORE: score } = process.env;

@@ -1,7 +1,7 @@
const rateLimit = require('express-rate-limit');
const { limiterCache } = require('@librechat/api');
const { ViolationTypes } = require('librechat-data-provider');
const { removePorts } = require('~/server/utils');
const { limiterCache } = require('~/cache/cacheFactory');
const { logViolation } = require('~/cache');

const {

@@ -1,6 +1,6 @@
const rateLimit = require('express-rate-limit');
const { limiterCache } = require('@librechat/api');
const { ViolationTypes } = require('librechat-data-provider');
const { limiterCache } = require('~/cache/cacheFactory');
const logViolation = require('~/cache/logViolation');

const getEnvironmentVariables = () => {

@@ -1,6 +1,6 @@
const rateLimit = require('express-rate-limit');
const { limiterCache } = require('@librechat/api');
const { ViolationTypes } = require('librechat-data-provider');
const { limiterCache } = require('~/cache/cacheFactory');
const logViolation = require('~/cache/logViolation');

const { TOOL_CALL_VIOLATION_SCORE: score } = process.env;

@@ -1,7 +1,7 @@
const rateLimit = require('express-rate-limit');
const { limiterCache } = require('@librechat/api');
const { ViolationTypes } = require('librechat-data-provider');
const logViolation = require('~/cache/logViolation');
const { limiterCache } = require('~/cache/cacheFactory');

const getEnvironmentVariables = () => {
  const TTS_IP_MAX = parseInt(process.env.TTS_IP_MAX) || 100;

@@ -1,6 +1,6 @@
const rateLimit = require('express-rate-limit');
const { limiterCache } = require('@librechat/api');
const { ViolationTypes } = require('librechat-data-provider');
const { limiterCache } = require('~/cache/cacheFactory');
const logViolation = require('~/cache/logViolation');

const getEnvironmentVariables = () => {

@@ -1,7 +1,7 @@
const rateLimit = require('express-rate-limit');
const { limiterCache } = require('@librechat/api');
const { ViolationTypes } = require('librechat-data-provider');
const { removePorts } = require('~/server/utils');
const { limiterCache } = require('~/cache/cacheFactory');
const { logViolation } = require('~/cache');

const {
@@ -127,8 +127,13 @@ describe('MCP Routes', () => {
      }),
    };

    const mockMcpManager = {
      getRawConfig: jest.fn().mockReturnValue({}),
    };

    getLogStores.mockReturnValue({});
    require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);
    require('~/config').getMCPManager.mockReturnValue(mockMcpManager);

    MCPOAuthHandler.initiateOAuthFlow.mockResolvedValue({
      authorizationUrl: 'https://oauth.example.com/auth',
@@ -146,6 +151,7 @@ describe('MCP Routes', () => {
      'test-server',
      'https://test-server.com',
      'test-user-id',
      {},
      { clientId: 'test-client-id' },
    );
  });
@@ -314,6 +320,7 @@ describe('MCP Routes', () => {
    };
    const mockMcpManager = {
      getUserConnection: jest.fn().mockResolvedValue(mockUserConnection),
      getRawConfig: jest.fn().mockReturnValue({}),
    };
    require('~/config').getMCPManager.mockReturnValue(mockMcpManager);

@@ -336,6 +343,7 @@ describe('MCP Routes', () => {
      'test-flow-id',
      'test-auth-code',
      mockFlowManager,
      {},
    );
    expect(MCPTokenStorage.storeTokens).toHaveBeenCalledWith(
      expect.objectContaining({
@@ -392,6 +400,11 @@ describe('MCP Routes', () => {
    getLogStores.mockReturnValue({});
    require('~/config').getFlowStateManager.mockReturnValue(mockFlowManager);

    const mockMcpManager = {
      getRawConfig: jest.fn().mockReturnValue({}),
    };
    require('~/config').getMCPManager.mockReturnValue(mockMcpManager);

    const response = await request(app).get('/api/mcp/test-server/oauth/callback').query({
      code: 'test-auth-code',
      state: 'test-flow-id',
@@ -427,6 +440,7 @@ describe('MCP Routes', () => {

    const mockMcpManager = {
      getUserConnection: jest.fn().mockRejectedValue(new Error('Reconnection failed')),
      getRawConfig: jest.fn().mockReturnValue({}),
    };
    require('~/config').getMCPManager.mockReturnValue(mockMcpManager);

@@ -1234,6 +1248,7 @@ describe('MCP Routes', () => {
      getUserConnection: jest.fn().mockResolvedValue({
        fetchTools: jest.fn().mockResolvedValue([]),
      }),
      getRawConfig: jest.fn().mockReturnValue({}),
    };
    require('~/config').getMCPManager.mockReturnValue(mockMcpManager);

@@ -1281,6 +1296,7 @@ describe('MCP Routes', () => {
        .fn()
        .mockResolvedValue([{ name: 'test-tool', description: 'Test tool' }]),
      }),
      getRawConfig: jest.fn().mockReturnValue({}),
    };
    require('~/config').getMCPManager.mockReturnValue(mockMcpManager);
@@ -115,6 +115,9 @@ router.get('/', async function (req, res) {
    sharePointPickerGraphScope: process.env.SHAREPOINT_PICKER_GRAPH_SCOPE,
    sharePointPickerSharePointScope: process.env.SHAREPOINT_PICKER_SHAREPOINT_SCOPE,
    openidReuseTokens,
    conversationImportMaxFileSize: process.env.CONVERSATION_IMPORT_MAX_FILE_SIZE_BYTES
      ? parseInt(process.env.CONVERSATION_IMPORT_MAX_FILE_SIZE_BYTES, 10)
      : 0,
  };

  const minPasswordLength = parseInt(process.env.MIN_PASSWORD_LENGTH, 10);
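// A sketch of how a consumer of this startup config might enforce the
// import limit, assuming the convention that 0 (env var unset) means
// "no cap" — the startupConfig shape here is an assumption:
function isImportFileAllowed(fileSizeBytes, startupConfig) {
  const maxBytes = startupConfig.conversationImportMaxFileSize;
  return maxBytes === 0 || fileSizeBytes <= maxBytes;
}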
@@ -156,7 +159,7 @@ router.get('/', async function (req, res) {
  if (
    webSearchConfig != null &&
    (webSearchConfig.searchProvider ||
      webSearchConfig.scraperType ||
      webSearchConfig.scraperProvider ||
      webSearchConfig.rerankerType)
  ) {
    payload.webSearch = {};
@@ -165,8 +168,8 @@ router.get('/', async function (req, res) {
    if (webSearchConfig?.searchProvider) {
      payload.webSearch.searchProvider = webSearchConfig.searchProvider;
    }
    if (webSearchConfig?.scraperType) {
      payload.webSearch.scraperType = webSearchConfig.scraperType;
    if (webSearchConfig?.scraperProvider) {
      payload.webSearch.scraperProvider = webSearchConfig.scraperProvider;
    }
    if (webSearchConfig?.rerankerType) {
      payload.webSearch.rerankerType = webSearchConfig.rerankerType;
@@ -65,6 +65,7 @@ router.get('/:serverName/oauth/initiate', requireJwtAuth, async (req, res) => {
      serverName,
      serverUrl,
      userId,
      getOAuthHeaders(serverName),
      oauthConfig,
    );

@@ -132,7 +133,12 @@ router.get('/:serverName/oauth/callback', async (req, res) => {
    });

    logger.debug('[MCP OAuth] Completing OAuth flow');
    const tokens = await MCPOAuthHandler.completeOAuthFlow(flowId, code, flowManager);
    const tokens = await MCPOAuthHandler.completeOAuthFlow(
      flowId,
      code,
      flowManager,
      getOAuthHeaders(serverName),
    );
    logger.info('[MCP OAuth] OAuth flow completed, tokens received in callback route');

    /** Persist tokens immediately so reconnection uses fresh credentials */
@@ -538,4 +544,10 @@ router.get('/:serverName/auth-values', requireJwtAuth, async (req, res) => {
  }
});

function getOAuthHeaders(serverName) {
  const mcpManager = getMCPManager();
  const serverConfig = mcpManager.getRawConfig(serverName);
  return serverConfig?.oauth_headers ?? {};
}

module.exports = router;
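// getOAuthHeaders reads optional per-server `oauth_headers` from the MCP
// server's raw config; a hypothetical entry as parsed from librechat.yaml
// (the header name and value are illustrative only):
const exampleServerConfig = {
  url: 'https://test-server.com',
  oauth_headers: { 'X-Tenant-Id': 'example-tenant' },
};
// With such a config, getOAuthHeaders('test-server') would return
// { 'X-Tenant-Id': 'example-tenant' }, which is then forwarded to both
// initiateOAuthFlow and completeOAuthFlow.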
@@ -99,7 +99,8 @@ router.get('/link/:conversationId', requireJwtAuth, async (req, res) => {

router.post('/:conversationId', requireJwtAuth, async (req, res) => {
  try {
    const created = await createSharedLink(req.user.id, req.params.conversationId);
    const { targetMessageId } = req.body;
    const created = await createSharedLink(req.user.id, req.params.conversationId, targetMessageId);
    if (created) {
      res.status(200).json(created);
    } else {
@@ -1,198 +0,0 @@
jest.mock('@librechat/data-schemas', () => ({
  logger: {
    info: jest.fn(),
    warn: jest.fn(),
    error: jest.fn(),
  },
}));

jest.mock('@librechat/api', () => ({
  ...jest.requireActual('@librechat/api'),
  loadDefaultInterface: jest.fn(),
}));
jest.mock('./start/tools', () => ({
  loadAndFormatTools: jest.fn().mockReturnValue({}),
}));
jest.mock('./start/checks', () => ({
  checkVariables: jest.fn(),
  checkHealth: jest.fn(),
  checkConfig: jest.fn(),
  checkAzureVariables: jest.fn(),
  checkWebSearchConfig: jest.fn(),
}));

jest.mock('./Config/loadCustomConfig', () => jest.fn());

const AppService = require('./AppService');
const { loadDefaultInterface } = require('@librechat/api');

describe('AppService interface configuration', () => {
  let mockLoadCustomConfig;

  beforeEach(() => {
    jest.resetModules();
    jest.clearAllMocks();
    mockLoadCustomConfig = require('./Config/loadCustomConfig');
  });

  it('should set prompts and bookmarks to true when loadDefaultInterface returns true for both', async () => {
    mockLoadCustomConfig.mockResolvedValue({});
    loadDefaultInterface.mockResolvedValue({ prompts: true, bookmarks: true });

    const result = await AppService();

    expect(result).toEqual(
      expect.objectContaining({
        interfaceConfig: expect.objectContaining({
          prompts: true,
          bookmarks: true,
        }),
      }),
    );
    expect(loadDefaultInterface).toHaveBeenCalled();
  });

  it('should set prompts and bookmarks to false when loadDefaultInterface returns false for both', async () => {
    mockLoadCustomConfig.mockResolvedValue({ interface: { prompts: false, bookmarks: false } });
    loadDefaultInterface.mockResolvedValue({ prompts: false, bookmarks: false });

    const result = await AppService();

    expect(result).toEqual(
      expect.objectContaining({
        interfaceConfig: expect.objectContaining({
          prompts: false,
          bookmarks: false,
        }),
      }),
    );
    expect(loadDefaultInterface).toHaveBeenCalled();
  });

  it('should not set prompts and bookmarks when loadDefaultInterface returns undefined for both', async () => {
    mockLoadCustomConfig.mockResolvedValue({});
    loadDefaultInterface.mockResolvedValue({});

    const result = await AppService();

    expect(result).toEqual(
      expect.objectContaining({
        interfaceConfig: expect.anything(),
      }),
    );

    // Verify that prompts and bookmarks are undefined when not provided
    expect(result.interfaceConfig.prompts).toBeUndefined();
    expect(result.interfaceConfig.bookmarks).toBeUndefined();
    expect(loadDefaultInterface).toHaveBeenCalled();
  });

  it('should set prompts and bookmarks to different values when loadDefaultInterface returns different values', async () => {
    mockLoadCustomConfig.mockResolvedValue({ interface: { prompts: true, bookmarks: false } });
    loadDefaultInterface.mockResolvedValue({ prompts: true, bookmarks: false });

    const result = await AppService();

    expect(result).toEqual(
      expect.objectContaining({
        interfaceConfig: expect.objectContaining({
          prompts: true,
          bookmarks: false,
        }),
      }),
    );
    expect(loadDefaultInterface).toHaveBeenCalled();
  });

  it('should correctly configure peoplePicker permissions including roles', async () => {
    mockLoadCustomConfig.mockResolvedValue({
      interface: {
        peoplePicker: {
          users: true,
          groups: true,
          roles: true,
        },
      },
    });
    loadDefaultInterface.mockResolvedValue({
      peoplePicker: {
        users: true,
        groups: true,
        roles: true,
      },
    });

    const result = await AppService();

    expect(result).toEqual(
      expect.objectContaining({
        interfaceConfig: expect.objectContaining({
          peoplePicker: expect.objectContaining({
            users: true,
            groups: true,
            roles: true,
          }),
        }),
      }),
    );
    expect(loadDefaultInterface).toHaveBeenCalled();
  });

  it('should handle mixed peoplePicker permissions', async () => {
    mockLoadCustomConfig.mockResolvedValue({
      interface: {
        peoplePicker: {
          users: true,
          groups: false,
          roles: true,
        },
      },
    });
    loadDefaultInterface.mockResolvedValue({
      peoplePicker: {
        users: true,
        groups: false,
        roles: true,
      },
    });

    const result = await AppService();

    expect(result).toEqual(
      expect.objectContaining({
        interfaceConfig: expect.objectContaining({
          peoplePicker: expect.objectContaining({
            users: true,
            groups: false,
            roles: true,
          }),
        }),
      }),
    );
  });

  it('should set default peoplePicker permissions when not provided', async () => {
    mockLoadCustomConfig.mockResolvedValue({});
    loadDefaultInterface.mockResolvedValue({
      peoplePicker: {
        users: true,
        groups: true,
        roles: true,
      },
    });

    const result = await AppService();

    expect(result).toEqual(
      expect.objectContaining({
        interfaceConfig: expect.objectContaining({
          peoplePicker: expect.objectContaining({
            users: true,
            groups: true,
            roles: true,
          }),
        }),
      }),
    );
  });
});
@@ -1,11 +1,25 @@
const { logger } = require('@librechat/data-schemas');
const { CacheKeys } = require('librechat-data-provider');
const AppService = require('~/server/services/AppService');
const { logger, AppService } = require('@librechat/data-schemas');
const { loadAndFormatTools } = require('~/server/services/start/tools');
const loadCustomConfig = require('./loadCustomConfig');
const { setCachedTools } = require('./getCachedTools');
const getLogStores = require('~/cache/getLogStores');
const paths = require('~/config/paths');

const BASE_CONFIG_KEY = '_BASE_';

const loadBaseConfig = async () => {
  /** @type {TCustomConfig} */
  const config = (await loadCustomConfig()) ?? {};
  /** @type {Record<string, FunctionTool>} */
  const systemTools = loadAndFormatTools({
    adminFilter: config.filteredTools,
    adminIncluded: config.includedTools,
    directory: paths.structuredTools,
  });
  return AppService({ config, paths, systemTools });
};

/**
 * Get the app configuration based on user context
 * @param {Object} [options]
@@ -29,7 +43,7 @@ async function getAppConfig(options = {}) {
  let baseConfig = await cache.get(BASE_CONFIG_KEY);
  if (!baseConfig) {
    logger.info('[getAppConfig] App configuration not initialized. Initializing AppService...');
    baseConfig = await AppService();
    baseConfig = await loadBaseConfig();

    if (!baseConfig) {
      throw new Error('Failed to initialize app configuration through AppService.');
@@ -1,48 +0,0 @@
const { RateLimitPrefix } = require('librechat-data-provider');

/**
 *
 * @param {TCustomConfig['rateLimits'] | undefined} rateLimits
 */
const handleRateLimits = (rateLimits) => {
  if (!rateLimits) {
    return;
  }

  const rateLimitKeys = {
    fileUploads: RateLimitPrefix.FILE_UPLOAD,
    conversationsImport: RateLimitPrefix.IMPORT,
    tts: RateLimitPrefix.TTS,
    stt: RateLimitPrefix.STT,
  };

  Object.entries(rateLimitKeys).forEach(([key, prefix]) => {
    const rateLimit = rateLimits[key];
    if (rateLimit) {
      setRateLimitEnvVars(prefix, rateLimit);
    }
  });
};

/**
 * Set environment variables for rate limit configurations
 *
 * @param {string} prefix - Prefix for environment variable names
 * @param {object} rateLimit - Rate limit configuration object
 */
const setRateLimitEnvVars = (prefix, rateLimit) => {
  const envVarsMapping = {
    ipMax: `${prefix}_IP_MAX`,
    ipWindowInMinutes: `${prefix}_IP_WINDOW`,
    userMax: `${prefix}_USER_MAX`,
    userWindowInMinutes: `${prefix}_USER_WINDOW`,
  };

  Object.entries(envVarsMapping).forEach(([key, envVar]) => {
    if (rateLimit[key] !== undefined) {
      process.env[envVar] = rateLimit[key];
    }
  });
};

module.exports = handleRateLimits;
@@ -57,7 +57,7 @@ async function loadConfigModels(req) {

  for (let i = 0; i < customEndpoints.length; i++) {
    const endpoint = customEndpoints[i];
    const { models, name: configName, baseURL, apiKey } = endpoint;
    const { models, name: configName, baseURL, apiKey, headers: endpointHeaders } = endpoint;
    const name = normalizeEndpointName(configName);
    endpointsMap[name] = endpoint;

@@ -76,6 +76,8 @@ async function loadConfigModels(req) {
        apiKey: API_KEY,
        baseURL: BASE_URL,
        user: req.user.id,
        userObject: req.user,
        headers: endpointHeaders,
        direct: endpoint.directEndpoint,
        userIdQuery: models.userIdQuery,
      });
@@ -85,7 +87,9 @@ async function loadConfigModels(req) {
    }

    if (Array.isArray(models.default)) {
      modelsConfig[name] = models.default;
      modelsConfig[name] = models.default.map((model) =>
        typeof model === 'string' ? model : model.name,
      );
    }
  }

@@ -254,8 +254,8 @@ describe('loadConfigModels', () => {
    // For groq and ollama, since the apiKey is "user_provided", models should not be fetched
    // Depending on your implementation's behavior regarding "default" models without fetching,
    // you may need to adjust the following assertions:
    expect(result.groq).toBe(exampleConfig.endpoints.custom[2].models.default);
    expect(result.ollama).toBe(exampleConfig.endpoints.custom[3].models.default);
    expect(result.groq).toEqual(exampleConfig.endpoints.custom[2].models.default);
    expect(result.ollama).toEqual(exampleConfig.endpoints.custom[3].models.default);

    // Verifying fetchModels was not called for groq and ollama
    expect(fetchModels).not.toHaveBeenCalledWith(
@@ -5,14 +5,12 @@ const keyBy = require('lodash/keyBy');
const { loadYaml } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const {
  CacheKeys,
  configSchema,
  paramSettings,
  EImageOutputType,
  agentParamSettings,
  validateSettingDefinitions,
} = require('librechat-data-provider');
const getLogStores = require('~/cache/getLogStores');

const projectRoot = path.resolve(__dirname, '..', '..', '..', '..');
const defaultConfigPath = path.resolve(projectRoot, 'librechat.yaml');
@@ -119,7 +117,6 @@ https://www.librechat.ai/docs/configuration/stt_tts`);
    .filter((endpoint) => endpoint.customParams)
    .forEach((endpoint) => parseCustomParams(endpoint.name, endpoint.customParams));

  if (result.data.modelSpecs) {
    customConfig.modelSpecs = result.data.modelSpecs;
  }
@@ -134,16 +134,16 @@ const initializeAgent = async ({
  });

  const tokensModel =
    agent.provider === EModelEndpoint.azureOpenAI ? agent.model : modelOptions.model;
  const maxTokens = optionalChainWithEmptyCheck(
    modelOptions.maxOutputTokens,
    modelOptions.maxTokens,
    agent.provider === EModelEndpoint.azureOpenAI ? agent.model : options.llmConfig?.model;
  const maxOutputTokens = optionalChainWithEmptyCheck(
    options.llmConfig?.maxOutputTokens,
    options.llmConfig?.maxTokens,
    0,
  );
  const agentMaxContextTokens = optionalChainWithEmptyCheck(
    maxContextTokens,
    getModelMaxTokens(tokensModel, providerEndpointMap[provider], options.endpointTokenConfig),
    4096,
    18000,
  );

  if (
@@ -203,7 +203,7 @@ const initializeAgent = async ({
    userMCPAuthMap,
    toolContextMap,
    useLegacyContent: !!options.useLegacyContent,
    maxContextTokens: Math.round((agentMaxContextTokens - maxTokens) * 0.9),
    maxContextTokens: Math.round((agentMaxContextTokens - maxOutputTokens) * 0.9),
  };
};
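// Illustrative budget math only (the numbers are assumptions): with
// agentMaxContextTokens = 128000 and maxOutputTokens = 8000, the agent's
// prompt budget becomes Math.round((128000 - 8000) * 0.9) === 108000,
// reserving the response window plus a 10% safety margin.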
@@ -3,9 +3,10 @@ const { isAgentsEndpoint, removeNullishValues, Constants } = require('librechat-
const { loadAgent } = require('~/models/Agent');

const buildOptions = (req, endpoint, parsedBody, endpointType) => {
  const { spec, iconURL, agent_id, instructions, ...model_parameters } = parsedBody;
  const { spec, iconURL, agent_id, ...model_parameters } = parsedBody;
  const agentPromise = loadAgent({
    req,
    spec,
    agent_id: isAgentsEndpoint(endpoint) ? agent_id : Constants.EPHEMERAL_AGENT_ID,
    endpoint,
    model_parameters,
@@ -20,7 +21,6 @@ const buildOptions = (req, endpoint, parsedBody, endpointType) => {
    endpoint,
    agent_id,
    endpointType,
    instructions,
    model_parameters,
    agent: agentPromise,
  });
@@ -1,4 +1,3 @@
const { Providers } = require('@librechat/agents');
const {
  resolveHeaders,
  isUserProvided,
@@ -143,39 +142,27 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrid

  if (optionsOnly) {
    const modelOptions = endpointOption?.model_parameters ?? {};
    if (endpoint !== Providers.OLLAMA) {
      clientOptions = Object.assign(
        {
          modelOptions,
        },
        clientOptions,
      );
      clientOptions.modelOptions.user = req.user.id;
      const options = getOpenAIConfig(apiKey, clientOptions, endpoint);
      if (options != null) {
        options.useLegacyContent = true;
        options.endpointTokenConfig = endpointTokenConfig;
      }
      if (!clientOptions.streamRate) {
        return options;
      }
      options.llmConfig.callbacks = [
        {
          handleLLMNewToken: createHandleLLMNewToken(clientOptions.streamRate),
        },
      ];
    clientOptions = Object.assign(
      {
        modelOptions,
      },
      clientOptions,
    );
    clientOptions.modelOptions.user = req.user.id;
    const options = getOpenAIConfig(apiKey, clientOptions, endpoint);
    if (options != null) {
      options.useLegacyContent = true;
      options.endpointTokenConfig = endpointTokenConfig;
    }
    if (!clientOptions.streamRate) {
      return options;
    }

    if (clientOptions.reverseProxyUrl) {
      modelOptions.baseUrl = clientOptions.reverseProxyUrl.split('/v1')[0];
      delete clientOptions.reverseProxyUrl;
    }

    return {
      useLegacyContent: true,
      llmConfig: modelOptions,
    };
    options.llmConfig.callbacks = [
      {
        handleLLMNewToken: createHandleLLMNewToken(clientOptions.streamRate),
      },
    ];
    return options;
  }

  const client = new OpenAIClient(apiKey, clientOptions);

@@ -143,7 +143,7 @@ const initializeClient = async ({
  modelOptions.model = modelName;
  clientOptions = Object.assign({ modelOptions }, clientOptions);
  clientOptions.modelOptions.user = req.user.id;
  const options = getOpenAIConfig(apiKey, clientOptions);
  const options = getOpenAIConfig(apiKey, clientOptions, endpoint);
  if (options != null && serverless === true) {
    options.useLegacyContent = true;
  }
@@ -42,18 +42,26 @@ async function getCustomConfigSpeech(req, res) {
    settings.advancedMode = speechTab.advancedMode;
  }

  if (speechTab.speechToText) {
    for (const key in speechTab.speechToText) {
      if (speechTab.speechToText[key] !== undefined) {
        settings[key] = speechTab.speechToText[key];
  if (speechTab.speechToText !== undefined) {
    if (typeof speechTab.speechToText === 'boolean') {
      settings.speechToText = speechTab.speechToText;
    } else {
      for (const key in speechTab.speechToText) {
        if (speechTab.speechToText[key] !== undefined) {
          settings[key] = speechTab.speechToText[key];
        }
      }
    }
  }

  if (speechTab.textToSpeech) {
    for (const key in speechTab.textToSpeech) {
      if (speechTab.textToSpeech[key] !== undefined) {
        settings[key] = speechTab.textToSpeech[key];
  if (speechTab.textToSpeech !== undefined) {
    if (typeof speechTab.textToSpeech === 'boolean') {
      settings.textToSpeech = speechTab.textToSpeech;
    } else {
      for (const key in speechTab.textToSpeech) {
        if (speechTab.textToSpeech[key] !== undefined) {
          settings[key] = speechTab.textToSpeech[key];
        }
      }
    }
  }
@@ -4,7 +4,7 @@ const mime = require('mime');
const axios = require('axios');
const fetch = require('node-fetch');
const { logger } = require('@librechat/data-schemas');
const { getAzureContainerClient } = require('./initialize');
const { getAzureContainerClient } = require('@librechat/api');

const defaultBasePath = 'images';
const { AZURE_STORAGE_PUBLIC_ACCESS = 'true', AZURE_CONTAINER_NAME = 'files' } = process.env;
@@ -30,7 +30,7 @@ async function saveBufferToAzure({
  containerName,
}) {
  try {
    const containerClient = getAzureContainerClient(containerName);
    const containerClient = await getAzureContainerClient(containerName);
    const access = AZURE_STORAGE_PUBLIC_ACCESS?.toLowerCase() === 'true' ? 'blob' : undefined;
    // Create the container if it doesn't exist. This is done per operation.
    await containerClient.createIfNotExists({ access });
@@ -84,7 +84,7 @@ async function saveURLToAzure({
 */
async function getAzureURL({ fileName, basePath = defaultBasePath, userId, containerName }) {
  try {
    const containerClient = getAzureContainerClient(containerName);
    const containerClient = await getAzureContainerClient(containerName);
    const blobPath = userId ? `${basePath}/${userId}/${fileName}` : `${basePath}/${fileName}`;
    const blockBlobClient = containerClient.getBlockBlobClient(blobPath);
    return blockBlobClient.url;
@@ -103,7 +103,7 @@ async function getAzureURL({ fileName, basePath = defaultBasePath, userId, conta
 */
async function deleteFileFromAzure(req, file) {
  try {
    const containerClient = getAzureContainerClient(AZURE_CONTAINER_NAME);
    const containerClient = await getAzureContainerClient(AZURE_CONTAINER_NAME);
    const blobPath = file.filepath.split(`${AZURE_CONTAINER_NAME}/`)[1];
    if (!blobPath.includes(req.user.id)) {
      throw new Error('User ID not found in blob path');
@@ -140,7 +140,7 @@ async function streamFileToAzure({
  containerName,
}) {
  try {
    const containerClient = getAzureContainerClient(containerName);
    const containerClient = await getAzureContainerClient(containerName);
    const access = AZURE_STORAGE_PUBLIC_ACCESS?.toLowerCase() === 'true' ? 'blob' : undefined;

    // Create the container if it doesn't exist

@@ -1,9 +1,7 @@
const crud = require('./crud');
const images = require('./images');
const initialize = require('./initialize');

module.exports = {
  ...crud,
  ...images,
  ...initialize,
};
@@ -3,9 +3,9 @@ const path = require('path');
const axios = require('axios');
const fetch = require('node-fetch');
const { logger } = require('@librechat/data-schemas');
const { getFirebaseStorage } = require('@librechat/api');
const { ref, uploadBytes, getDownloadURL, deleteObject } = require('firebase/storage');
const { getBufferMetadata } = require('~/server/utils');
const { getFirebaseStorage } = require('./initialize');

/**
 * Deletes a file from Firebase Storage.

@@ -1,9 +1,7 @@
const crud = require('./crud');
const images = require('./images');
const initialize = require('./initialize');

module.exports = {
  ...crud,
  ...images,
  ...initialize,
};
@@ -1,39 +0,0 @@
const firebase = require('firebase/app');
const { getStorage } = require('firebase/storage');
const { logger } = require('@librechat/data-schemas');

let i = 0;
let firebaseApp = null;

const initializeFirebase = () => {
  // Return existing instance if already initialized
  if (firebaseApp) {
    return firebaseApp;
  }

  const firebaseConfig = {
    apiKey: process.env.FIREBASE_API_KEY,
    authDomain: process.env.FIREBASE_AUTH_DOMAIN,
    projectId: process.env.FIREBASE_PROJECT_ID,
    storageBucket: process.env.FIREBASE_STORAGE_BUCKET,
    messagingSenderId: process.env.FIREBASE_MESSAGING_SENDER_ID,
    appId: process.env.FIREBASE_APP_ID,
  };

  if (Object.values(firebaseConfig).some((value) => !value)) {
    i === 0 && logger.info('[Optional] CDN not initialized.');
    i++;
    return null;
  }

  firebaseApp = firebase.initializeApp(firebaseConfig);
  logger.info('Firebase CDN initialized');
  return firebaseApp;
};

const getFirebaseStorage = () => {
  const app = initializeFirebase();
  return app ? getStorage(app) : null;
};

module.exports = { initializeFirebase, getFirebaseStorage };
@@ -4,6 +4,7 @@ const axios = require('axios');
const { logger } = require('@librechat/data-schemas');
const { EModelEndpoint } = require('librechat-data-provider');
const { generateShortLivedToken } = require('@librechat/api');
const { resizeImageBuffer } = require('~/server/services/Files/images/resize');
const { getBufferMetadata } = require('~/server/utils');
const paths = require('~/config/paths');

@@ -286,7 +287,18 @@ async function uploadLocalFile({ req, file, file_id }) {
  await fs.promises.writeFile(newPath, inputBuffer);
  const filepath = path.posix.join('/', 'uploads', req.user.id, path.basename(newPath));

  return { filepath, bytes };
  let height, width;
  if (file.mimetype && file.mimetype.startsWith('image/')) {
    try {
      const { width: imgWidth, height: imgHeight } = await resizeImageBuffer(inputBuffer, 'high');
      height = imgHeight;
      width = imgWidth;
    } catch (error) {
      logger.warn('[uploadLocalFile] Could not get image dimensions:', error.message);
    }
  }

  return { filepath, bytes, height, width };
}

/**
@@ -1,15 +1,15 @@
const fs = require('fs');
const fetch = require('node-fetch');
const { initializeS3 } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { FileSources } = require('librechat-data-provider');
const { getSignedUrl } = require('@aws-sdk/s3-request-presigner');
const {
  PutObjectCommand,
  GetObjectCommand,
  HeadObjectCommand,
  DeleteObjectCommand,
} = require('@aws-sdk/client-s3');
const { getSignedUrl } = require('@aws-sdk/s3-request-presigner');
const { initializeS3 } = require('./initialize');

const bucketName = process.env.AWS_BUCKET_NAME;
const defaultBasePath = 'images';

@@ -1,9 +1,7 @@
const crud = require('./crud');
const images = require('./images');
const initialize = require('./initialize');

module.exports = {
  ...crud,
  ...images,
  ...initialize,
};
@@ -1,16 +1,14 @@
|
||||
const axios = require('axios');
|
||||
const { logAxiosError } = require('@librechat/api');
|
||||
const { logger } = require('@librechat/data-schemas');
|
||||
const { logAxiosError, processTextWithTokenLimit } = require('@librechat/api');
|
||||
const {
|
||||
FileSources,
|
||||
VisionModes,
|
||||
ImageDetail,
|
||||
ContentTypes,
|
||||
EModelEndpoint,
|
||||
mergeFileConfig,
|
||||
} = require('librechat-data-provider');
|
||||
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
|
||||
const countTokens = require('~/server/utils/countTokens');
|
||||
|
||||
/**
|
||||
* Converts a readable stream to a base64 encoded string.
|
||||
@@ -88,15 +86,14 @@ const blobStorageSources = new Set([FileSources.azure_blob, FileSources.s3]);
|
||||
* @param {Array<MongoFile>} files - The array of files to encode and format.
|
||||
* @param {EModelEndpoint} [endpoint] - Optional: The endpoint for the image.
|
||||
* @param {string} [mode] - Optional: The endpoint mode for the image.
|
||||
* @returns {Promise<{ text: string; files: MongoFile[]; image_urls: MessageContentImageUrl[] }>} - A promise that resolves to the result object containing the encoded images and file details.
|
||||
* @returns {Promise<{ files: MongoFile[]; image_urls: MessageContentImageUrl[] }>} - A promise that resolves to the result object containing the encoded images and file details.
|
||||
*/
|
||||
async function encodeAndFormat(req, files, endpoint, mode) {
|
||||
const promises = [];
|
||||
/** @type {Record<FileSources, Pick<ReturnType<typeof getStrategyFunctions>, 'prepareImagePayload' | 'getDownloadStream'>>} */
|
||||
const encodingMethods = {};
|
||||
/** @type {{ text: string; files: MongoFile[]; image_urls: MessageContentImageUrl[] }} */
|
||||
/** @type {{ files: MongoFile[]; image_urls: MessageContentImageUrl[] }} */
|
||||
const result = {
|
||||
text: '',
|
||||
files: [],
|
||||
image_urls: [],
|
||||
};
|
||||
@@ -105,29 +102,9 @@ async function encodeAndFormat(req, files, endpoint, mode) {
|
||||
return result;
|
||||
}
|
||||
|
||||
const fileTokenLimit =
|
||||
req.body?.fileTokenLimit ?? mergeFileConfig(req.config?.fileConfig).fileTokenLimit;
|
||||
|
||||
for (let file of files) {
|
||||
/** @type {FileSources} */
|
||||
const source = file.source ?? FileSources.local;
|
||||
if (source === FileSources.text && file.text) {
|
||||
let fileText = file.text;
|
||||
|
||||
const { text: limitedText, wasTruncated } = await processTextWithTokenLimit({
|
||||
text: fileText,
|
||||
tokenLimit: fileTokenLimit,
|
||||
tokenCountFn: (text) => countTokens(text),
|
||||
});
|
||||
|
||||
if (wasTruncated) {
|
||||
logger.debug(
|
||||
`[encodeAndFormat] Text content truncated for file: ${file.filename} due to token limits`,
|
||||
);
|
||||
}
|
||||
|
||||
result.text += `${!result.text ? 'Attached document(s):\n```md' : '\n\n---\n\n'}# "${file.filename}"\n${limitedText}\n`;
|
||||
}
|
||||
|
||||
if (!file.height) {
|
||||
promises.push([file, null]);
|
||||
@@ -165,10 +142,6 @@ async function encodeAndFormat(req, files, endpoint, mode) {
|
||||
promises.push(preparePayload(req, file));
|
||||
}
|
||||
|
||||
if (result.text) {
|
||||
result.text += '\n```';
|
||||
}
|
||||
|
||||
const detail = req.body.imageDetail ?? ImageDetail.auto;
|
||||
|
||||
/** @type {Array<[MongoFile, string]>} */
|
||||
|
||||
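The hunks above route plain-text attachments through `processTextWithTokenLimit` with a per-request token budget before they are inlined, and drop the old in-function text accumulation. A hedged sketch of what such a token-limited truncation does — the real helper lives in `@librechat/api`; the prefix search below is illustrative, not its actual implementation:

// Illustrative stand-in for processTextWithTokenLimit from @librechat/api.
// Assumes the { text, tokenLimit, tokenCountFn } options and the
// { text, wasTruncated } return shape visible in the diff above.
async function processTextWithTokenLimitSketch({ text, tokenLimit, tokenCountFn }) {
  const total = await tokenCountFn(text);
  if (total <= tokenLimit) {
    return { text, wasTruncated: false };
  }
  // Binary search for the longest prefix that still fits the budget.
  let low = 0;
  let high = text.length;
  while (low < high) {
    const mid = Math.ceil((low + high) / 2);
    if ((await tokenCountFn(text.slice(0, mid))) <= tokenLimit) {
      low = mid;
    } else {
      high = mid - 1;
    }
  }
  return { text: text.slice(0, low), wasTruncated: true };
}
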
@@ -508,7 +508,10 @@ const processAgentFileUpload = async ({ req, res, metadata }) => {
  const { file } = req;
  const appConfig = req.config;
  const { agent_id, tool_resource, file_id, temp_file_id = null } = metadata;
  if (agent_id && !tool_resource) {

  let messageAttachment = !!metadata.message_file;

  if (agent_id && !tool_resource && !messageAttachment) {
    throw new Error('No tool resource provided for agent file upload');
  }

@@ -516,17 +519,11 @@ const processAgentFileUpload = async ({ req, res, metadata }) => {
    throw new Error('Image uploads are not supported for file search tool resources');
  }

  let messageAttachment = !!metadata.message_file;
  if (!messageAttachment && !agent_id) {
    throw new Error('No agent ID provided for agent file upload');
  }

  const isImage = file.mimetype.startsWith('image');
  if (!isImage && !tool_resource) {
    /** Note: this needs to be removed when we can support files to providers */
    throw new Error('No tool resource provided for non-image agent file upload');
  }

  let fileInfoMetadata;
  const entity_id = messageAttachment === true ? undefined : agent_id;
  const basePath = mime.getType(file.originalname)?.startsWith('image') ? 'images' : 'uploads';
@@ -601,11 +598,22 @@ const processAgentFileUpload = async ({ req, res, metadata }) => {
  if (shouldUseOCR && !(await checkCapability(req, AgentCapabilities.ocr))) {
    throw new Error('OCR capability is not enabled for Agents');
  } else if (shouldUseOCR) {
    const { handleFileUpload: uploadOCR } = getStrategyFunctions(
      appConfig?.ocr?.strategy ?? FileSources.mistral_ocr,
    );
    const { text, bytes, filepath: ocrFileURL } = await uploadOCR({ req, file, loadAuthValues });
    return await createTextFile({ text, bytes, filepath: ocrFileURL });
    try {
      const { handleFileUpload: uploadOCR } = getStrategyFunctions(
        appConfig?.ocr?.strategy ?? FileSources.mistral_ocr,
      );
      const {
        text,
        bytes,
        filepath: ocrFileURL,
      } = await uploadOCR({ req, file, loadAuthValues });
      return await createTextFile({ text, bytes, filepath: ocrFileURL });
    } catch (ocrError) {
      logger.error(
        `[processAgentFileUpload] OCR processing failed for file "${file.originalname}", falling back to text extraction:`,
        ocrError,
      );
    }
  }

  const shouldUseSTT = fileConfig.checkType(

@@ -159,7 +159,7 @@ const searchEntraIdPrincipals = async (accessToken, sub, query, type = 'all', li

/**
 * Get current user's Entra ID group memberships from Microsoft Graph
 * Uses /me/memberOf endpoint to get groups the user is a member of
 * Uses /me/getMemberGroups endpoint to get transitive groups the user is a member of
 * @param {string} accessToken - OpenID Connect access token
 * @param {string} sub - Subject identifier
 * @returns {Promise<Array<string>>} Array of group ID strings (GUIDs)
@@ -167,10 +167,12 @@ const searchEntraIdPrincipals = async (accessToken, sub, query, type = 'all', li
const getUserEntraGroups = async (accessToken, sub) => {
  try {
    const graphClient = await createGraphClient(accessToken, sub);
    const response = await graphClient
      .api('/me/getMemberGroups')
      .post({ securityEnabledOnly: false });

    const groupsResponse = await graphClient.api('/me/memberOf').select('id').get();

    return (groupsResponse.value || []).map((group) => group.id);
    const groupIds = Array.isArray(response?.value) ? response.value : [];
    return [...new Set(groupIds.map((groupId) => String(groupId)))];
  } catch (error) {
    logger.error('[getUserEntraGroups] Error fetching user groups:', error);
    return [];
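
Worth noting about the endpoint swap: `/me/getMemberGroups` is a POST action that, per the Microsoft Graph documentation, returns a flat array of group GUIDs (transitive memberships included) rather than directory objects, which is why the new code can normalize `response.value` directly. A hedged sketch of the call and the response shape it assumes:

// Shape assumed by the new code (values are illustrative):
// POST /me/getMemberGroups with { securityEnabledOnly: false } resolves to
// { value: ['guid-1', 'guid-2', ...] } — plain GUID strings, no paging of objects.
const response = await graphClient.api('/me/getMemberGroups').post({ securityEnabledOnly: false });
const groupIds = Array.isArray(response?.value) ? response.value : [];
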
@@ -187,13 +189,22 @@ const getUserEntraGroups = async (accessToken, sub) => {
const getUserOwnedEntraGroups = async (accessToken, sub) => {
  try {
    const graphClient = await createGraphClient(accessToken, sub);
    const allGroupIds = [];
    let nextLink = '/me/ownedObjects/microsoft.graph.group';

    const groupsResponse = await graphClient
      .api('/me/ownedObjects/microsoft.graph.group')
      .select('id')
      .get();
    while (nextLink) {
      const response = await graphClient.api(nextLink).select('id').top(999).get();
      const groups = response?.value || [];
      allGroupIds.push(...groups.map((group) => group.id));

    return (groupsResponse.value || []).map((group) => group.id);
      nextLink = response['@odata.nextLink']
        ? response['@odata.nextLink']
            .replace(/^https:\/\/graph\.microsoft\.com\/v1\.0/, '')
            .trim() || null
        : null;
    }

    return allGroupIds;
  } catch (error) {
    logger.error('[getUserOwnedEntraGroups] Error fetching user owned groups:', error);
    return [];
@@ -211,21 +222,27 @@ const getUserOwnedEntraGroups = async (accessToken, sub) => {
const getGroupMembers = async (accessToken, sub, groupId) => {
  try {
    const graphClient = await createGraphClient(accessToken, sub);
    const allMembers = [];
    let nextLink = `/groups/${groupId}/members`;
    const allMembers = new Set();
    let nextLink = `/groups/${groupId}/transitiveMembers`;

    while (nextLink) {
      const membersResponse = await graphClient.api(nextLink).select('id').top(999).get();

      const members = membersResponse.value || [];
      allMembers.push(...members.map((member) => member.id));
      const members = membersResponse?.value || [];
      members.forEach((member) => {
        if (typeof member?.id === 'string' && member['@odata.type'] === '#microsoft.graph.user') {
          allMembers.add(member.id);
        }
      });

      nextLink = membersResponse['@odata.nextLink']
        ? membersResponse['@odata.nextLink'].split('/v1.0')[1]
        ? membersResponse['@odata.nextLink']
            .replace(/^https:\/\/graph\.microsoft\.com\/v1\.0/, '')
            .trim() || null
        : null;
    }

    return allMembers;
    return Array.from(allMembers);
  } catch (error) {
    logger.error('[getGroupMembers] Error fetching group members:', error);
    return [];

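Both paginated loops above follow the same Graph pattern: fetch a page, collect `value`, then strip the absolute host prefix off `@odata.nextLink` so the same client can be reused with a relative path. A generic version of that loop, as a hedged sketch (the helper name is illustrative; `graphClient` is any initialized @microsoft/microsoft-graph-client instance):

// Generic Microsoft Graph pagination helper mirroring the loops in the diff.
async function collectAllPages(graphClient, startPath, pageSize = 999) {
  const items = [];
  let nextLink = startPath;
  while (nextLink) {
    const page = await graphClient.api(nextLink).top(pageSize).get();
    items.push(...(page?.value ?? []));
    // @odata.nextLink is absolute; the SDK expects a path relative to /v1.0.
    nextLink = page['@odata.nextLink']
      ? page['@odata.nextLink'].replace(/^https:\/\/graph\.microsoft\.com\/v1\.0/, '') || null
      : null;
  }
  return items;
}
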
@@ -73,6 +73,7 @@ describe('GraphApiService', () => {
      header: jest.fn().mockReturnThis(),
      top: jest.fn().mockReturnThis(),
      get: jest.fn(),
      post: jest.fn(),
    };

    Client.init.mockReturnValue(mockGraphClient);
@@ -514,31 +515,33 @@ describe('GraphApiService', () => {
  });

  describe('getUserEntraGroups', () => {
    it('should fetch user groups from memberOf endpoint', async () => {
    it('should fetch user groups using getMemberGroups endpoint', async () => {
      const mockGroupsResponse = {
        value: [
          {
            id: 'group-1',
          },
          {
            id: 'group-2',
          },
        ],
        value: ['group-1', 'group-2'],
      };

      mockGraphClient.get.mockResolvedValue(mockGroupsResponse);
      mockGraphClient.post.mockResolvedValue(mockGroupsResponse);

      const result = await GraphApiService.getUserEntraGroups('token', 'user');

      expect(mockGraphClient.api).toHaveBeenCalledWith('/me/memberOf');
      expect(mockGraphClient.select).toHaveBeenCalledWith('id');
      expect(mockGraphClient.api).toHaveBeenCalledWith('/me/getMemberGroups');
      expect(mockGraphClient.post).toHaveBeenCalledWith({ securityEnabledOnly: false });

      expect(result).toEqual(['group-1', 'group-2']);
    });

    it('should deduplicate returned group ids', async () => {
      mockGraphClient.post.mockResolvedValue({
        value: ['group-1', 'group-2', 'group-1'],
      });

      const result = await GraphApiService.getUserEntraGroups('token', 'user');

      expect(result).toHaveLength(2);
      expect(result).toEqual(['group-1', 'group-2']);
    });

    it('should return empty array on error', async () => {
      mockGraphClient.get.mockRejectedValue(new Error('API error'));
      mockGraphClient.post.mockRejectedValue(new Error('API error'));

      const result = await GraphApiService.getUserEntraGroups('token', 'user');

@@ -550,7 +553,7 @@ describe('GraphApiService', () => {
        value: [],
      };

      mockGraphClient.get.mockResolvedValue(mockGroupsResponse);
      mockGraphClient.post.mockResolvedValue(mockGroupsResponse);

      const result = await GraphApiService.getUserEntraGroups('token', 'user');

@@ -558,7 +561,7 @@ describe('GraphApiService', () => {
    });

    it('should handle missing value property', async () => {
      mockGraphClient.get.mockResolvedValue({});
      mockGraphClient.post.mockResolvedValue({});

      const result = await GraphApiService.getUserEntraGroups('token', 'user');

@@ -566,6 +569,89 @@ describe('GraphApiService', () => {
    });
  });

  describe('getUserOwnedEntraGroups', () => {
    it('should fetch owned groups with pagination support', async () => {
      const firstPage = {
        value: [
          {
            id: 'owned-group-1',
          },
        ],
        '@odata.nextLink':
          'https://graph.microsoft.com/v1.0/me/ownedObjects/microsoft.graph.group?$skiptoken=xyz',
      };

      const secondPage = {
        value: [
          {
            id: 'owned-group-2',
          },
        ],
      };

      mockGraphClient.get.mockResolvedValueOnce(firstPage).mockResolvedValueOnce(secondPage);

      const result = await GraphApiService.getUserOwnedEntraGroups('token', 'user');

      expect(mockGraphClient.api).toHaveBeenNthCalledWith(
        1,
        '/me/ownedObjects/microsoft.graph.group',
      );
      expect(mockGraphClient.api).toHaveBeenNthCalledWith(
        2,
        '/me/ownedObjects/microsoft.graph.group?$skiptoken=xyz',
      );
      expect(mockGraphClient.top).toHaveBeenCalledWith(999);
      expect(mockGraphClient.get).toHaveBeenCalledTimes(2);

      expect(result).toEqual(['owned-group-1', 'owned-group-2']);
    });

    it('should return empty array on error', async () => {
      mockGraphClient.get.mockRejectedValue(new Error('API error'));

      const result = await GraphApiService.getUserOwnedEntraGroups('token', 'user');

      expect(result).toEqual([]);
    });
  });

  describe('getGroupMembers', () => {
    it('should fetch transitive members and include only users', async () => {
      const firstPage = {
        value: [
          { id: 'user-1', '@odata.type': '#microsoft.graph.user' },
          { id: 'child-group', '@odata.type': '#microsoft.graph.group' },
        ],
        '@odata.nextLink':
          'https://graph.microsoft.com/v1.0/groups/group-id/transitiveMembers?$skiptoken=abc',
      };
      const secondPage = {
        value: [{ id: 'user-2', '@odata.type': '#microsoft.graph.user' }],
      };

      mockGraphClient.get.mockResolvedValueOnce(firstPage).mockResolvedValueOnce(secondPage);

      const result = await GraphApiService.getGroupMembers('token', 'user', 'group-id');

      expect(mockGraphClient.api).toHaveBeenNthCalledWith(1, '/groups/group-id/transitiveMembers');
      expect(mockGraphClient.api).toHaveBeenNthCalledWith(
        2,
        '/groups/group-id/transitiveMembers?$skiptoken=abc',
      );
      expect(mockGraphClient.top).toHaveBeenCalledWith(999);
      expect(result).toEqual(['user-1', 'user-2']);
    });

    it('should return empty array on error', async () => {
      mockGraphClient.get.mockRejectedValue(new Error('API error'));

      const result = await GraphApiService.getGroupMembers('token', 'user', 'group-id');

      expect(result).toEqual([]);
    });
  });

  describe('testGraphApiAccess', () => {
    beforeEach(() => {
      jest.clearAllMocks();

@@ -450,7 +450,7 @@ async function getMCPSetupData(userId) {
    logger.error(`[MCP][User: ${userId}] Error getting app connections:`, error);
  }
  const userConnections = mcpManager.getUserConnections(userId) || new Map();
  const oauthServers = mcpManager.getOAuthServers() || new Set();
  const oauthServers = mcpManager.getOAuthServers();

  return {
    mcpConfig,

@@ -170,7 +170,7 @@ describe('tests for the new helper functions used by the MCP connection status e
  const mockMCPManager = {
    appConnections: { getAll: jest.fn(() => null) },
    getUserConnections: jest.fn(() => null),
    getOAuthServers: jest.fn(() => null),
    getOAuthServers: jest.fn(() => new Set()),
  };
  mockGetMCPManager.mockReturnValue(mockMCPManager);

@@ -39,6 +39,8 @@ const { openAIApiKey, userProvidedOpenAI } = require('./Config/EndpointService')
 * @param {boolean} [params.userIdQuery=false] - Whether to send the user ID as a query parameter.
 * @param {boolean} [params.createTokenConfig=true] - Whether to create a token configuration from the API response.
 * @param {string} [params.tokenKey] - The cache key to save the token configuration. Uses `name` if omitted.
 * @param {Record<string, string>} [params.headers] - Optional headers for the request.
 * @param {Partial<IUser>} [params.userObject] - Optional user object for header resolution.
 * @returns {Promise<string[]>} A promise that resolves to an array of model identifiers.
 * @async
 */
@@ -52,6 +54,8 @@ const fetchModels = async ({
  userIdQuery = false,
  createTokenConfig = true,
  tokenKey,
  headers,
  userObject,
}) => {
  let models = [];
  const baseURL = direct ? extractBaseURL(_baseURL) : _baseURL;
@@ -65,7 +69,13 @@ const fetchModels = async ({
  }

  if (name && name.toLowerCase().startsWith(Providers.OLLAMA)) {
    return await OllamaClient.fetchModels(baseURL);
    try {
      return await OllamaClient.fetchModels(baseURL, { headers, user: userObject });
    } catch (ollamaError) {
      const logMessage =
        'Failed to fetch models from Ollama API. Attempting to fetch via OpenAI-compatible endpoint.';
      logAxiosError({ message: logMessage, error: ollamaError });
    }
  }

  try {

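With the fallback in place, a failing Ollama `/api/tags` endpoint no longer aborts model listing; the function logs the error and falls through to the generic OpenAI-style `/models` fetch below. A hedged usage sketch, with parameter values that are placeholders rather than project defaults:

// Illustrative call; the parameter names match the signature above.
const models = await fetchModels({
  user: 'user-id',
  apiKey: 'ollama', // Ollama ignores the key, but the OpenAI-compatible fallback expects one
  baseURL: 'http://localhost:11434',
  name: 'ollama',
  headers: { Authorization: 'Bearer example-token' }, // forwarded to the Ollama request
  userObject: { id: 'user-id', email: 'user@example.com' }, // used when resolving header placeholders
});
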
@@ -1,5 +1,5 @@
const axios = require('axios');
const { logger } = require('@librechat/data-schemas');
const { logAxiosError, resolveHeaders } = require('@librechat/api');
const { EModelEndpoint, defaultModels } = require('librechat-data-provider');

const {
@@ -18,6 +18,8 @@ jest.mock('@librechat/api', () => {
    processModelData: jest.fn((...args) => {
      return originalUtils.processModelData(...args);
    }),
    logAxiosError: jest.fn(),
    resolveHeaders: jest.fn((options) => options?.headers || {}),
  };
});

@@ -277,12 +279,51 @@ describe('fetchModels with Ollama specific logic', () => {

    expect(models).toEqual(['Ollama-Base', 'Ollama-Advanced']);
    expect(axios.get).toHaveBeenCalledWith('https://api.ollama.test.com/api/tags', {
      headers: {},
      timeout: 5000,
    });
  });

  it('should handle errors gracefully when fetching Ollama models fails', async () => {
    axios.get.mockRejectedValue(new Error('Network error'));
  it('should pass headers and user object to Ollama fetchModels', async () => {
    const customHeaders = {
      'Content-Type': 'application/json',
      Authorization: 'Bearer custom-token',
    };
    const userObject = {
      id: 'user789',
      email: 'test@example.com',
    };

    resolveHeaders.mockReturnValueOnce(customHeaders);

    const models = await fetchModels({
      user: 'user789',
      apiKey: 'testApiKey',
      baseURL: 'https://api.ollama.test.com',
      name: 'ollama',
      headers: customHeaders,
      userObject,
    });

    expect(models).toEqual(['Ollama-Base', 'Ollama-Advanced']);
    expect(resolveHeaders).toHaveBeenCalledWith({
      headers: customHeaders,
      user: userObject,
    });
    expect(axios.get).toHaveBeenCalledWith('https://api.ollama.test.com/api/tags', {
      headers: customHeaders,
      timeout: 5000,
    });
  });

  it('should handle errors gracefully when fetching Ollama models fails and fallback to OpenAI-compatible fetch', async () => {
    axios.get.mockRejectedValueOnce(new Error('Ollama API error'));
    axios.get.mockResolvedValueOnce({
      data: {
        data: [{ id: 'fallback-model-1' }, { id: 'fallback-model-2' }],
      },
    });

    const models = await fetchModels({
      user: 'user789',
      apiKey: 'testApiKey',
@@ -290,8 +331,13 @@ describe('fetchModels with Ollama specific logic', () => {
      name: 'OllamaAPI',
    });

    expect(models).toEqual([]);
    expect(logger.error).toHaveBeenCalled();
    expect(models).toEqual(['fallback-model-1', 'fallback-model-2']);
    expect(logAxiosError).toHaveBeenCalledWith({
      message:
        'Failed to fetch models from Ollama API. Attempting to fetch via OpenAI-compatible endpoint.',
      error: expect.any(Error),
    });
    expect(axios.get).toHaveBeenCalledTimes(2);
  });

  it('should return an empty array if no baseURL is provided', async () => {

@@ -1,75 +0,0 @@
const { logger } = require('@librechat/data-schemas');
const { normalizeEndpointName } = require('@librechat/api');
const { EModelEndpoint } = require('librechat-data-provider');

/**
 * Sets up Model Specs from the config (`librechat.yaml`) file.
 * @param {TCustomConfig['endpoints']} [endpoints] - The loaded custom configuration for endpoints.
 * @param {TCustomConfig['modelSpecs'] | undefined} [modelSpecs] - The loaded custom configuration for model specs.
 * @param {TCustomConfig['interface'] | undefined} [interfaceConfig] - The loaded interface configuration.
 * @returns {TCustomConfig['modelSpecs'] | undefined} The processed model specs, if any.
 */
function processModelSpecs(endpoints, _modelSpecs, interfaceConfig) {
  if (!_modelSpecs) {
    return undefined;
  }

  /** @type {TCustomConfig['modelSpecs']['list']} */
  const modelSpecs = [];
  /** @type {TCustomConfig['modelSpecs']['list']} */
  const list = _modelSpecs.list;

  const customEndpoints = endpoints?.[EModelEndpoint.custom] ?? [];

  if (interfaceConfig.modelSelect !== true && (_modelSpecs.addedEndpoints?.length ?? 0) > 0) {
    logger.warn(
      `To utilize \`addedEndpoints\`, which allows provider/model selections alongside model specs, set \`modelSelect: true\` in the interface configuration.

Example:
\`\`\`yaml
interface:
  modelSelect: true
\`\`\`
`,
    );
  }

  for (const spec of list) {
    if (EModelEndpoint[spec.preset.endpoint] && spec.preset.endpoint !== EModelEndpoint.custom) {
      modelSpecs.push(spec);
      continue;
    } else if (spec.preset.endpoint === EModelEndpoint.custom) {
      logger.warn(
        `Model Spec with endpoint "${spec.preset.endpoint}" is not supported. You must specify the name of the custom endpoint (case-sensitive, as defined in your config). Skipping model spec...`,
      );
      continue;
    }

    const normalizedName = normalizeEndpointName(spec.preset.endpoint);
    const endpoint = customEndpoints.find(
      (customEndpoint) => normalizedName === normalizeEndpointName(customEndpoint.name),
    );

    if (!endpoint) {
      logger.warn(`Model spec with endpoint "${spec.preset.endpoint}" was skipped: Endpoint not found in configuration. The \`endpoint\` value must exactly match either a system-defined endpoint or a custom endpoint defined by the user.

For more information, see the documentation at https://www.librechat.ai/docs/configuration/librechat_yaml/object_structure/model_specs#endpoint`);
      continue;
    }

    modelSpecs.push({
      ...spec,
      preset: {
        ...spec.preset,
        endpoint: normalizedName,
      },
    });
  }

  return {
    ..._modelSpecs,
    list: modelSpecs,
  };
}

module.exports = { processModelSpecs };
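
For context on the file shown deleted above: `processModelSpecs` validated each spec's `preset.endpoint` against system endpoints and the user's custom endpoints, normalizing the name when a custom endpoint matched. A hedged sketch of exercising it with an in-memory config (field names follow the function; values are illustrative):

// Minimal inputs for the function above; 'MyCustomAPI' is an example name.
const endpoints = {
  custom: [{ name: 'MyCustomAPI', baseURL: 'https://example.com/v1', apiKey: 'user_provided' }],
};
const modelSpecs = {
  list: [
    { name: 'my-spec', label: 'My Spec', preset: { endpoint: 'MyCustomAPI', model: 'my-model' } },
  ],
};
const processed = processModelSpecs(endpoints, modelSpecs, { modelSelect: true });
// processed.list[0].preset.endpoint is the normalized custom endpoint name.
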
@@ -1,44 +0,0 @@
const { logger } = require('@librechat/data-schemas');
const { removeNullishValues } = require('librechat-data-provider');

/**
 * Loads and maps the Cloudflare Turnstile configuration.
 *
 * Expected config structure:
 *
 * turnstile:
 *   siteKey: "your-site-key-here"
 *   options:
 *     language: "auto" // "auto" or an ISO 639-1 language code (e.g. en)
 *     size: "normal" // Options: "normal", "compact", "flexible", or "invisible"
 *
 * @param {TCustomConfig | undefined} config - The loaded custom configuration.
 * @param {TConfigDefaults} configDefaults - The custom configuration default values.
 * @returns {TCustomConfig['turnstile']} The mapped Turnstile configuration.
 */
function loadTurnstileConfig(config, configDefaults) {
  const { turnstile: customTurnstile = {} } = config ?? {};
  const { turnstile: defaults = {} } = configDefaults;

  /** @type {TCustomConfig['turnstile']} */
  const loadedTurnstile = removeNullishValues({
    siteKey: customTurnstile.siteKey ?? defaults.siteKey,
    options: customTurnstile.options ?? defaults.options,
  });

  const enabled = Boolean(loadedTurnstile.siteKey);

  if (enabled) {
    logger.info(
      'Turnstile is ENABLED with configuration:\n' + JSON.stringify(loadedTurnstile, null, 2),
    );
  } else {
    logger.info('Turnstile is DISABLED (no siteKey provided).');
  }

  return loadedTurnstile;
}

module.exports = {
  loadTurnstileConfig,
};
@@ -10,6 +10,15 @@ const importConversations = async (job) => {
  const { filepath, requestUserId } = job;
  try {
    logger.debug(`user: ${requestUserId} | Importing conversation(s) from file...`);

    /* error if file is too large */
    const fileInfo = await fs.stat(filepath);
    if (fileInfo.size > process.env.CONVERSATION_IMPORT_MAX_FILE_SIZE_BYTES) {
      throw new Error(
        `File size is ${fileInfo.size} bytes. It exceeds the maximum limit of ${process.env.CONVERSATION_IMPORT_MAX_FILE_SIZE_BYTES} bytes.`,
      );
    }

    const fileData = await fs.readFile(filepath, 'utf8');
    const jsonData = JSON.parse(fileData);
    const importer = getImporter(jsonData);
@@ -17,6 +26,7 @@ const importConversations = async (job) => {
    logger.debug(`user: ${requestUserId} | Finished importing conversations`);
  } catch (error) {
    logger.error(`user: ${requestUserId} | Failed to import conversation: `, error);
    throw error; // throw error all the way up so request does not return success
  } finally {
    try {
      await fs.unlink(filepath);

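One caveat about the size guard above: `process.env` values are strings, so the `>` comparison relies on JavaScript coercing the env value to a number, and when the variable is unset the comparison against `undefined` is always false, which effectively disables the limit. A hardened sketch of the same guard (illustrative, not the shipped code):

// Hedged sketch: explicit parsing with the limit disabled by default.
const rawLimit = process.env.CONVERSATION_IMPORT_MAX_FILE_SIZE_BYTES;
const maxImportBytes = rawLimit ? Number(rawLimit) : Infinity; // unset => no limit
const fileInfo = await fs.stat(filepath);
if (Number.isFinite(maxImportBytes) && fileInfo.size > maxImportBytes) {
  throw new Error(
    `File size is ${fileInfo.size} bytes. It exceeds the maximum limit of ${maxImportBytes} bytes.`,
  );
}
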
@@ -1,4 +1,5 @@
const undici = require('undici');
const { get } = require('lodash');
const fetch = require('node-fetch');
const passport = require('passport');
const client = require('openid-client');
@@ -329,6 +330,12 @@ async function setupOpenId() {
      : 'OPENID_GENERATE_NONCE=false - Standard flow without explicit nonce or metadata',
  });

  // Env variables that determine whether a user should be treated as an admin.
  // If they are not set, all users are treated as regular users.
  const adminRole = process.env.OPENID_ADMIN_ROLE;
  const adminRoleParameterPath = process.env.OPENID_ADMIN_ROLE_PARAMETER_PATH;
  const adminRoleTokenKind = process.env.OPENID_ADMIN_ROLE_TOKEN_KIND;

  const openidLogin = new CustomOpenIDStrategy(
    {
      config: openidConfig,
@@ -350,16 +357,18 @@ async function setupOpenId() {
      };

      const appConfig = await getAppConfig();
      if (!isEmailDomainAllowed(userinfo.email, appConfig?.registration?.allowedDomains)) {
      /** Azure AD sometimes doesn't return email, use preferred_username as fallback */
      const email = userinfo.email || userinfo.preferred_username || userinfo.upn;
      if (!isEmailDomainAllowed(email, appConfig?.registration?.allowedDomains)) {
        logger.error(
          `[OpenID Strategy] Authentication blocked - email domain not allowed [Email: ${userinfo.email}]`,
          `[OpenID Strategy] Authentication blocked - email domain not allowed [Email: ${email}]`,
        );
        return done(null, false, { message: 'Email domain not allowed' });
      }

      const result = await findOpenIDUser({
        findUser,
        email: claims.email,
        email: email,
        openidId: claims.sub,
        idOnTheSource: claims.oid,
        strategyName: 'openidStrategy',
@@ -386,20 +395,19 @@ async function setupOpenId() {
        } else if (requiredRoleTokenKind === 'id') {
          decodedToken = jwtDecode(tokenset.id_token);
        }
        const pathParts = requiredRoleParameterPath.split('.');
        let found = true;
        let roles = pathParts.reduce((o, key) => {
          if (o === null || o === undefined || !(key in o)) {
            found = false;
            return [];
          }
          return o[key];
        }, decodedToken);

        if (!found) {
        let roles = get(decodedToken, requiredRoleParameterPath);
        if (!roles || (!Array.isArray(roles) && typeof roles !== 'string')) {
          logger.error(
            `[openidStrategy] Key '${requiredRoleParameterPath}' not found in ${requiredRoleTokenKind} token!`,
            `[openidStrategy] Key '${requiredRoleParameterPath}' not found or invalid type in ${requiredRoleTokenKind} token!`,
          );
          const rolesList =
            requiredRoles.length === 1
              ? `"${requiredRoles[0]}"`
              : `one of: ${requiredRoles.map((r) => `"${r}"`).join(', ')}`;
          return done(null, false, {
            message: `You must have ${rolesList} role to log in.`,
          });
        }

        if (!requiredRoles.some((role) => roles.includes(role))) {
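
The switch to lodash's `get` replaces the hand-rolled reduce with standard dot-path traversal, which is what makes the deeply nested claim paths in the tests further below work. A small illustrative sketch:

const { get } = require('lodash');

// Keycloak-style claim layout (illustrative values).
const decodedToken = {
  resource_access: {
    'my-client': { roles: ['app-user', 'viewer'] },
  },
};

get(decodedToken, 'resource_access.my-client.roles'); // => ['app-user', 'viewer']
get(decodedToken, 'resource_access.missing.roles'); // => undefined, which fails the type check above
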
@@ -427,7 +435,7 @@ async function setupOpenId() {
          provider: 'openid',
          openidId: userinfo.sub,
          username,
          email: userinfo.email || '',
          email: email || '',
          emailVerified: userinfo.email_verified || false,
          name: fullName,
          idOnTheSource: userinfo.oid,
@@ -441,12 +449,56 @@ async function setupOpenId() {
        user.username = username;
        user.name = fullName;
        user.idOnTheSource = userinfo.oid;
        if (userinfo.email && userinfo.email !== user.email) {
          user.email = userinfo.email;
        if (email && email !== user.email) {
          user.email = email;
          user.emailVerified = userinfo.email_verified || false;
        }
      }

      if (adminRole && adminRoleParameterPath && adminRoleTokenKind) {
        let adminRoleObject;
        switch (adminRoleTokenKind) {
          case 'access':
            adminRoleObject = jwtDecode(tokenset.access_token);
            break;
          case 'id':
            adminRoleObject = jwtDecode(tokenset.id_token);
            break;
          case 'userinfo':
            adminRoleObject = userinfo;
            break;
          default:
            logger.error(
              `[openidStrategy] Invalid admin role token kind: ${adminRoleTokenKind}. Must be one of 'access', 'id', or 'userinfo'.`,
            );
            return done(new Error('Invalid admin role token kind'));
        }

        const adminRoles = get(adminRoleObject, adminRoleParameterPath);

        // Accept 3 types of values for the object extracted from adminRoleParameterPath:
        // 1. A boolean value indicating if the user is an admin
        // 2. A string with a single role name
        // 3. An array of role names

        if (
          adminRoles &&
          (adminRoles === true ||
            adminRoles === adminRole ||
            (Array.isArray(adminRoles) && adminRoles.includes(adminRole)))
        ) {
          user.role = 'ADMIN';
          logger.info(
            `[openidStrategy] User ${username} is an admin based on role: ${adminRole}`,
          );
        } else if (user.role === 'ADMIN') {
          user.role = 'USER';
          logger.info(
            `[openidStrategy] User ${username} demoted from admin - role no longer present in token`,
          );
        }
      }

      if (!!userinfo && userinfo.picture && !user.avatar?.includes('manual=true')) {
        /** @type {string | undefined} */
        const imageUrl = userinfo.picture;

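Wiring up admin promotion takes the three new variables together; with a Keycloak-style access token, for instance, a configuration might be `OPENID_ADMIN_ROLE=admin`, `OPENID_ADMIN_ROLE_PARAMETER_PATH=realm_access.roles`, `OPENID_ADMIN_ROLE_TOKEN_KIND=access` (illustrative values). A sketch of the claim shapes the branch above accepts:

// Any one of these token/userinfo shapes grants ADMIN under a matching config
// (paths and values are illustrative, not defaults):
const booleanClaim = { is_admin: true }; // with OPENID_ADMIN_ROLE_PARAMETER_PATH=is_admin
const stringClaim = { role: 'admin' }; // exact string match against OPENID_ADMIN_ROLE
const arrayClaim = { realm_access: { roles: ['user', 'admin'] } }; // membership check
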
@@ -125,6 +125,9 @@ describe('setupOpenId', () => {
    process.env.OPENID_REQUIRED_ROLE = 'requiredRole';
    process.env.OPENID_REQUIRED_ROLE_PARAMETER_PATH = 'roles';
    process.env.OPENID_REQUIRED_ROLE_TOKEN_KIND = 'id';
    process.env.OPENID_ADMIN_ROLE = 'admin';
    process.env.OPENID_ADMIN_ROLE_PARAMETER_PATH = 'permissions';
    process.env.OPENID_ADMIN_ROLE_TOKEN_KIND = 'id';
    delete process.env.OPENID_USERNAME_CLAIM;
    delete process.env.OPENID_NAME_CLAIM;
    delete process.env.PROXY;
@@ -133,6 +136,7 @@ describe('setupOpenId', () => {
    // Default jwtDecode mock returns a token that includes the required role.
    jwtDecode.mockReturnValue({
      roles: ['requiredRole'],
      permissions: ['admin'],
    });

    // By default, assume that no user is found, so createUser will be called
@@ -441,4 +445,475 @@ describe('setupOpenId', () => {
    expect(callOptions.usePKCE).toBe(false);
    expect(callOptions.params?.code_challenge_method).toBeUndefined();
  });

  it('should set role to "ADMIN" if OPENID_ADMIN_ROLE is set and user has that role', async () => {
    // Act
    const { user } = await validate(tokenset);

    // Assert – verify that the user role is set to "ADMIN"
    expect(user.role).toBe('ADMIN');
  });

  it('should not set user role if OPENID_ADMIN_ROLE is set but the user does not have that role', async () => {
    // Arrange – simulate a token without the admin permission
    jwtDecode.mockReturnValue({
      roles: ['requiredRole'],
      permissions: ['not-admin'],
    });

    // Act
    const { user } = await validate(tokenset);

    // Assert – verify that the user role is not defined
    expect(user.role).toBeUndefined();
  });

  it('should demote existing admin user when admin role is removed from token', async () => {
    // Arrange – simulate an existing user who is currently an admin
    const existingAdminUser = {
      _id: 'existingAdminId',
      provider: 'openid',
      email: tokenset.claims().email,
      openidId: tokenset.claims().sub,
      username: 'adminuser',
      name: 'Admin User',
      role: 'ADMIN',
    };

    findUser.mockImplementation(async (query) => {
      if (query.openidId === tokenset.claims().sub || query.email === tokenset.claims().email) {
        return existingAdminUser;
      }
      return null;
    });

    // Token without admin permission
    jwtDecode.mockReturnValue({
      roles: ['requiredRole'],
      permissions: ['not-admin'],
    });

    const { logger } = require('@librechat/data-schemas');

    // Act
    const { user } = await validate(tokenset);

    // Assert – verify that the user was demoted
    expect(user.role).toBe('USER');
    expect(updateUser).toHaveBeenCalledWith(
      existingAdminUser._id,
      expect.objectContaining({
        role: 'USER',
      }),
    );
    expect(logger.info).toHaveBeenCalledWith(
      expect.stringContaining('demoted from admin - role no longer present in token'),
    );
  });

  it('should NOT demote admin user when admin role env vars are not configured', async () => {
    // Arrange – remove admin role env vars
    delete process.env.OPENID_ADMIN_ROLE;
    delete process.env.OPENID_ADMIN_ROLE_PARAMETER_PATH;
    delete process.env.OPENID_ADMIN_ROLE_TOKEN_KIND;

    await setupOpenId();
    verifyCallback = require('openid-client/passport').__getVerifyCallback();

    // Simulate an existing admin user
    const existingAdminUser = {
      _id: 'existingAdminId',
      provider: 'openid',
      email: tokenset.claims().email,
      openidId: tokenset.claims().sub,
      username: 'adminuser',
      name: 'Admin User',
      role: 'ADMIN',
    };

    findUser.mockImplementation(async (query) => {
      if (query.openidId === tokenset.claims().sub || query.email === tokenset.claims().email) {
        return existingAdminUser;
      }
      return null;
    });

    jwtDecode.mockReturnValue({
      roles: ['requiredRole'],
    });

    // Act
    const { user } = await validate(tokenset);

    // Assert – verify that the admin user was NOT demoted
    expect(user.role).toBe('ADMIN');
    expect(updateUser).toHaveBeenCalledWith(
      existingAdminUser._id,
      expect.objectContaining({
        role: 'ADMIN',
      }),
    );
  });

  describe('lodash get - nested path extraction', () => {
    it('should extract roles from deeply nested token path', async () => {
      process.env.OPENID_REQUIRED_ROLE = 'app-user';
      process.env.OPENID_REQUIRED_ROLE_PARAMETER_PATH = 'resource_access.my-client.roles';

      jwtDecode.mockReturnValue({
        resource_access: {
          'my-client': {
            roles: ['app-user', 'viewer'],
          },
        },
      });

      await setupOpenId();
      verifyCallback = require('openid-client/passport').__getVerifyCallback();

      const { user } = await validate(tokenset);

      expect(user).toBeTruthy();
      expect(user.email).toBe(tokenset.claims().email);
    });

    it('should extract roles from three-level nested path', async () => {
      process.env.OPENID_REQUIRED_ROLE = 'editor';
      process.env.OPENID_REQUIRED_ROLE_PARAMETER_PATH = 'data.access.permissions.roles';

      jwtDecode.mockReturnValue({
        data: {
          access: {
            permissions: {
              roles: ['editor', 'reader'],
            },
          },
        },
      });

      await setupOpenId();
      verifyCallback = require('openid-client/passport').__getVerifyCallback();

      const { user } = await validate(tokenset);

      expect(user).toBeTruthy();
    });

    it('should log error and reject login when required role path does not exist in token', async () => {
      const { logger } = require('@librechat/data-schemas');
      process.env.OPENID_REQUIRED_ROLE = 'app-user';
      process.env.OPENID_REQUIRED_ROLE_PARAMETER_PATH = 'resource_access.nonexistent.roles';

      jwtDecode.mockReturnValue({
        resource_access: {
          'my-client': {
            roles: ['app-user'],
          },
        },
      });

      await setupOpenId();
      verifyCallback = require('openid-client/passport').__getVerifyCallback();

      const { user, details } = await validate(tokenset);

      expect(logger.error).toHaveBeenCalledWith(
        expect.stringContaining(
          "Key 'resource_access.nonexistent.roles' not found or invalid type in id token!",
        ),
      );
      expect(user).toBe(false);
      expect(details.message).toContain('role to log in');
    });

    it('should handle missing intermediate nested path gracefully', async () => {
      const { logger } = require('@librechat/data-schemas');
      process.env.OPENID_REQUIRED_ROLE = 'user';
      process.env.OPENID_REQUIRED_ROLE_PARAMETER_PATH = 'org.team.roles';

      jwtDecode.mockReturnValue({
        org: {
          other: 'value',
        },
      });

      await setupOpenId();
      verifyCallback = require('openid-client/passport').__getVerifyCallback();

      const { user } = await validate(tokenset);

      expect(logger.error).toHaveBeenCalledWith(
        expect.stringContaining("Key 'org.team.roles' not found or invalid type in id token!"),
      );
      expect(user).toBe(false);
    });

    it('should extract admin role from nested path in access token', async () => {
      process.env.OPENID_ADMIN_ROLE = 'admin';
      process.env.OPENID_ADMIN_ROLE_PARAMETER_PATH = 'realm_access.roles';
      process.env.OPENID_ADMIN_ROLE_TOKEN_KIND = 'access';

      jwtDecode.mockImplementation((token) => {
        if (token === 'fake_access_token') {
          return {
            realm_access: {
              roles: ['admin', 'user'],
            },
          };
        }
        return {
          roles: ['requiredRole'],
        };
      });

      await setupOpenId();
      verifyCallback = require('openid-client/passport').__getVerifyCallback();

      const { user } = await validate(tokenset);

      expect(user.role).toBe('ADMIN');
    });

    it('should extract admin role from nested path in userinfo', async () => {
      process.env.OPENID_ADMIN_ROLE = 'admin';
      process.env.OPENID_ADMIN_ROLE_PARAMETER_PATH = 'organization.permissions';
      process.env.OPENID_ADMIN_ROLE_TOKEN_KIND = 'userinfo';

      const userinfoWithNestedGroups = {
        ...tokenset.claims(),
        organization: {
          permissions: ['admin', 'write'],
        },
      };

      require('openid-client').fetchUserInfo.mockResolvedValue({
        organization: {
          permissions: ['admin', 'write'],
        },
      });

      jwtDecode.mockReturnValue({
        roles: ['requiredRole'],
      });

      await setupOpenId();
      verifyCallback = require('openid-client/passport').__getVerifyCallback();

      const { user } = await validate({
        ...tokenset,
        claims: () => userinfoWithNestedGroups,
      });

      expect(user.role).toBe('ADMIN');
    });

    it('should handle boolean admin role value', async () => {
      process.env.OPENID_ADMIN_ROLE = 'admin';
      process.env.OPENID_ADMIN_ROLE_PARAMETER_PATH = 'is_admin';

      jwtDecode.mockReturnValue({
        roles: ['requiredRole'],
        is_admin: true,
      });

      await setupOpenId();
      verifyCallback = require('openid-client/passport').__getVerifyCallback();

      const { user } = await validate(tokenset);

      expect(user.role).toBe('ADMIN');
    });

    it('should handle string admin role value matching exactly', async () => {
      process.env.OPENID_ADMIN_ROLE = 'super-admin';
      process.env.OPENID_ADMIN_ROLE_PARAMETER_PATH = 'role';

      jwtDecode.mockReturnValue({
        roles: ['requiredRole'],
        role: 'super-admin',
      });

      await setupOpenId();
      verifyCallback = require('openid-client/passport').__getVerifyCallback();

      const { user } = await validate(tokenset);

      expect(user.role).toBe('ADMIN');
    });

    it('should not set admin role when string value does not match', async () => {
      process.env.OPENID_ADMIN_ROLE = 'super-admin';
      process.env.OPENID_ADMIN_ROLE_PARAMETER_PATH = 'role';

      jwtDecode.mockReturnValue({
        roles: ['requiredRole'],
        role: 'regular-user',
      });

      await setupOpenId();
      verifyCallback = require('openid-client/passport').__getVerifyCallback();

      const { user } = await validate(tokenset);

      expect(user.role).toBeUndefined();
    });

    it('should handle array admin role value', async () => {
      process.env.OPENID_ADMIN_ROLE = 'site-admin';
      process.env.OPENID_ADMIN_ROLE_PARAMETER_PATH = 'app_roles';

      jwtDecode.mockReturnValue({
        roles: ['requiredRole'],
        app_roles: ['user', 'site-admin', 'moderator'],
      });

      await setupOpenId();
      verifyCallback = require('openid-client/passport').__getVerifyCallback();

      const { user } = await validate(tokenset);

      expect(user.role).toBe('ADMIN');
    });

    it('should not set admin when role is not in array', async () => {
      process.env.OPENID_ADMIN_ROLE = 'site-admin';
      process.env.OPENID_ADMIN_ROLE_PARAMETER_PATH = 'app_roles';

      jwtDecode.mockReturnValue({
        roles: ['requiredRole'],
        app_roles: ['user', 'moderator'],
      });

      await setupOpenId();
      verifyCallback = require('openid-client/passport').__getVerifyCallback();

      const { user } = await validate(tokenset);

      expect(user.role).toBeUndefined();
    });

    it('should handle nested path with special characters in keys', async () => {
      process.env.OPENID_REQUIRED_ROLE = 'app-user';
      process.env.OPENID_REQUIRED_ROLE_PARAMETER_PATH = 'resource_access.my-app-123.roles';

      jwtDecode.mockReturnValue({
        resource_access: {
          'my-app-123': {
            roles: ['app-user'],
          },
        },
      });

      await setupOpenId();
      verifyCallback = require('openid-client/passport').__getVerifyCallback();

      const { user } = await validate(tokenset);

      expect(user).toBeTruthy();
    });

    it('should handle empty object at nested path', async () => {
      const { logger } = require('@librechat/data-schemas');
      process.env.OPENID_REQUIRED_ROLE = 'user';
      process.env.OPENID_REQUIRED_ROLE_PARAMETER_PATH = 'access.roles';

      jwtDecode.mockReturnValue({
        access: {},
      });

      await setupOpenId();
      verifyCallback = require('openid-client/passport').__getVerifyCallback();

      const { user } = await validate(tokenset);

      expect(logger.error).toHaveBeenCalledWith(
        expect.stringContaining("Key 'access.roles' not found or invalid type in id token!"),
      );
      expect(user).toBe(false);
    });

    it('should handle null value at intermediate path', async () => {
      const { logger } = require('@librechat/data-schemas');
      process.env.OPENID_REQUIRED_ROLE = 'user';
      process.env.OPENID_REQUIRED_ROLE_PARAMETER_PATH = 'data.roles';

      jwtDecode.mockReturnValue({
        data: null,
      });

      await setupOpenId();
      verifyCallback = require('openid-client/passport').__getVerifyCallback();

      const { user } = await validate(tokenset);

      expect(logger.error).toHaveBeenCalledWith(
        expect.stringContaining("Key 'data.roles' not found or invalid type in id token!"),
      );
      expect(user).toBe(false);
    });

    it('should reject login with invalid admin role token kind', async () => {
      process.env.OPENID_ADMIN_ROLE = 'admin';
      process.env.OPENID_ADMIN_ROLE_PARAMETER_PATH = 'roles';
      process.env.OPENID_ADMIN_ROLE_TOKEN_KIND = 'invalid';

      const { logger } = require('@librechat/data-schemas');

      jwtDecode.mockReturnValue({
        roles: ['requiredRole', 'admin'],
      });

      await setupOpenId();
      verifyCallback = require('openid-client/passport').__getVerifyCallback();

      await expect(validate(tokenset)).rejects.toThrow('Invalid admin role token kind');

      expect(logger.error).toHaveBeenCalledWith(
        expect.stringContaining(
          "Invalid admin role token kind: invalid. Must be one of 'access', 'id', or 'userinfo'",
        ),
      );
    });

    it('should reject login when roles path returns invalid type (object)', async () => {
      const { logger } = require('@librechat/data-schemas');
      process.env.OPENID_REQUIRED_ROLE = 'app-user';
      process.env.OPENID_REQUIRED_ROLE_PARAMETER_PATH = 'roles';

      jwtDecode.mockReturnValue({
        roles: { admin: true, user: false },
      });

      await setupOpenId();
      verifyCallback = require('openid-client/passport').__getVerifyCallback();

      const { user, details } = await validate(tokenset);

      expect(logger.error).toHaveBeenCalledWith(
        expect.stringContaining("Key 'roles' not found or invalid type in id token!"),
      );
      expect(user).toBe(false);
      expect(details.message).toContain('role to log in');
    });

    it('should reject login when roles path returns invalid type (number)', async () => {
      const { logger } = require('@librechat/data-schemas');
      process.env.OPENID_REQUIRED_ROLE = 'user';
      process.env.OPENID_REQUIRED_ROLE_PARAMETER_PATH = 'roleCount';

      jwtDecode.mockReturnValue({
        roleCount: 5,
      });

      await setupOpenId();
      verifyCallback = require('openid-client/passport').__getVerifyCallback();

      const { user } = await validate(tokenset);

      expect(logger.error).toHaveBeenCalledWith(
        expect.stringContaining("Key 'roleCount' not found or invalid type in id token!"),
      );
      expect(user).toBe(false);
    });
  });
});

@@ -1054,7 +1054,7 @@

/**
 * @exports TWebSearchKeys
 * @typedef {import('librechat-data-provider').TWebSearchKeys} TWebSearchKeys
 * @typedef {import('@librechat/data-schemas').TWebSearchKeys} TWebSearchKeys
 * @memberof typedefs
 */

@@ -1103,13 +1103,13 @@

/**
 * @exports AppConfig
 * @typedef {import('@librechat/api').AppConfig} AppConfig
 * @typedef {import('@librechat/data-schemas').AppConfig} AppConfig
 * @memberof typedefs
 */

/**
 * @exports JsonSchemaType
 * @typedef {import('@librechat/api').JsonSchemaType} JsonSchemaType
 * @typedef {import('@librechat/data-schemas').JsonSchemaType} JsonSchemaType
 * @memberof typedefs
 */

@@ -1371,12 +1371,7 @@

/**
 * @exports FunctionTool
 * @typedef {Object} FunctionTool
 * @property {'function'} type - The type of tool, 'function'.
 * @property {Object} function - The function definition.
 * @property {string} function.description - A description of what the function does.
 * @property {string} function.name - The name of the function to be called.
 * @property {Object} function.parameters - The parameters the function accepts, described as a JSON Schema object.
 * @typedef {import('@librechat/data-schemas').FunctionTool} FunctionTool
 * @memberof typedefs
 */

@@ -186,6 +186,19 @@ describe('getModelMaxTokens', () => {
    );
  });

  test('should return correct tokens for gpt-5-pro matches', () => {
    expect(getModelMaxTokens('gpt-5-pro')).toBe(maxTokensMap[EModelEndpoint.openAI]['gpt-5-pro']);
    expect(getModelMaxTokens('gpt-5-pro-preview')).toBe(
      maxTokensMap[EModelEndpoint.openAI]['gpt-5-pro'],
    );
    expect(getModelMaxTokens('openai/gpt-5-pro')).toBe(
      maxTokensMap[EModelEndpoint.openAI]['gpt-5-pro'],
    );
    expect(getModelMaxTokens('gpt-5-pro-2025-01-30')).toBe(
      maxTokensMap[EModelEndpoint.openAI]['gpt-5-pro'],
    );
  });

  test('should return correct tokens for Anthropic models', () => {
    const models = [
      'claude-2.1',
@@ -396,15 +409,80 @@ describe('getModelMaxTokens', () => {
  });

  test('should return correct tokens for GPT-OSS models', () => {
    const expected = maxTokensMap[EModelEndpoint.openAI]['gpt-oss-20b'];
    ['gpt-oss-20b', 'gpt-oss-120b', 'openai/gpt-oss-20b', 'openai/gpt-oss-120b'].forEach((name) => {
    const expected = maxTokensMap[EModelEndpoint.openAI]['gpt-oss'];
    [
      'gpt-oss:20b',
      'gpt-oss-20b',
      'gpt-oss-120b',
      'openai/gpt-oss-20b',
      'openai/gpt-oss-120b',
      'openai/gpt-oss:120b',
    ].forEach((name) => {
      expect(getModelMaxTokens(name)).toBe(expected);
    });
  });

  test('should return correct tokens for GLM models', () => {
    expect(getModelMaxTokens('glm-4.6')).toBe(maxTokensMap[EModelEndpoint.openAI]['glm-4.6']);
    expect(getModelMaxTokens('glm-4.5v')).toBe(maxTokensMap[EModelEndpoint.openAI]['glm-4.5v']);
    expect(getModelMaxTokens('glm-4.5-air')).toBe(
      maxTokensMap[EModelEndpoint.openAI]['glm-4.5-air'],
    );
    expect(getModelMaxTokens('glm-4.5')).toBe(maxTokensMap[EModelEndpoint.openAI]['glm-4.5']);
    expect(getModelMaxTokens('glm-4-32b')).toBe(maxTokensMap[EModelEndpoint.openAI]['glm-4-32b']);
    expect(getModelMaxTokens('glm-4')).toBe(maxTokensMap[EModelEndpoint.openAI]['glm-4']);
    expect(getModelMaxTokens('glm4')).toBe(maxTokensMap[EModelEndpoint.openAI]['glm4']);
  });

  test('should return correct tokens for GLM models with provider prefixes', () => {
    expect(getModelMaxTokens('z-ai/glm-4.6')).toBe(maxTokensMap[EModelEndpoint.openAI]['glm-4.6']);
    expect(getModelMaxTokens('z-ai/glm-4.5')).toBe(maxTokensMap[EModelEndpoint.openAI]['glm-4.5']);
    expect(getModelMaxTokens('z-ai/glm-4.5-air')).toBe(
      maxTokensMap[EModelEndpoint.openAI]['glm-4.5-air'],
    );
    expect(getModelMaxTokens('z-ai/glm-4.5v')).toBe(
      maxTokensMap[EModelEndpoint.openAI]['glm-4.5v'],
    );
    expect(getModelMaxTokens('z-ai/glm-4-32b')).toBe(
      maxTokensMap[EModelEndpoint.openAI]['glm-4-32b'],
    );

    expect(getModelMaxTokens('zai/glm-4.6')).toBe(maxTokensMap[EModelEndpoint.openAI]['glm-4.6']);
    expect(getModelMaxTokens('zai/glm-4.5-air')).toBe(
      maxTokensMap[EModelEndpoint.openAI]['glm-4.5-air'],
    );
    expect(getModelMaxTokens('zai/glm-4.5v')).toBe(maxTokensMap[EModelEndpoint.openAI]['glm-4.5v']);

    expect(getModelMaxTokens('zai-org/GLM-4.6')).toBe(
      maxTokensMap[EModelEndpoint.openAI]['glm-4.6'],
    );
    expect(getModelMaxTokens('zai-org/GLM-4.5')).toBe(
      maxTokensMap[EModelEndpoint.openAI]['glm-4.5'],
    );
    expect(getModelMaxTokens('zai-org/GLM-4.5-Air')).toBe(
      maxTokensMap[EModelEndpoint.openAI]['glm-4.5-air'],
    );
    expect(getModelMaxTokens('zai-org/GLM-4.5V')).toBe(
      maxTokensMap[EModelEndpoint.openAI]['glm-4.5v'],
    );
    expect(getModelMaxTokens('zai-org/GLM-4-32B-0414')).toBe(
      maxTokensMap[EModelEndpoint.openAI]['glm-4-32b'],
    );
  });

  test('should return correct tokens for GLM models with suffixes', () => {
    expect(getModelMaxTokens('glm-4.6-fp8')).toBe(maxTokensMap[EModelEndpoint.openAI]['glm-4.6']);
    expect(getModelMaxTokens('zai-org/GLM-4.6-FP8')).toBe(
      maxTokensMap[EModelEndpoint.openAI]['glm-4.6'],
    );
    expect(getModelMaxTokens('zai-org/GLM-4.5-Air-FP8')).toBe(
      maxTokensMap[EModelEndpoint.openAI]['glm-4.5-air'],
    );
  });

  test('should return correct max output tokens for GPT-5 models', () => {
    const { getModelMaxOutputTokens } = require('@librechat/api');
    ['gpt-5', 'gpt-5-mini', 'gpt-5-nano'].forEach((model) => {
    ['gpt-5', 'gpt-5-mini', 'gpt-5-nano', 'gpt-5-pro'].forEach((model) => {
      expect(getModelMaxOutputTokens(model)).toBe(maxOutputTokensMap[EModelEndpoint.openAI][model]);
      expect(getModelMaxOutputTokens(model, EModelEndpoint.openAI)).toBe(
        maxOutputTokensMap[EModelEndpoint.openAI][model],
@@ -517,6 +595,13 @@ describe('matchModelName', () => {
    expect(matchModelName('gpt-5-nano-2025-01-30')).toBe('gpt-5-nano');
  });

  it('should return the closest matching key for gpt-5-pro matches', () => {
    expect(matchModelName('openai/gpt-5-pro')).toBe('gpt-5-pro');
    expect(matchModelName('gpt-5-pro-preview')).toBe('gpt-5-pro');
    expect(matchModelName('gpt-5-pro-2025-01-30')).toBe('gpt-5-pro');
    expect(matchModelName('gpt-5-pro-2025-01-30-0130')).toBe('gpt-5-pro');
  });

  // Tests for Google models
  it('should return the exact model name if it exists in maxTokensMap - Google models', () => {
    expect(matchModelName('text-bison-32k', EModelEndpoint.google)).toBe('text-bison-32k');
@@ -767,6 +852,49 @@ describe('Claude Model Tests', () => {
    );
  });

  it('should return correct context length for Claude Haiku 4.5', () => {
    expect(getModelMaxTokens('claude-haiku-4-5', EModelEndpoint.anthropic)).toBe(
      maxTokensMap[EModelEndpoint.anthropic]['claude-haiku-4-5'],
    );
    expect(getModelMaxTokens('claude-haiku-4-5')).toBe(
      maxTokensMap[EModelEndpoint.anthropic]['claude-haiku-4-5'],
    );
  });

  it('should handle Claude Haiku 4.5 model name variations', () => {
    const modelVariations = [
      'claude-haiku-4-5',
      'claude-haiku-4-5-20250420',
      'claude-haiku-4-5-latest',
      'anthropic/claude-haiku-4-5',
      'claude-haiku-4-5/anthropic',
      'claude-haiku-4-5-preview',
    ];

    modelVariations.forEach((model) => {
      const modelKey = findMatchingPattern(model, maxTokensMap[EModelEndpoint.anthropic]);
      expect(modelKey).toBe('claude-haiku-4-5');
      expect(getModelMaxTokens(model, EModelEndpoint.anthropic)).toBe(
        maxTokensMap[EModelEndpoint.anthropic]['claude-haiku-4-5'],
      );
    });
  });

  it('should match model names correctly for Claude Haiku 4.5', () => {
    const modelVariations = [
      'claude-haiku-4-5',
      'claude-haiku-4-5-20250420',
      'claude-haiku-4-5-latest',
      'anthropic/claude-haiku-4-5',
      'claude-haiku-4-5/anthropic',
      'claude-haiku-4-5-preview',
    ];

    modelVariations.forEach((model) => {
      expect(matchModelName(model, EModelEndpoint.anthropic)).toBe('claude-haiku-4-5');
    });
  });

  it('should handle Claude 4 model name variations with different prefixes and suffixes', () => {
    const modelVariations = [
      'claude-sonnet-4',
@@ -858,3 +986,206 @@ describe('Kimi Model Tests', () => {
    });
  });
});

describe('Qwen3 Model Tests', () => {
  describe('getModelMaxTokens', () => {
    test('should return correct tokens for Qwen3 base pattern', () => {
      expect(getModelMaxTokens('qwen3')).toBe(maxTokensMap[EModelEndpoint.openAI]['qwen3']);
    });

    test('should return correct tokens for qwen3-4b (falls back to qwen3)', () => {
      expect(getModelMaxTokens('qwen3-4b')).toBe(maxTokensMap[EModelEndpoint.openAI]['qwen3']);
    });

    test('should return correct tokens for Qwen3 base models', () => {
      expect(getModelMaxTokens('qwen3-8b')).toBe(maxTokensMap[EModelEndpoint.openAI]['qwen3-8b']);
      expect(getModelMaxTokens('qwen3-14b')).toBe(maxTokensMap[EModelEndpoint.openAI]['qwen3-14b']);
      expect(getModelMaxTokens('qwen3-32b')).toBe(maxTokensMap[EModelEndpoint.openAI]['qwen3-32b']);
      expect(getModelMaxTokens('qwen3-235b-a22b')).toBe(
        maxTokensMap[EModelEndpoint.openAI]['qwen3-235b-a22b'],
      );
    });

    test('should return correct tokens for Qwen3 VL (Vision-Language) models', () => {
      expect(getModelMaxTokens('qwen3-vl-8b-thinking')).toBe(
        maxTokensMap[EModelEndpoint.openAI]['qwen3-vl-8b-thinking'],
      );
      expect(getModelMaxTokens('qwen3-vl-8b-instruct')).toBe(
        maxTokensMap[EModelEndpoint.openAI]['qwen3-vl-8b-instruct'],
      );
      expect(getModelMaxTokens('qwen3-vl-30b-a3b')).toBe(
        maxTokensMap[EModelEndpoint.openAI]['qwen3-vl-30b-a3b'],
      );
      expect(getModelMaxTokens('qwen3-vl-235b-a22b')).toBe(
        maxTokensMap[EModelEndpoint.openAI]['qwen3-vl-235b-a22b'],
      );
    });

    test('should return correct tokens for Qwen3 specialized models', () => {
      expect(getModelMaxTokens('qwen3-max')).toBe(maxTokensMap[EModelEndpoint.openAI]['qwen3-max']);
      expect(getModelMaxTokens('qwen3-coder')).toBe(
        maxTokensMap[EModelEndpoint.openAI]['qwen3-coder'],
      );
      expect(getModelMaxTokens('qwen3-coder-30b-a3b')).toBe(
        maxTokensMap[EModelEndpoint.openAI]['qwen3-coder-30b-a3b'],
      );
      expect(getModelMaxTokens('qwen3-coder-plus')).toBe(
        maxTokensMap[EModelEndpoint.openAI]['qwen3-coder-plus'],
      );
      expect(getModelMaxTokens('qwen3-coder-flash')).toBe(
        maxTokensMap[EModelEndpoint.openAI]['qwen3-coder-flash'],
      );
      expect(getModelMaxTokens('qwen3-next-80b-a3b')).toBe(
        maxTokensMap[EModelEndpoint.openAI]['qwen3-next-80b-a3b'],
      );
    });

    test('should handle Qwen3 models with provider prefixes', () => {
      expect(getModelMaxTokens('alibaba/qwen3')).toBe(maxTokensMap[EModelEndpoint.openAI]['qwen3']);
      expect(getModelMaxTokens('alibaba/qwen3-4b')).toBe(
        maxTokensMap[EModelEndpoint.openAI]['qwen3'],
      );
      expect(getModelMaxTokens('qwen/qwen3-8b')).toBe(
        maxTokensMap[EModelEndpoint.openAI]['qwen3-8b'],
      );
      expect(getModelMaxTokens('openrouter/qwen3-max')).toBe(
        maxTokensMap[EModelEndpoint.openAI]['qwen3-max'],
      );
      expect(getModelMaxTokens('alibaba/qwen3-vl-8b-instruct')).toBe(
        maxTokensMap[EModelEndpoint.openAI]['qwen3-vl-8b-instruct'],
      );
      expect(getModelMaxTokens('qwen/qwen3-coder')).toBe(
        maxTokensMap[EModelEndpoint.openAI]['qwen3-coder'],
      );
    });

    test('should handle Qwen3 models with suffixes', () => {
      expect(getModelMaxTokens('qwen3-preview')).toBe(maxTokensMap[EModelEndpoint.openAI]['qwen3']);
      expect(getModelMaxTokens('qwen3-4b-preview')).toBe(
        maxTokensMap[EModelEndpoint.openAI]['qwen3'],
      );
      expect(getModelMaxTokens('qwen3-8b-latest')).toBe(
        maxTokensMap[EModelEndpoint.openAI]['qwen3-8b'],
      );
      expect(getModelMaxTokens('qwen3-max-2024')).toBe(
        maxTokensMap[EModelEndpoint.openAI]['qwen3-max'],
      );
    });
  });

  describe('matchModelName', () => {
    test('should match exact Qwen3 model names', () => {
      expect(matchModelName('qwen3')).toBe('qwen3');
      expect(matchModelName('qwen3-4b')).toBe('qwen3');
      expect(matchModelName('qwen3-8b')).toBe('qwen3-8b');
      expect(matchModelName('qwen3-vl-8b-thinking')).toBe('qwen3-vl-8b-thinking');
      expect(matchModelName('qwen3-max')).toBe('qwen3-max');
      expect(matchModelName('qwen3-coder')).toBe('qwen3-coder');
|
||||
});
|
||||
|
||||
test('should match Qwen3 model variations with provider prefixes', () => {
|
||||
expect(matchModelName('alibaba/qwen3')).toBe('qwen3');
|
||||
expect(matchModelName('alibaba/qwen3-4b')).toBe('qwen3');
|
||||
expect(matchModelName('qwen/qwen3-8b')).toBe('qwen3-8b');
|
||||
expect(matchModelName('openrouter/qwen3-max')).toBe('qwen3-max');
|
||||
expect(matchModelName('alibaba/qwen3-vl-8b-instruct')).toBe('qwen3-vl-8b-instruct');
|
||||
expect(matchModelName('qwen/qwen3-coder')).toBe('qwen3-coder');
|
||||
});
|
||||
|
||||
test('should match Qwen3 model variations with suffixes', () => {
|
||||
expect(matchModelName('qwen3-preview')).toBe('qwen3');
|
||||
expect(matchModelName('qwen3-4b-preview')).toBe('qwen3');
|
||||
expect(matchModelName('qwen3-8b-latest')).toBe('qwen3-8b');
|
||||
expect(matchModelName('qwen3-max-2024')).toBe('qwen3-max');
|
||||
expect(matchModelName('qwen3-coder-v1')).toBe('qwen3-coder');
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe('GLM Model Tests (Zhipu AI)', () => {
|
||||
describe('getModelMaxTokens', () => {
|
||||
test('should return correct tokens for GLM models', () => {
|
||||
expect(getModelMaxTokens('glm-4.6')).toBe(200000);
|
||||
expect(getModelMaxTokens('glm-4.5v')).toBe(66000);
|
||||
expect(getModelMaxTokens('glm-4.5-air')).toBe(131000);
|
||||
expect(getModelMaxTokens('glm-4.5')).toBe(131000);
|
||||
expect(getModelMaxTokens('glm-4-32b')).toBe(128000);
|
||||
expect(getModelMaxTokens('glm-4')).toBe(128000);
|
||||
expect(getModelMaxTokens('glm4')).toBe(128000);
|
||||
});
|
||||
|
||||
test('should handle partial matches for GLM models with provider prefixes', () => {
|
||||
expect(getModelMaxTokens('z-ai/glm-4.6')).toBe(200000);
|
||||
expect(getModelMaxTokens('z-ai/glm-4.5')).toBe(131000);
|
||||
expect(getModelMaxTokens('z-ai/glm-4.5-air')).toBe(131000);
|
||||
expect(getModelMaxTokens('z-ai/glm-4.5v')).toBe(66000);
|
||||
expect(getModelMaxTokens('z-ai/glm-4-32b')).toBe(128000);
|
||||
|
||||
expect(getModelMaxTokens('zai/glm-4.6')).toBe(200000);
|
||||
expect(getModelMaxTokens('zai/glm-4.5')).toBe(131000);
|
||||
expect(getModelMaxTokens('zai/glm-4.5-air')).toBe(131000);
|
||||
expect(getModelMaxTokens('zai/glm-4.5v')).toBe(66000);
|
||||
|
||||
expect(getModelMaxTokens('zai-org/GLM-4.6')).toBe(200000);
|
||||
expect(getModelMaxTokens('zai-org/GLM-4.5')).toBe(131000);
|
||||
expect(getModelMaxTokens('zai-org/GLM-4.5-Air')).toBe(131000);
|
||||
expect(getModelMaxTokens('zai-org/GLM-4.5V')).toBe(66000);
|
||||
expect(getModelMaxTokens('zai-org/GLM-4-32B-0414')).toBe(128000);
|
||||
});
|
||||
|
||||
test('should handle GLM model variations with suffixes', () => {
|
||||
expect(getModelMaxTokens('glm-4.6-fp8')).toBe(200000);
|
||||
expect(getModelMaxTokens('zai-org/GLM-4.6-FP8')).toBe(200000);
|
||||
expect(getModelMaxTokens('zai-org/GLM-4.5-Air-FP8')).toBe(131000);
|
||||
});
|
||||
|
||||
test('should prioritize more specific GLM patterns', () => {
|
||||
expect(getModelMaxTokens('glm-4.5-air-custom')).toBe(131000);
|
||||
expect(getModelMaxTokens('glm-4.5-custom')).toBe(131000);
|
||||
expect(getModelMaxTokens('glm-4.5v-custom')).toBe(66000);
|
||||
});
|
||||
});
|
||||
|
||||
describe('matchModelName', () => {
|
||||
test('should match exact GLM model names', () => {
|
||||
expect(matchModelName('glm-4.6')).toBe('glm-4.6');
|
||||
expect(matchModelName('glm-4.5v')).toBe('glm-4.5v');
|
||||
expect(matchModelName('glm-4.5-air')).toBe('glm-4.5-air');
|
||||
expect(matchModelName('glm-4.5')).toBe('glm-4.5');
|
||||
expect(matchModelName('glm-4-32b')).toBe('glm-4-32b');
|
||||
expect(matchModelName('glm-4')).toBe('glm-4');
|
||||
expect(matchModelName('glm4')).toBe('glm4');
|
||||
});
|
||||
|
||||
test('should match GLM model variations with provider prefixes', () => {
|
||||
expect(matchModelName('z-ai/glm-4.6')).toBe('glm-4.6');
|
||||
expect(matchModelName('z-ai/glm-4.5')).toBe('glm-4.5');
|
||||
expect(matchModelName('z-ai/glm-4.5-air')).toBe('glm-4.5-air');
|
||||
expect(matchModelName('z-ai/glm-4.5v')).toBe('glm-4.5v');
|
||||
expect(matchModelName('z-ai/glm-4-32b')).toBe('glm-4-32b');
|
||||
|
||||
expect(matchModelName('zai/glm-4.6')).toBe('glm-4.6');
|
||||
expect(matchModelName('zai/glm-4.5')).toBe('glm-4.5');
|
||||
expect(matchModelName('zai/glm-4.5-air')).toBe('glm-4.5-air');
|
||||
expect(matchModelName('zai/glm-4.5v')).toBe('glm-4.5v');
|
||||
|
||||
expect(matchModelName('zai-org/GLM-4.6')).toBe('glm-4.6');
|
||||
expect(matchModelName('zai-org/GLM-4.5')).toBe('glm-4.5');
|
||||
expect(matchModelName('zai-org/GLM-4.5-Air')).toBe('glm-4.5-air');
|
||||
expect(matchModelName('zai-org/GLM-4.5V')).toBe('glm-4.5v');
|
||||
expect(matchModelName('zai-org/GLM-4-32B-0414')).toBe('glm-4-32b');
|
||||
});
|
||||
|
||||
test('should match GLM model variations with suffixes', () => {
|
||||
expect(matchModelName('glm-4.6-fp8')).toBe('glm-4.6');
|
||||
expect(matchModelName('zai-org/GLM-4.6-FP8')).toBe('glm-4.6');
|
||||
expect(matchModelName('zai-org/GLM-4.5-Air-FP8')).toBe('glm-4.5-air');
|
||||
});
|
||||
|
||||
test('should handle case-insensitive matching for GLM models', () => {
|
||||
expect(matchModelName('zai-org/GLM-4.6')).toBe('glm-4.6');
|
||||
expect(matchModelName('zai-org/GLM-4.5V')).toBe('glm-4.5v');
|
||||
expect(matchModelName('zai-org/GLM-4-32B-0414')).toBe('glm-4-32b');
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
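Taken together, these suites pin down one lookup behavior: strip an optional `provider/` prefix, match the remaining name case-insensitively against the configured keys, prefer the most specific key, and let variants without a dedicated entry (`qwen3-4b`, `glm-4.6-fp8`) fall back to their family key. A minimal sketch of that matching rule, assuming a longest-prefix strategy (an illustration only, not LibreChat's actual `findMatchingPattern`; the `tokenMap` entries are invented):

```ts
// Illustrative token map; keys mirror the test fixtures, values are invented.
const tokenMap: Record<string, number> = {
  'qwen3': 32768,
  'qwen3-8b': 32768,
  'glm-4.5': 131000,
  'glm-4.5-air': 131000,
  'glm-4.6': 200000,
};

/** Longest key that the lowercased, prefix-stripped model name starts with. */
function findMatchingPattern(modelName: string, map: Record<string, number>): string | undefined {
  const name = (modelName.toLowerCase().split('/').pop() ?? '').trim();
  let best: string | undefined;
  for (const key of Object.keys(map)) {
    if (name.startsWith(key) && (best === undefined || key.length > best.length)) {
      best = key;
    }
  }
  return best;
}

console.log(findMatchingPattern('alibaba/qwen3-4b', tokenMap)); // 'qwen3' (family fallback)
console.log(findMatchingPattern('zai-org/GLM-4.5-Air-FP8', tokenMap)); // 'glm-4.5-air' (most specific wins)
```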
@@ -1,4 +1,4 @@
/** v0.8.0 */
/** v0.8.1-rc1 */
module.exports = {
  roots: ['<rootDir>/src'],
  testEnvironment: 'jsdom',

@@ -1,6 +1,6 @@
{
  "name": "@librechat/frontend",
  "version": "v0.8.0",
  "version": "v0.8.1-rc1",
  "description": "",
  "type": "module",
  "scripts": {

@@ -136,6 +136,7 @@
    "babel-plugin-transform-import-meta": "^2.3.2",
    "babel-plugin-transform-vite-meta-env": "^1.0.3",
    "eslint-plugin-jest": "^28.11.0",
    "fs-extra": "^11.3.2",
    "identity-obj-proxy": "^3.0.0",
    "jest": "^29.7.0",
    "jest-canvas-mock": "^2.5.2",

@@ -148,7 +149,7 @@
    "tailwindcss": "^3.4.1",
    "ts-jest": "^29.2.5",
    "typescript": "^5.3.3",
    "vite": "^6.3.6",
    "vite": "^6.4.1",
    "vite-plugin-compression2": "^2.2.1",
    "vite-plugin-node-polyfills": "^0.23.0",
    "vite-plugin-pwa": "^0.21.2"

@@ -1,3 +1,4 @@
import { useEffect } from 'react';
import { RecoilRoot } from 'recoil';
import { DndProvider } from 'react-dnd';
import { RouterProvider } from 'react-router-dom';

@@ -8,6 +9,7 @@ import { Toast, ThemeProvider, ToastProvider } from '@librechat/client';
import { QueryClient, QueryClientProvider, QueryCache } from '@tanstack/react-query';
import { ScreenshotProvider, useApiErrorBoundary } from './hooks';
import { getThemeFromEnv } from './utils/getThemeFromEnv';
import { initializeFontSize } from '~/store/fontSize';
import { LiveAnnouncer } from '~/a11y';
import { router } from './routes';

@@ -24,6 +26,10 @@ const App = () => {
    }),
  });

  useEffect(() => {
    initializeFontSize();
  }, []);

  // Load theme from environment variables if available
  const envTheme = getThemeFromEnv();

@@ -35,9 +35,7 @@ export function AgentPanelProvider({ children }: { children: React.ReactNode })
    enabled: !isEphemeralAgent(agent_id),
  });

  const { data: regularTools } = useAvailableToolsQuery(EModelEndpoint.agents, {
    enabled: !isEphemeralAgent(agent_id),
  });
  const { data: regularTools } = useAvailableToolsQuery(EModelEndpoint.agents);

  const { data: mcpData } = useMCPToolsQuery({
    enabled: !isEphemeralAgent(agent_id) && startupConfig?.mcpServers != null,

@@ -1,23 +1,38 @@
import React, { createContext, useContext, useMemo } from 'react';
import type { EModelEndpoint } from 'librechat-data-provider';
import { useGetEndpointsQuery } from '~/data-provider';
import { getEndpointField } from '~/utils/endpoints';
import { useChatContext } from './ChatContext';

interface DragDropContextValue {
  conversationId: string | null | undefined;
  agentId: string | null | undefined;
  endpoint: string | null | undefined;
  endpointType?: EModelEndpoint | undefined;
}

const DragDropContext = createContext<DragDropContextValue | undefined>(undefined);

export function DragDropProvider({ children }: { children: React.ReactNode }) {
  const { conversation } = useChatContext();
  const { data: endpointsConfig } = useGetEndpointsQuery();

  const endpointType = useMemo(() => {
    return (
      getEndpointField(endpointsConfig, conversation?.endpoint, 'type') ||
      (conversation?.endpoint as EModelEndpoint | undefined)
    );
  }, [conversation?.endpoint, endpointsConfig]);

  /** Context value only created when conversation fields change */
  const contextValue = useMemo<DragDropContextValue>(
    () => ({
      conversationId: conversation?.conversationId,
      agentId: conversation?.agent_id,
      endpoint: conversation?.endpoint,
      endpointType: endpointType,
    }),
    [conversation?.conversationId, conversation?.agent_id],
    [conversation?.conversationId, conversation?.agent_id, conversation?.endpoint, endpointType],
  );

  return <DragDropContext.Provider value={contextValue}>{children}</DragDropContext.Provider>;
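The hunk above only shows the context and provider; the `useDragDropContext` hook consumed later in the `DragDropModal` hunk is not part of this diff. A sketch of the conventional consumer, assuming it lives alongside the provider:

```ts
// Sketch of the consumer hook implied by the DragDropModal hunk further down;
// the actual export is not shown in this diff.
export function useDragDropContext(): DragDropContextValue {
  const context = useContext(DragDropContext);
  if (context === undefined) {
    throw new Error('useDragDropContext must be used within a DragDropProvider');
  }
  return context;
}
```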
@@ -11,9 +11,9 @@ import {
  AgentListResponse,
} from 'librechat-data-provider';
import type t from 'librechat-data-provider';
import { renderAgentAvatar, clearMessagesCache } from '~/utils';
import { useLocalize, useDefaultConvo } from '~/hooks';
import { useChatContext } from '~/Providers';
import { renderAgentAvatar } from '~/utils';

interface SupportContact {
  name?: string;

@@ -56,10 +56,7 @@ const AgentDetail: React.FC<AgentDetailProps> = ({ agent, isOpen, onClose }) =>

    localStorage.setItem(`${LocalStorageKeys.AGENT_ID_PREFIX}0`, agent.id);

    queryClient.setQueryData<t.TMessage[]>(
      [QueryKeys.messages, conversation?.conversationId ?? Constants.NEW_CONVO],
      [],
    );
    clearMessagesCache(queryClient, conversation?.conversationId);
    queryClient.invalidateQueries([QueryKeys.messages]);

    /** Template with agent configuration */

@@ -4,7 +4,7 @@ import { useOutletContext } from 'react-router-dom';
import { useQueryClient } from '@tanstack/react-query';
import { useSearchParams, useParams, useNavigate } from 'react-router-dom';
import { TooltipAnchor, Button, NewChatIcon, useMediaQuery } from '@librechat/client';
import { PermissionTypes, Permissions, QueryKeys, Constants } from 'librechat-data-provider';
import { PermissionTypes, Permissions, QueryKeys } from 'librechat-data-provider';
import type t from 'librechat-data-provider';
import type { ContextType } from '~/common';
import { useDocumentTitle, useHasAccess, useLocalize, TranslationKeys } from '~/hooks';

@@ -13,11 +13,11 @@ import MarketplaceAdminSettings from './MarketplaceAdminSettings';
import { SidePanelProvider, useChatContext } from '~/Providers';
import { SidePanelGroup } from '~/components/SidePanel';
import { OpenSidebar } from '~/components/Chat/Menus';
import { cn, clearMessagesCache } from '~/utils';
import CategoryTabs from './CategoryTabs';
import AgentDetail from './AgentDetail';
import SearchBar from './SearchBar';
import AgentGrid from './AgentGrid';
import { cn } from '~/utils';
import store from '~/store';

interface AgentMarketplaceProps {

@@ -224,10 +224,7 @@ const AgentMarketplace: React.FC<AgentMarketplaceProps> = ({ className = '' }) =
      window.open('/c/new', '_blank');
      return;
    }
    queryClient.setQueryData<t.TMessage[]>(
      [QueryKeys.messages, conversation?.conversationId ?? Constants.NEW_CONVO],
      [],
    );
    clearMessagesCache(queryClient, conversation?.conversationId);
    queryClient.invalidateQueries([QueryKeys.messages]);
    newConversation();
  };
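Both hunks above replace the same inline cache reset with a shared `clearMessagesCache` helper. Judging from the removed lines, the helper presumably wraps that exact `setQueryData` call; a sketch with the signature inferred from the two call sites:

```ts
import type { QueryClient } from '@tanstack/react-query';
import { QueryKeys, Constants } from 'librechat-data-provider';
import type t from 'librechat-data-provider';

/** Sketch of the shared helper, inferred from the inline code it replaces above. */
export function clearMessagesCache(queryClient: QueryClient, conversationId?: string | null) {
  queryClient.setQueryData<t.TMessage[]>(
    [QueryKeys.messages, conversationId ?? Constants.NEW_CONVO],
    [],
  );
}
```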
@@ -58,6 +58,7 @@ const LabelController: React.FC<LabelControllerProps> = ({
        checked={field.value}
        onCheckedChange={field.onChange}
        value={field.value.toString()}
        aria-label={label}
      />
    )}
  />

@@ -194,7 +194,7 @@ describe('Virtual Scrolling Performance', () => {

    // Performance check: rendering should be fast
    const renderTime = endTime - startTime;
    expect(renderTime).toBeLessThan(720);
    expect(renderTime).toBeLessThan(740);

    console.log(`Rendered 1000 agents in ${renderTime.toFixed(2)}ms`);
    console.log(`Only ${renderedCards.length} DOM nodes created for 1000 agents`);

@@ -144,9 +144,10 @@ export const ArtifactCodeEditor = function ({
    }
    return {
      ...sharedOptions,
      activeFile: '/' + fileKey,
      bundlerURL: template === 'static' ? config.staticBundlerURL : config.bundlerURL,
    };
  }, [config, template]);
  }, [config, template, fileKey]);
  const [readOnly, setReadOnly] = useState(isSubmitting ?? false);
  useEffect(() => {
    setReadOnly(isSubmitting ?? false);
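The one-line dependency change is the whole fix here: the memoized options object reads `fileKey`, but the old array `[config, template]` omitted it, so switching artifact files kept serving a stale `activeFile`. A self-contained illustration of the pattern (the hook name and types are made up):

```ts
import { useMemo } from 'react';

// Illustrative hook: a memo must list every reactive value it reads.
function useSandpackOptions(config: { bundlerURL: string }, template: string, fileKey: string) {
  return useMemo(
    () => ({ activeFile: '/' + fileKey, bundlerURL: config.bundlerURL }),
    // With fileKey omitted, this object would not be recomputed when only the
    // active file changed; that is exactly the stale-value bug the hunk fixes.
    [config, template, fileKey],
  );
}
```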
@@ -13,7 +13,6 @@ export const ArtifactPreview = memo(function ({
  files,
  fileKey,
  template,
  isMermaid,
  sharedProps,
  previewRef,
  currentCode,

@@ -21,7 +20,6 @@
}: {
  files: ArtifactFiles;
  fileKey: string;
  isMermaid: boolean;
  template: SandpackProviderProps['template'];
  sharedProps: Partial<SandpackProviderProps>;
  previewRef: React.MutableRefObject<SandpackPreviewRef>;

@@ -56,15 +54,6 @@
    return _options;
  }, [startupConfig, template]);

  const style: PreviewProps['style'] | undefined = useMemo(() => {
    if (isMermaid) {
      return {
        backgroundColor: '#282C34',
      };
    }
    return;
  }, [isMermaid]);

  if (Object.keys(artifactFiles).length === 0) {
    return null;
  }

@@ -84,7 +73,6 @@
        showRefreshButton={false}
        tabIndex={0}
        ref={previewRef}
        style={style}
      />
    </SandpackProvider>
  );

@@ -8,17 +8,14 @@ import { useAutoScroll } from '~/hooks/Artifacts/useAutoScroll';
import { ArtifactCodeEditor } from './ArtifactCodeEditor';
import { useGetStartupConfig } from '~/data-provider';
import { ArtifactPreview } from './ArtifactPreview';
import { MermaidMarkdown } from './MermaidMarkdown';
import { cn } from '~/utils';

export default function ArtifactTabs({
  artifact,
  isMermaid,
  editorRef,
  previewRef,
}: {
  artifact: Artifact;
  isMermaid: boolean;
  editorRef: React.MutableRefObject<CodeEditorRef>;
  previewRef: React.MutableRefObject<SandpackPreviewRef>;
}) {

@@ -44,26 +41,22 @@
      value="code"
      id="artifacts-code"
      className={cn('flex-grow overflow-auto')}
      tabIndex={-1}
    >
      {isMermaid ? (
        <MermaidMarkdown content={content} isSubmitting={isSubmitting} />
      ) : (
        <ArtifactCodeEditor
          files={files}
          fileKey={fileKey}
          template={template}
          artifact={artifact}
          editorRef={editorRef}
          sharedProps={sharedProps}
        />
      )}
      <ArtifactCodeEditor
        files={files}
        fileKey={fileKey}
        template={template}
        artifact={artifact}
        editorRef={editorRef}
        sharedProps={sharedProps}
      />
    </Tabs.Content>
    <Tabs.Content value="preview" className="flex-grow overflow-auto">
    <Tabs.Content value="preview" className="flex-grow overflow-auto" tabIndex={-1}>
      <ArtifactPreview
        files={files}
        fileKey={fileKey}
        template={template}
        isMermaid={isMermaid}
        previewRef={previewRef}
        sharedProps={sharedProps}
        currentCode={currentCode}

@@ -27,7 +27,6 @@ export default function Artifacts() {

  const {
    activeTab,
    isMermaid,
    setActiveTab,
    currentIndex,
    cycleArtifact,

@@ -116,7 +115,6 @@
      </div>
      {/* Content */}
      <ArtifactTabs
        isMermaid={isMermaid}
        artifact={currentArtifact}
        editorRef={editorRef as React.MutableRefObject<CodeEditorRef>}
        previewRef={previewRef as React.MutableRefObject<SandpackPreviewRef>}

@@ -1,11 +0,0 @@
import { CodeMarkdown } from './Code';

export function MermaidMarkdown({
  content,
  isSubmitting,
}: {
  content: string;
  isSubmitting: boolean;
}) {
  return <CodeMarkdown content={`\`\`\`mermaid\n${content}\`\`\``} isSubmitting={isSubmitting} />;
}

@@ -19,9 +19,11 @@ export function BrowserVoiceDropdown() {
    }
  };

  const labelId = 'browser-voice-dropdown-label';

  return (
    <div className="flex items-center justify-between">
      <div>{localize('com_nav_voice_select')}</div>
      <div id={labelId}>{localize('com_nav_voice_select')}</div>
      <Dropdown
        key={`browser-voice-dropdown-${voices.length}`}
        value={voice ?? ''}

@@ -30,6 +32,7 @@
        sizeClasses="min-w-[200px] !max-w-[400px] [--anchor-max-width:400px]"
        testId="BrowserVoiceDropdown"
        className="z-50"
        aria-labelledby={labelId}
      />
    </div>
  );

@@ -48,9 +51,11 @@ export function ExternalVoiceDropdown() {
    }
  };

  const labelId = 'external-voice-dropdown-label';

  return (
    <div className="flex items-center justify-between">
      <div>{localize('com_nav_voice_select')}</div>
      <div id={labelId}>{localize('com_nav_voice_select')}</div>
      <Dropdown
        key={`external-voice-dropdown-${voices.length}`}
        value={voice ?? ''}

@@ -59,6 +64,7 @@
        sizeClasses="min-w-[200px] !max-w-[400px] [--anchor-max-width:400px]"
        testId="ExternalVoiceDropdown"
        className="z-50"
        aria-labelledby={labelId}
      />
    </div>
  );
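Both dropdown hunks apply the same accessibility fix: give the visible label an `id` and point the control at it via `aria-labelledby`, so assistive technology announces the label when the dropdown receives focus. The pattern in isolation (a sketch using a native `select`; the real code uses the `Dropdown` component):

```tsx
import React from 'react';

// Sketch of the labelling pattern from the two hunks above.
function LabeledVoiceSelect({ children }: { children: React.ReactNode }) {
  const labelId = 'voice-dropdown-label';
  return (
    <div className="flex items-center justify-between">
      <div id={labelId}>Voice</div>
      {/* aria-labelledby ties the control to the visible label for screen readers */}
      <select aria-labelledby={labelId}>{children}</select>
    </div>
  );
}
```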
@@ -129,7 +129,11 @@ const BookmarkForm = ({
      </div>

      <div className="mt-4 grid w-full items-center gap-2">
        <Label htmlFor="bookmark-description" className="text-left text-sm font-medium">
        <Label
          id="bookmark-description-label"
          htmlFor="bookmark-description"
          className="text-left text-sm font-medium"
        >
          {localize('com_ui_bookmarks_description')}
        </Label>
        <TextareaAutosize

@@ -147,6 +151,7 @@
          className={cn(
            'flex h-10 max-h-[250px] min-h-[100px] w-full resize-none rounded-lg border border-input bg-transparent px-3 py-2 text-sm ring-offset-background focus-visible:outline-none',
          )}
          aria-labelledby="bookmark-description-label"
        />
      </div>
      {conversationId != null && conversationId && (

@@ -161,6 +166,7 @@
              onCheckedChange={field.onChange}
              className="relative float-left mr-2 inline-flex h-4 w-4 cursor-pointer"
              value={field.value?.toString()}
              aria-label={localize('com_ui_bookmarks_add_to_conversation')}
            />
          )}
        />

@@ -12,6 +12,7 @@ import {
import {
  useTextarea,
  useAutoSave,
  useLocalize,
  useRequiresKey,
  useHandleKeyUp,
  useQueryParams,

@@ -38,6 +39,7 @@ const ChatForm = memo(({ index = 0 }: { index?: number }) => {
  const submitButtonRef = useRef<HTMLButtonElement>(null);
  const textAreaRef = useRef<HTMLTextAreaElement>(null);
  useFocusChatEffect(textAreaRef);
  const localize = useLocalize();

  const [isCollapsed, setIsCollapsed] = useState(false);
  const [, setIsScrollable] = useState(false);

@@ -220,6 +222,7 @@
      <div className={cn('flex w-full items-center', isRTL && 'flex-row-reverse')}>
        {showPlusPopover && !isAssistantsEndpoint(endpoint) && (
          <Mention
            conversation={conversation}
            setShowMentionPopover={setShowPlusPopover}
            newConversation={generateConversation}
            textAreaRef={textAreaRef}

@@ -230,6 +233,7 @@
        )}
        {showMentionPopover && (
          <Mention
            conversation={conversation}
            setShowMentionPopover={setShowMentionPopover}
            newConversation={newConversation}
            textAreaRef={textAreaRef}

@@ -277,6 +281,7 @@
            setIsTextAreaFocused(true);
          }}
          onBlur={setIsTextAreaFocused.bind(null, false)}
          aria-label={localize('com_ui_message_input')}
          onClick={handleFocusOrClick}
          style={{ height: 44, overflowY: 'auto' }}
          className={cn(

@@ -2,13 +2,15 @@ import { memo, useMemo } from 'react';
import {
  Constants,
  supportsFiles,
  EModelEndpoint,
  mergeFileConfig,
  isAgentsEndpoint,
  isAssistantsEndpoint,
  fileConfig as defaultFileConfig,
} from 'librechat-data-provider';
import type { EndpointFileConfig, TConversation } from 'librechat-data-provider';
import { useGetFileConfig } from '~/data-provider';
import { useGetFileConfig, useGetEndpointsQuery } from '~/data-provider';
import { getEndpointField } from '~/utils/endpoints';
import AttachFileMenu from './AttachFileMenu';
import AttachFile from './AttachFile';

@@ -20,7 +22,7 @@ function AttachFileChat({
  conversation: TConversation | null;
}) {
  const conversationId = conversation?.conversationId ?? Constants.NEW_CONVO;
  const { endpoint, endpointType } = conversation ?? { endpoint: null };
  const { endpoint } = conversation ?? { endpoint: null };
  const isAgents = useMemo(() => isAgentsEndpoint(endpoint), [endpoint]);
  const isAssistants = useMemo(() => isAssistantsEndpoint(endpoint), [endpoint]);

@@ -28,6 +30,15 @@
    select: (data) => mergeFileConfig(data),
  });

  const { data: endpointsConfig } = useGetEndpointsQuery();

  const endpointType = useMemo(() => {
    return (
      getEndpointField(endpointsConfig, endpoint, 'type') ||
      (endpoint as EModelEndpoint | undefined)
    );
  }, [endpoint, endpointsConfig]);

  const endpointFileConfig = fileConfig.endpoints[endpoint ?? ''] as EndpointFileConfig | undefined;
  const endpointSupportsFiles: boolean = supportsFiles[endpointType ?? endpoint ?? ''] ?? false;
  const isUploadDisabled = (disableInputs || endpointFileConfig?.disabled) ?? false;

@@ -37,7 +48,9 @@
  } else if (isAgents || (endpointSupportsFiles && !isUploadDisabled)) {
    return (
      <AttachFileMenu
        endpoint={endpoint}
        disabled={disableInputs}
        endpointType={endpointType}
        conversationId={conversationId}
        agentId={conversation?.agent_id}
        endpointFileConfig={endpointFileConfig}

@@ -1,8 +1,19 @@
import React, { useRef, useState, useMemo } from 'react';
import * as Ariakit from '@ariakit/react';
import { useRecoilState } from 'recoil';
import { FileSearch, ImageUpIcon, TerminalSquareIcon, FileType2Icon } from 'lucide-react';
import { EToolResources, EModelEndpoint, defaultAgentCapabilities } from 'librechat-data-provider';
import * as Ariakit from '@ariakit/react';
import {
  FileSearch,
  ImageUpIcon,
  FileType2Icon,
  FileImageIcon,
  TerminalSquareIcon,
} from 'lucide-react';
import {
  EToolResources,
  EModelEndpoint,
  defaultAgentCapabilities,
  isDocumentSupportedProvider,
} from 'librechat-data-provider';
import {
  FileUpload,
  TooltipAnchor,

@@ -26,15 +37,19 @@ import { MenuItemProps } from '~/common';
import { cn } from '~/utils';

interface AttachFileMenuProps {
  conversationId: string;
  agentId?: string | null;
  endpoint?: string | null;
  disabled?: boolean | null;
  conversationId: string;
  endpointType?: EModelEndpoint;
  endpointFileConfig?: EndpointFileConfig;
}

const AttachFileMenu = ({
  agentId,
  endpoint,
  disabled,
  endpointType,
  conversationId,
  endpointFileConfig,
}: AttachFileMenuProps) => {

@@ -55,44 +70,75 @@
    overrideEndpointFileConfig: endpointFileConfig,
    toolResource,
  });

  const { agentsConfig } = useGetAgentsConfig();
  const { data: startupConfig } = useGetStartupConfig();
  const sharePointEnabled = startupConfig?.sharePointFilePickerEnabled;

  const [isSharePointDialogOpen, setIsSharePointDialogOpen] = useState(false);
  const { agentsConfig } = useGetAgentsConfig();

  /** TODO: Ephemeral Agent Capabilities
   * Allow defining agent capabilities on a per-endpoint basis
   * Use definition for agents endpoint for ephemeral agents
   * */
  const capabilities = useAgentCapabilities(agentsConfig?.capabilities ?? defaultAgentCapabilities);

  const { fileSearchAllowedByAgent, codeAllowedByAgent } = useAgentToolPermissions(
  const { fileSearchAllowedByAgent, codeAllowedByAgent, provider } = useAgentToolPermissions(
    agentId,
    ephemeralAgent,
  );

  const handleUploadClick = (isImage?: boolean) => {
  const handleUploadClick = (
    fileType?: 'image' | 'document' | 'multimodal' | 'google_multimodal',
  ) => {
    if (!inputRef.current) {
      return;
    }
    inputRef.current.value = '';
    inputRef.current.accept = isImage === true ? 'image/*' : '';
    if (fileType === 'image') {
      inputRef.current.accept = 'image/*';
    } else if (fileType === 'document') {
      inputRef.current.accept = '.pdf,application/pdf';
    } else if (fileType === 'multimodal') {
      inputRef.current.accept = 'image/*,.pdf,application/pdf';
    } else if (fileType === 'google_multimodal') {
      inputRef.current.accept = 'image/*,.pdf,application/pdf,video/*,audio/*';
    } else {
      inputRef.current.accept = '';
    }
    inputRef.current.click();
    inputRef.current.accept = '';
  };

  const dropdownItems = useMemo(() => {
    const createMenuItems = (onAction: (isImage?: boolean) => void) => {
      const items: MenuItemProps[] = [
        {
    const createMenuItems = (
      onAction: (fileType?: 'image' | 'document' | 'multimodal' | 'google_multimodal') => void,
    ) => {
      const items: MenuItemProps[] = [];

      const currentProvider = provider || endpoint;

      if (isDocumentSupportedProvider(currentProvider || endpointType)) {
        items.push({
          label: localize('com_ui_upload_provider'),
          onClick: () => {
            setToolResource(undefined);
            onAction(
              (provider || endpoint) === EModelEndpoint.google ? 'google_multimodal' : 'multimodal',
            );
          },
          icon: <FileImageIcon className="icon-md" />,
        });
      } else {
        items.push({
          label: localize('com_ui_upload_image_input'),
          onClick: () => {
            setToolResource(undefined);
            onAction(true);
            onAction('image');
          },
          icon: <ImageUpIcon className="icon-md" />,
        },
      ];
        });
      }

      if (capabilities.contextEnabled) {
        items.push({

@@ -156,8 +202,11 @@

    return localItems;
  }, [
    capabilities,
    localize,
    endpoint,
    provider,
    endpointType,
    capabilities,
    setToolResource,
    setEphemeralAgent,
    sharePointEnabled,
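The rewritten `handleUploadClick` is, at heart, a mapping from the requested upload kind to an `accept` string for the hidden file input: document-capable providers get PDFs alongside images, and Google additionally gets video and audio. The same mapping factored out as a lookup table (a sketch; the component sets these strings inline):

```ts
type UploadFileType = 'image' | 'document' | 'multimodal' | 'google_multimodal';

/** Accept strings per upload kind, mirroring the branches in handleUploadClick. */
const acceptByFileType: Record<UploadFileType, string> = {
  image: 'image/*',
  document: '.pdf,application/pdf',
  multimodal: 'image/*,.pdf,application/pdf',
  google_multimodal: 'image/*,.pdf,application/pdf,video/*,audio/*',
};

function acceptFor(fileType?: UploadFileType): string {
  // undefined means "no restriction", matching the final else branch above.
  return fileType ? acceptByFileType[fileType] : '';
}

console.log(acceptFor('google_multimodal')); // 'image/*,.pdf,application/pdf,video/*,audio/*'
```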
@@ -1,8 +1,19 @@
import React, { useMemo } from 'react';
import { useRecoilValue } from 'recoil';
import { OGDialog, OGDialogTemplate } from '@librechat/client';
import { EToolResources, defaultAgentCapabilities } from 'librechat-data-provider';
import { ImageUpIcon, FileSearch, TerminalSquareIcon, FileType2Icon } from 'lucide-react';
import {
  EToolResources,
  EModelEndpoint,
  defaultAgentCapabilities,
  isDocumentSupportedProvider,
} from 'librechat-data-provider';
import {
  ImageUpIcon,
  FileSearch,
  FileType2Icon,
  FileImageIcon,
  TerminalSquareIcon,
} from 'lucide-react';
import {
  useAgentToolPermissions,
  useAgentCapabilities,

@@ -34,22 +45,45 @@ const DragDropModal = ({ onOptionSelect, setShowModal, files, isVisible }: DragD
   * Use definition for agents endpoint for ephemeral agents
   * */
  const capabilities = useAgentCapabilities(agentsConfig?.capabilities ?? defaultAgentCapabilities);
  const { conversationId, agentId } = useDragDropContext();
  const { conversationId, agentId, endpoint, endpointType } = useDragDropContext();
  const ephemeralAgent = useRecoilValue(ephemeralAgentByConvoId(conversationId ?? ''));
  const { fileSearchAllowedByAgent, codeAllowedByAgent } = useAgentToolPermissions(
  const { fileSearchAllowedByAgent, codeAllowedByAgent, provider } = useAgentToolPermissions(
    agentId,
    ephemeralAgent,
  );

  const options = useMemo(() => {
    const _options: FileOption[] = [
      {
    const _options: FileOption[] = [];
    const currentProvider = provider || endpoint;

    // Check if provider supports document upload
    if (isDocumentSupportedProvider(currentProvider || endpointType)) {
      const isGoogleProvider = currentProvider === EModelEndpoint.google;
      const validFileTypes = isGoogleProvider
        ? files.every(
            (file) =>
              file.type?.startsWith('image/') ||
              file.type?.startsWith('video/') ||
              file.type?.startsWith('audio/') ||
              file.type === 'application/pdf',
          )
        : files.every((file) => file.type?.startsWith('image/') || file.type === 'application/pdf');

      _options.push({
        label: localize('com_ui_upload_provider'),
        value: undefined,
        icon: <FileImageIcon className="icon-md" />,
        condition: validFileTypes,
      });
    } else {
      // Only show image upload option if all files are images and provider doesn't support documents
      _options.push({
        label: localize('com_ui_upload_image_input'),
        value: undefined,
        icon: <ImageUpIcon className="icon-md" />,
        condition: files.every((file) => file.type?.startsWith('image/')),
      },
    ];
      });
    }
    if (capabilities.fileSearchEnabled && fileSearchAllowedByAgent) {
      _options.push({
        label: localize('com_ui_upload_file_search'),

@@ -73,7 +107,16 @@
    }

    return _options;
  }, [capabilities, files, localize, fileSearchAllowedByAgent, codeAllowedByAgent]);
  }, [
    files,
    localize,
    provider,
    endpoint,
    endpointType,
    capabilities,
    codeAllowedByAgent,
    fileSearchAllowedByAgent,
  ]);

  if (!isVisible) {
    return null;
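The `validFileTypes` check in this hunk is the drag-and-drop counterpart of those accept strings: every dropped file must be an image or PDF, with video and audio also admitted for Google providers. Extracted as a standalone predicate (a sketch; the component computes this inline):

```ts
// Sketch of the validFileTypes logic from the DragDropModal hunk above.
function areFilesProviderUploadable(files: File[], isGoogleProvider: boolean): boolean {
  const isAllowed = (file: File) =>
    file.type?.startsWith('image/') ||
    file.type === 'application/pdf' ||
    (isGoogleProvider && (file.type?.startsWith('video/') || file.type?.startsWith('audio/')));
  return files.every(isAllowed);
}
```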
Some files were not shown because too many files have changed in this diff.