Compare commits


32 Commits

Author SHA1 Message Date
Ruben Talstra
32a0998e4d Merge branch 'dev' into feat/openid-custom-data 2025-05-16 07:55:32 +02:00
Ruben Talstra
8f460b9f75 Merge branch 'dev' into feat/openid-custom-data 2025-05-15 17:22:15 +02:00
Ruben Talstra
c925f9f39c 🚀 feat: Add Cloudflare Turnstile support (#5987)
* 🚀 feat: Add @marsidev/react-turnstile dependency to package.json and package-lock.json

* 🚀 feat: Integrate Cloudflare Turnstile configuration support in AppService and add schema validation

* 🚀 feat: Implemented Cloudflare Turnstile integration in Login and Registration forms

* 🚀 feat: Enhance AppService tests with additional mocks and configuration setups

* 🚀 feat: Comment out outdated config version warning tests in AppService.spec.js

* 🚀 feat: Remove outdated warning tests and add new checks for environment variables and API health

* 🔧 test: Update AppService.spec.js to use expect.anything() for paths validation

* 🔧 test: Refactor AppService.spec.js to streamline mocks and enhance clarity

* 🔧 chore: removed unneeded test

* Potential fix for code scanning alert no. 5638: Ensure code is properly formatted, use insertion, deletion, or replacement to obtain desired formatting.

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Potential fix for code scanning alert no. 5629: Ensure code is properly formatted, use insertion, deletion, or replacement to obtain desired formatting.

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Potential fix for code scanning alert no. 5642: Ensure code is properly formatted, use insertion, deletion, or replacement to obtain desired formatting.

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Update turnstile.js

* Potential fix for code scanning alert no. 5634: Ensure code is properly formatted, use insertion, deletion, or replacement to obtain desired formatting.

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Potential fix for code scanning alert no. 5646: Ensure code is properly formatted, use insertion, deletion, or replacement to obtain desired formatting.

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Potential fix for code scanning alert no. 5647: Ensure code is properly formatted, use insertion, deletion, or replacement to obtain desired formatting.

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

---------

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
2025-05-15 09:38:58 -04:00
matt burnett
71effb1a66 🔃 refactor: AgentFooter to conditionally render buttons based on activePanel (#7306) 2025-05-15 09:37:14 -04:00
andresgit
e3acd18c07 🎨 feat: add copy-tex to improve copying KaTeX (#7308)
When selecting and copy-pasting equations, the correct LaTeX source is copied.

Co-authored-by: Ruben Talstra <RubenTalstra1211@outlook.com>
2025-05-15 09:35:48 -04:00
Ruben Talstra
b661057b97 fix conflict 2025-05-14 21:12:56 +02:00
Ruben Talstra
0d0f408d85 Merge branch 'main' into feat/openid-custom-data 2025-05-14 21:11:58 +02:00
Ruben Talstra
eae6a969f4 fix conflict 2025-05-14 21:11:41 +02:00
Ruben Talstra
4a72821d55 Merge branch 'main' into feat/openid-custom-data 2025-04-11 15:14:01 +02:00
Ruben Talstra
284fc82d8e 🔧 chore: Bump version to 0.0.7 in package.json and package-lock.json 2025-04-10 19:15:10 +02:00
Ruben Talstra
20ad59c6f5 🔧 chore: Bump version to 0.7.791 and update dependencies 2025-04-10 19:14:13 +02:00
Ruben Talstra
f0a42d20a2 Merge branch 'main' into feat/openid-custom-data 2025-04-10 19:13:03 +02:00
Ruben Talstra
1cfb9f1b3a 🚀 feat: Bump package version to 0.7.790 2025-04-10 19:12:48 +02:00
Ruben Talstra
232cdaa5f7 🚀 feat: Bump package version to 0.7.790 2025-04-10 19:11:36 +02:00
Ruben Talstra
b4f57a18a7 Merge branch 'main' into feat/openid-custom-data 2025-03-25 23:32:59 +01:00
Ruben Talstra
291f76207f 🚀 feat: Refactor customOpenIdData handling to use an object instead of Map and update return types 2025-03-24 10:00:11 +01:00
Ruben Talstra
51cfd9a520 🚀 feat: Bump package version to 0.7.75 2025-03-24 09:37:22 +01:00
Ruben Talstra
2267a251fa 🚀 feat: Change customOpenIdData to be optional and use Record<string, unknown> type 2025-03-24 09:36:12 +01:00
Ruben Talstra
7a5be00f71 🚀 feat: Update openid-client dependency to version 5.7.1 2025-03-24 09:32:23 +01:00
Ruben Talstra
3628572aea 🚀 feat: Update package version to 0.0.6 and add Microsoft Graph client dependency 2025-03-24 09:31:41 +01:00
Ruben Talstra
b8215c314f Merge branch 'main' into feat/openid-custom-data 2025-03-24 09:23:30 +01:00
Ruben Talstra
ec1a31e852 refactor: remove customOpenIdData property from user schema 2025-03-24 09:23:08 +01:00
Ruben Talstra
5c01eaa36c chore: update .env.example to include OPENID_BUTTON_LABEL and OPENID_IMAGE_URL 2025-03-24 09:21:19 +01:00
Ruben Talstra
8f783180a6 chore: update dependencies for openid-client and passport 2025-03-24 09:19:37 +01:00
Ruben Talstra
cd922131a9 fix: package issue update. 2025-02-12 13:38:15 +01:00
Ruben Talstra
102e79b185 refactor: refactored the openidStrategy.js so it's more readable and understandable. 2025-02-12 13:22:39 +01:00
Ruben Talstra
65a0e1db54 fixed: package issue 2025-02-12 13:12:56 +01:00
Ruben Talstra
f1e031a9f5 refactor: updated the code based on suggestions. 2025-02-12 13:07:13 +01:00
Ruben Talstra
244b9f94dc Merge branch 'main' into feat/openid-custom-data 2025-02-12 12:58:58 +01:00
Ruben Talstra
17afeb5c36 chore: resolving conflicts 2025-02-12 12:58:41 +01:00
Ruben Talstra
ce407626fd feat: add missing package: @microsoft/microsoft-graph-client 2025-02-11 16:48:16 +01:00
Ruben Talstra
2ef6e4462d feat: Add custom fields & role assignment to OpenID strategy (#5612)
* started with Support for Customizable OpenID Profile Fields via Environment Variable

* kept as much of the original code as possible but still added the custom data mapper

* kept as much of the original code as possible but still added the custom data mapper

* resolved merge conflicts

* resolved merge conflicts

* resolved merge conflicts

* resolved merge conflicts

* removed some unneeded comments

* fix: conflicted issue

---------

Co-authored-by: Talstra Ruben SRSNL <ruben.talstra@stadlerrail.com>
2025-02-11 16:42:05 +01:00
281 changed files with 4092 additions and 15786 deletions

View File

@@ -20,8 +20,8 @@ DOMAIN_CLIENT=http://localhost:3080
DOMAIN_SERVER=http://localhost:3080
NO_INDEX=true
# Use the address that is at most n number of hops away from the Express application.
# req.socket.remoteAddress is the first hop, and the rest are looked for in the X-Forwarded-For header from right to left.
# A value of 0 means that the first untrusted address would be req.socket.remoteAddress, i.e. there is no reverse proxy.
# Defaulted to 1.
TRUST_PROXY=1
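
A minimal sketch of what TRUST_PROXY=1 means in practice for Express (illustrative only, not LibreChat's actual server setup):

const express = require('express');
const app = express();

// Mirror TRUST_PROXY=1: trust exactly one hop (the reverse proxy itself).
app.set('trust proxy', Number(process.env.TRUST_PROXY ?? 1));

app.get('/ip', (req, res) => {
  // With one trusted hop, req.ip is taken from the rightmost entry of
  // X-Forwarded-For; with 'trust proxy' set to 0 it would simply be
  // req.socket.remoteAddress (i.e. the proxy, not the real client).
  res.send(req.ip);
});

app.listen(3080);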
@@ -88,7 +88,7 @@ PROXY=
#============#
ANTHROPIC_API_KEY=user_provided
# ANTHROPIC_MODELS=claude-opus-4-20250514,claude-sonnet-4-20250514,claude-3-7-sonnet-20250219,claude-3-5-sonnet-20241022,claude-3-5-haiku-20241022,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307
# ANTHROPIC_MODELS=claude-3-7-sonnet-latest,claude-3-7-sonnet-20250219,claude-3-5-haiku-20241022,claude-3-5-sonnet-20241022,claude-3-5-sonnet-latest,claude-3-5-sonnet-20240620,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_REVERSE_PROXY=
#============#
@@ -438,27 +438,16 @@ OPENID_USERNAME_CLAIM=
# Set to determine which user info property returned from OpenID Provider to store as the User's name
OPENID_NAME_CLAIM=
OPENID_CUSTOM_DATA=
OPENID_PROVIDER=
OPENID_ADMIN_ROLE=
OPENID_BUTTON_LABEL=
OPENID_IMAGE_URL=
# Set to true to automatically redirect to the OpenID provider when a user visits the login page
# This will bypass the login form completely for users, only use this if OpenID is your only authentication method
OPENID_AUTO_REDIRECT=false
# Set to true to use PKCE (Proof Key for Code Exchange) for OpenID authentication
OPENID_USE_PKCE=false
#Set to true to reuse openid tokens for authentication management instead of using the mongodb session and the custom refresh token.
OPENID_REUSE_TOKENS=
#By default, signing key verification results are cached in order to prevent excessive HTTP requests to the JWKS endpoint.
#If a signing key matching the kid is found, this will be cached and the next time this kid is requested the signing key will be served from the cache.
#Default is true.
OPENID_JWKS_URL_CACHE_ENABLED=
OPENID_JWKS_URL_CACHE_TIME= # 600000 ms equals 10 minutes; leave empty to disable caching
#Set to true to trigger token exchange flow to acquire access token for the userinfo endpoint.
OPENID_ON_BEHALF_FLOW_FOR_USERINFRO_REQUIRED=
OPENID_ON_BEHALF_FLOW_USERINFRO_SCOPE = "user.read" # example for Scope Needed for Microsoft Graph API
# Set to true to use the OpenID Connect end session endpoint for logout
OPENID_USE_END_SESSION_ENDPOINT=
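
For the OPENID_JWKS_URL_CACHE_* settings above, a hedged illustration of the kind of client they control, using the jwks-rsa package (assumed here for illustration; the issuer URL and kid are placeholders):

const jwksRsa = require('jwks-rsa');

const jwksClient = jwksRsa({
  jwksUri: 'https://issuer.example.com/.well-known/jwks.json', // placeholder issuer
  cache: process.env.OPENID_JWKS_URL_CACHE_ENABLED !== 'false',
  cacheMaxAge: Number(process.env.OPENID_JWKS_URL_CACHE_TIME || 600000), // 10 minutes
});

// The first lookup for a kid hits the JWKS endpoint; repeat lookups within
// cacheMaxAge are served from the cache, as described above.
jwksClient.getSigningKey('example-kid', (err, key) => {
  if (err) throw err;
  console.log(key.getPublicKey());
});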
# LDAP
LDAP_URL=
LDAP_BIND_DN=
@@ -578,9 +567,9 @@ HELP_AND_FAQ_URL=https://librechat.ai
# users always get the latest version. Customize #
# only if you understand caching implications. #
# INDEX_CACHE_CONTROL=no-cache, no-store, must-revalidate
# INDEX_PRAGMA=no-cache
# INDEX_EXPIRES=0
# INDEX_HTML_CACHE_CONTROL=no-cache, no-store, must-revalidate
# INDEX_HTML_PRAGMA=no-cache
# INDEX_HTML_EXPIRES=0
# no-cache: Forces validation with server before using cached version
# no-store: Prevents storing the response entirely
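
As an illustration, a hypothetical Express handler that applies the INDEX_HTML_* values above to the index page (a sketch under assumptions; clientDist is a placeholder, not the project's actual code):

app.get('/', (req, res) => {
  res.set({
    'Cache-Control': process.env.INDEX_HTML_CACHE_CONTROL || 'no-cache, no-store, must-revalidate',
    Pragma: process.env.INDEX_HTML_PRAGMA || 'no-cache',
    Expires: process.env.INDEX_HTML_EXPIRES || '0',
  });
  res.sendFile('index.html', { root: clientDist }); // clientDist: placeholder path
});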
@@ -590,33 +579,3 @@ HELP_AND_FAQ_URL=https://librechat.ai
# OpenWeather #
#=====================================================#
OPENWEATHER_API_KEY=
#====================================#
# LibreChat Code Interpreter API #
#====================================#
# https://code.librechat.ai
# LIBRECHAT_CODE_API_KEY=your-key
#======================#
# Web Search #
#======================#
# Note: All of the following variable names can be customized.
# Omit values to allow users to provide them.
# For more information on configuration values, see:
# https://librechat.ai/docs/features/web_search
# Search Provider (Required)
# SERPER_API_KEY=your_serper_api_key
# Scraper (Required)
# FIRECRAWL_API_KEY=your_firecrawl_api_key
# Optional: Custom Firecrawl API URL
# FIRECRAWL_API_URL=your_firecrawl_api_url
# Reranker (Required)
# JINA_API_KEY=your_jina_api_key
# or
# COHERE_API_KEY=your_cohere_api_key

View File

@@ -26,15 +26,8 @@ jobs:
uses: azure/setup-helm@v4
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
- name: Build Subchart Deps
run: |
cd helm/librechat-rag-api
helm dependency build
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.6.0
with:
charts_dir: helm
skip_existing: true
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"

View File

@@ -22,7 +22,7 @@ jobs:
# Define paths
I18N_FILE="client/src/locales/en/translation.json"
SOURCE_DIRS=("client/src" "api" "packages/data-provider/src")
SOURCE_DIRS=("client/src" "api")
# Check if translation file exists
if [[ ! -f "$I18N_FILE" ]]; then

.gitignore vendored
View File

@@ -52,9 +52,8 @@ bower_components/
*.d.ts
!vite-env.d.ts
# AI
# Cline
.clineignore
.cursor
# Floobits
.floo
@@ -114,11 +113,4 @@ uploads/
# owner
release/
# Helm
helm/librechat/Chart.lock
helm/**/charts/
helm/**/.values.yaml
!/client/src/@types/i18next.d.ts

View File

@@ -5,38 +5,23 @@ All notable changes to this project will be documented in this file.
## [Unreleased]
### ✨ New Features
- ✨ feat: implement search parameter updates by **@mawburn** in [#7151](https://github.com/danny-avila/LibreChat/pull/7151)
- 🎏 feat: Add MCP support for Streamable HTTP Transport by **@benverhees** in [#7353](https://github.com/danny-avila/LibreChat/pull/7353)
- 🔒 feat: Add Content Security Policy using Helmet middleware by **@rubentalstra** in [#7377](https://github.com/danny-avila/LibreChat/pull/7377)
- ✨ feat: Add Normalization for MCP Server Names by **@danny-avila** in [#7421](https://github.com/danny-avila/LibreChat/pull/7421)
- 📊 feat: Improve Helm Chart by **@hofq** in [#3638](https://github.com/danny-avila/LibreChat/pull/3638)
### 🌍 Internationalization
- 🌍 i18n: Add `Danish` and `Czech` and `Catalan` localization support by **@rubentalstra** in [#7373](https://github.com/danny-avila/LibreChat/pull/7373)
- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#7375](https://github.com/danny-avila/LibreChat/pull/7375)
### 🔧 Fixes
- 💬 fix: update aria-label for accessibility in ConvoLink component by **@berry-13** in [#7320](https://github.com/danny-avila/LibreChat/pull/7320)
- 🔑 fix: use `apiKey` instead of `openAIApiKey` in OpenAI-like Config by **@danny-avila** in [#7337](https://github.com/danny-avila/LibreChat/pull/7337)
- 🔄 fix: update navigation logic in `useFocusChatEffect` to ensure correct search parameters are used by **@mawburn** in [#7340](https://github.com/danny-avila/LibreChat/pull/7340)
- 🔄 fix: Improve MCP Connection Cleanup by **@danny-avila** in [#7400](https://github.com/danny-avila/LibreChat/pull/7400)
- 🛡️ fix: Preset and Validation Logic for URL Query Params by **@danny-avila** in [#7407](https://github.com/danny-avila/LibreChat/pull/7407)
- 🌘 fix: artifact of preview text is illegible in dark mode by **@nhtruong** in [#7405](https://github.com/danny-avila/LibreChat/pull/7405)
- 🛡️ fix: Temporarily Remove CSP until Configurable by **@danny-avila** in [#7419](https://github.com/danny-avila/LibreChat/pull/7419)
- 💽 fix: Exclude index page `/` from static cache settings by **@sbruel** in [#7382](https://github.com/danny-avila/LibreChat/pull/7382)
### ⚙️ Other Changes
- 📜 docs: CHANGELOG for release v0.7.8 by **@github-actions[bot]** in [#7290](https://github.com/danny-avila/LibreChat/pull/7290)
- 📦 chore: Update API Package Dependencies by **@danny-avila** in [#7359](https://github.com/danny-avila/LibreChat/pull/7359)
- 📜 docs: Unreleased Changelog by **@github-actions[bot]** in [#7321](https://github.com/danny-avila/LibreChat/pull/7321)
@@ -82,6 +67,7 @@ Changes from v0.7.8-rc1 to v0.7.8.
---
## [v0.7.8-rc1] -
Changes from v0.7.7 to v0.7.8-rc1.

View File

@@ -71,11 +71,6 @@
- [Model Context Protocol (MCP) Support](https://modelcontextprotocol.io/clients#librechat) for Tools
- Use LibreChat Agents and OpenAI Assistants with Files, Code Interpreter, Tools, and API Actions
- 🔍 **Web Search**:
- Search the internet and retrieve relevant information to enhance your AI context
- Combines search providers, content scrapers, and result rerankers for optimal results
- **[Learn More →](https://www.librechat.ai/docs/features/web_search)**
- 🪄 **Generative UI with Code Artifacts**:
- [Code Artifacts](https://youtu.be/GfTj7O4gmd0?si=WJbdnemZpJzBrJo3) allow creation of React, HTML, and Mermaid diagrams directly in chat

View File

@@ -70,7 +70,7 @@ class AnthropicClient extends BaseClient {
this.message_delta;
/** Whether the model is part of the Claude 3 Family
* @type {boolean} */
this.isClaudeLatest;
this.isClaude3;
/** Whether to use Messages API or Completions API
* @type {boolean} */
this.useMessages;
@@ -116,8 +116,7 @@ class AnthropicClient extends BaseClient {
);
const modelMatch = matchModelName(this.modelOptions.model, EModelEndpoint.anthropic);
this.isClaudeLatest =
/claude-[3-9]/.test(modelMatch) || /claude-(?:sonnet|opus|haiku)-[4-9]/.test(modelMatch);
this.isClaude3 = modelMatch.includes('claude-3');
this.isLegacyOutput = !(
/claude-3[-.]5-sonnet/.test(modelMatch) || /claude-3[-.]7/.test(modelMatch)
);
@@ -131,7 +130,7 @@ class AnthropicClient extends BaseClient {
this.modelOptions.maxOutputTokens = legacy.maxOutputTokens.default;
}
this.useMessages = this.isClaudeLatest || !!this.options.attachments;
this.useMessages = this.isClaude3 || !!this.options.attachments;
this.defaultVisionModel = this.options.visionModel ?? 'claude-3-sonnet-20240229';
this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));
@@ -655,10 +654,7 @@ class AnthropicClient extends BaseClient {
);
};
if (
/claude-[3-9]/.test(this.modelOptions.model) ||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(this.modelOptions.model)
) {
if (this.modelOptions.model.includes('claude-3')) {
await buildMessagesPayload();
processTokens();
return {
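
For reference, a runnable sketch of the two patterns in this hunk; Claude 3+ "family-first" names satisfy the first regex, while Claude 4's "type-first" names (claude-sonnet-4, claude-opus-4) need the second:

const isClaudeLatest = (model) =>
  /claude-[3-9]/.test(model) || /claude-(?:sonnet|opus|haiku)-[4-9]/.test(model);

console.log(isClaudeLatest('claude-3-5-sonnet-20241022')); // true
console.log(isClaudeLatest('claude-sonnet-4-20250514')); // true
console.log(isClaudeLatest('claude-sonnet-3-20240229')); // false (type-first Claude 3)
console.log(isClaudeLatest('claude-2.1')); // false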

View File

@@ -15,7 +15,7 @@ describe('AnthropicClient', () => {
{
role: 'user',
isCreatedByUser: true,
text: "What's up",
text: 'What\'s up',
messageId: '3',
parentMessageId: '2',
},
@@ -170,7 +170,7 @@ describe('AnthropicClient', () => {
client.options.modelLabel = 'Claude-2';
const result = await client.buildMessages(messages, parentMessageId);
const { prompt } = result;
expect(prompt).toContain("Human's name: John");
expect(prompt).toContain('Human\'s name: John');
expect(prompt).toContain('You are Claude-2');
});
});
@@ -244,64 +244,6 @@ describe('AnthropicClient', () => {
);
});
describe('Claude 4 model headers', () => {
it('should add "prompt-caching" beta header for claude-sonnet-4 model', () => {
const client = new AnthropicClient('test-api-key');
const modelOptions = {
model: 'claude-sonnet-4-20250514',
};
client.setOptions({ modelOptions, promptCache: true });
const anthropicClient = client.getClient(modelOptions);
expect(anthropicClient._options.defaultHeaders).toBeDefined();
expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
'prompt-caching-2024-07-31',
);
});
it('should add "prompt-caching" beta header for claude-opus-4 model', () => {
const client = new AnthropicClient('test-api-key');
const modelOptions = {
model: 'claude-opus-4-20250514',
};
client.setOptions({ modelOptions, promptCache: true });
const anthropicClient = client.getClient(modelOptions);
expect(anthropicClient._options.defaultHeaders).toBeDefined();
expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
'prompt-caching-2024-07-31',
);
});
it('should add "prompt-caching" beta header for claude-4-sonnet model', () => {
const client = new AnthropicClient('test-api-key');
const modelOptions = {
model: 'claude-4-sonnet-20250514',
};
client.setOptions({ modelOptions, promptCache: true });
const anthropicClient = client.getClient(modelOptions);
expect(anthropicClient._options.defaultHeaders).toBeDefined();
expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
'prompt-caching-2024-07-31',
);
});
it('should add "prompt-caching" beta header for claude-4-opus model', () => {
const client = new AnthropicClient('test-api-key');
const modelOptions = {
model: 'claude-4-opus-20250514',
};
client.setOptions({ modelOptions, promptCache: true });
const anthropicClient = client.getClient(modelOptions);
expect(anthropicClient._options.defaultHeaders).toBeDefined();
expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
'prompt-caching-2024-07-31',
);
});
});
it('should not add beta header for claude-3-5-sonnet-latest model', () => {
const client = new AnthropicClient('test-api-key');
const modelOptions = {
@@ -787,223 +729,4 @@ describe('AnthropicClient', () => {
expect(capturedOptions).toHaveProperty('topK', 10);
expect(capturedOptions).toHaveProperty('topP', 0.9);
});
describe('isClaudeLatest', () => {
it('should set isClaudeLatest to true for claude-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-3-sonnet-20240229',
},
});
expect(client.isClaudeLatest).toBe(true);
});
it('should set isClaudeLatest to true for claude-3.5 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-3.5-sonnet-20240229',
},
});
expect(client.isClaudeLatest).toBe(true);
});
it('should set isClaudeLatest to true for claude-sonnet-4 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-sonnet-4-20240229',
},
});
expect(client.isClaudeLatest).toBe(true);
});
it('should set isClaudeLatest to true for claude-opus-4 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-opus-4-20240229',
},
});
expect(client.isClaudeLatest).toBe(true);
});
it('should set isClaudeLatest to true for claude-3.5-haiku models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-3.5-haiku-20240229',
},
});
expect(client.isClaudeLatest).toBe(true);
});
it('should set isClaudeLatest to false for claude-2 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-2',
},
});
expect(client.isClaudeLatest).toBe(false);
});
it('should set isClaudeLatest to false for claude-instant models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-instant',
},
});
expect(client.isClaudeLatest).toBe(false);
});
it('should set isClaudeLatest to false for claude-sonnet-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-sonnet-3-20240229',
},
});
expect(client.isClaudeLatest).toBe(false);
});
it('should set isClaudeLatest to false for claude-opus-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-opus-3-20240229',
},
});
expect(client.isClaudeLatest).toBe(false);
});
it('should set isClaudeLatest to false for claude-haiku-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-haiku-3-20240229',
},
});
expect(client.isClaudeLatest).toBe(false);
});
});
describe('configureReasoning', () => {
it('should enable thinking for claude-opus-4 and claude-sonnet-4 models', async () => {
const client = new AnthropicClient('test-api-key');
// Create a mock async generator function
async function* mockAsyncGenerator() {
yield { type: 'message_start', message: { usage: {} } };
yield { delta: { text: 'Test response' } };
yield { type: 'message_delta', usage: {} };
}
// Mock createResponse to return the async generator
jest.spyOn(client, 'createResponse').mockImplementation(() => {
return mockAsyncGenerator();
});
// Test claude-opus-4
client.setOptions({
modelOptions: {
model: 'claude-opus-4-20250514',
},
thinking: true,
thinkingBudget: 2000,
});
let capturedOptions = null;
jest.spyOn(client, 'getClient').mockImplementation((options) => {
capturedOptions = options;
return {};
});
const payload = [{ role: 'user', content: 'Test message' }];
await client.sendCompletion(payload, {});
expect(capturedOptions).toHaveProperty('thinking');
expect(capturedOptions.thinking).toEqual({
type: 'enabled',
budget_tokens: 2000,
});
// Test claude-sonnet-4
client.setOptions({
modelOptions: {
model: 'claude-sonnet-4-20250514',
},
thinking: true,
thinkingBudget: 2000,
});
await client.sendCompletion(payload, {});
expect(capturedOptions).toHaveProperty('thinking');
expect(capturedOptions.thinking).toEqual({
type: 'enabled',
budget_tokens: 2000,
});
});
});
});
describe('Claude Model Tests', () => {
it('should handle Claude 3 and 4 series models correctly', () => {
const client = new AnthropicClient('test-key');
// Claude 3 series models
const claude3Models = [
'claude-3-opus-20240229',
'claude-3-sonnet-20240229',
'claude-3-haiku-20240307',
'claude-3-5-sonnet-20240620',
'claude-3-5-haiku-20240620',
'claude-3.5-sonnet-20240620',
'claude-3.5-haiku-20240620',
'claude-3.7-sonnet-20240620',
'claude-3.7-haiku-20240620',
'anthropic/claude-3-opus-20240229',
'claude-3-opus-20240229/anthropic',
];
// Claude 4 series models
const claude4Models = [
'claude-sonnet-4-20250514',
'claude-opus-4-20250514',
'claude-4-sonnet-20250514',
'claude-4-opus-20250514',
'anthropic/claude-sonnet-4-20250514',
'claude-sonnet-4-20250514/anthropic',
];
// Test Claude 3 series
claude3Models.forEach((model) => {
client.setOptions({ modelOptions: { model } });
expect(
/claude-[3-9]/.test(client.modelOptions.model) ||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(client.modelOptions.model),
).toBe(true);
});
// Test Claude 4 series
claude4Models.forEach((model) => {
client.setOptions({ modelOptions: { model } });
expect(
/claude-[3-9]/.test(client.modelOptions.model) ||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(client.modelOptions.model),
).toBe(true);
});
// Test non-Claude 3/4 models
const nonClaudeModels = ['claude-2', 'claude-instant', 'gpt-4', 'gpt-3.5-turbo'];
nonClaudeModels.forEach((model) => {
client.setOptions({ modelOptions: { model } });
expect(
/claude-[3-9]/.test(client.modelOptions.model) ||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(client.modelOptions.model),
).toBe(false);
});
});
});

View File

@@ -30,7 +30,7 @@ const DEFAULT_IMAGE_EDIT_DESCRIPTION =
When to use \`image_edit_oai\`:
- The user wants to modify, extend, or remix one **or more** uploaded images, either:
- Previously generated, or in the current request (both to be included in the \`image_ids\` array).
- Always when the user refers to uploaded images for editing, enhancement, remixing, style transfer, or combining elements.
- Any current or existing images are to be used as visual guides.
- If there are any files in the current request, they are more likely than not expected as references for image edit requests.

View File

@@ -1,13 +1,7 @@
const { SerpAPI } = require('@langchain/community/tools/serpapi');
const { Calculator } = require('@langchain/community/tools/calculator');
const { EnvVar, createCodeExecutionTool, createSearchTool } = require('@librechat/agents');
const {
Tools,
Constants,
EToolResources,
loadWebSearchAuth,
replaceSpecialVars,
} = require('librechat-data-provider');
const { createCodeExecutionTool, EnvVar } = require('@librechat/agents');
const { Tools, Constants, EToolResources } = require('librechat-data-provider');
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const {
availableTools,
@@ -144,6 +138,7 @@ const loadTools = async ({
agent,
model,
endpoint,
useSpecs,
tools = [],
options = {},
functions = true,
@@ -268,33 +263,6 @@ const loadTools = async ({
return createFileSearchTool({ req: options.req, files, entity_id: agent?.id });
};
continue;
} else if (tool === Tools.web_search) {
const webSearchConfig = options?.req?.app?.locals?.webSearch;
const result = await loadWebSearchAuth({
userId: user,
loadAuthValues,
webSearchConfig,
});
const { onSearchResults, onGetHighlights } = options?.[Tools.web_search] ?? {};
requestedTools[tool] = async () => {
toolContextMap[tool] = `# \`${tool}\`:
Current Date & Time: ${replaceSpecialVars({ text: '{{iso_datetime}}' })}
1. **Execute immediately without preface** when using \`${tool}\`.
2. **After the search, begin with a brief summary** that directly addresses the query without headers or explaining your process.
3. **Structure your response clearly** using Markdown formatting (Level 2 headers for sections, lists for multiple points, tables for comparisons).
4. **Cite sources properly** according to the citation anchor format, utilizing group anchors when appropriate.
5. **Tailor your approach to the query type** (academic, news, coding, etc.) while maintaining an expert, journalistic, unbiased tone.
6. **Provide comprehensive information** with specific details, examples, and as much relevant context as possible from search results.
7. **Avoid moralizing language.**
`.trim();
return createSearchTool({
...result.authResult,
onSearchResults,
onGetHighlights,
logger,
});
};
continue;
} else if (tool && appTools[tool] && mcpToolPattern.test(tool)) {
requestedTools[tool] = async () =>
createMCPTool({

View File

@@ -61,10 +61,6 @@ const abortKeys = isRedisEnabled
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.ABORT_KEYS, ttl: Time.TEN_MINUTES });
const openIdExchangedTokensCache = isRedisEnabled
? new Keyv({ store: keyvRedis, ttl: Time.TEN_MINUTES })
: new Keyv({ namespace: CacheKeys.OPENID_EXCHANGED_TOKENS, ttl: Time.TEN_MINUTES });
const namespaces = {
[CacheKeys.ROLES]: roles,
[CacheKeys.CONFIG_STORE]: config,
@@ -102,7 +98,6 @@ const namespaces = {
[CacheKeys.AUDIO_RUNS]: audioRuns,
[CacheKeys.MESSAGES]: messages,
[CacheKeys.FLOWS]: flows,
[CacheKeys.OPENID_EXCHANGED_TOKENS]: openIdExchangedTokensCache,
};
/**

View File

@@ -76,13 +76,10 @@ if (REDIS_URI && isEnabled(USE_REDIS)) {
keyvRedis = new KeyvRedis(REDIS_URI, keyvOpts);
}
const pingInterval = setInterval(
() => {
logger.debug('KeyvRedis ping');
keyvRedis.client.ping().catch((err) => logger.error('Redis keep-alive ping failed:', err));
},
5 * 60 * 1000,
);
const pingInterval = setInterval(() => {
logger.debug('KeyvRedis ping');
keyvRedis.client.ping().catch(err => logger.error('Redis keep-alive ping failed:', err));
}, 5 * 60 * 1000);
keyvRedis.on('ready', () => {
logger.info('KeyvRedis connection ready');

View File

@@ -11,8 +11,5 @@ module.exports = {
moduleNameMapper: {
'~/(.*)': '<rootDir>/$1',
'~/data/auth.json': '<rootDir>/__mocks__/auth.mock.json',
'^openid-client/passport$': '<rootDir>/test/__mocks__/openid-client-passport.js', // Mock for the passport strategy part
'^openid-client$': '<rootDir>/test/__mocks__/openid-client.js',
},
transformIgnorePatterns: ['/node_modules/(?!(openid-client|oauth4webapi|jose)/).*/'],
};

View File

@@ -21,19 +21,7 @@ const Agent = mongoose.model('agent', agentSchema);
* @throws {Error} If the agent creation fails.
*/
const createAgent = async (agentData) => {
const { author, ...versionData } = agentData;
const timestamp = new Date();
const initialAgentData = {
...agentData,
versions: [
{
...versionData,
createdAt: timestamp,
updatedAt: timestamp,
},
],
};
return (await Agent.create(initialAgentData)).toObject();
return (await Agent.create(agentData)).toObject();
};
/**
@@ -60,17 +48,12 @@ const loadEphemeralAgent = ({ req, agent_id, endpoint, model_parameters: _m }) =
const { model, ...model_parameters } = _m;
/** @type {Record<string, FunctionTool>} */
const availableTools = req.app.locals.availableTools;
/** @type {TEphemeralAgent | null} */
const ephemeralAgent = req.body.ephemeralAgent;
const mcpServers = new Set(ephemeralAgent?.mcp);
const mcpServers = new Set(req.body.ephemeralAgent?.mcp);
/** @type {string[]} */
const tools = [];
if (ephemeralAgent?.execute_code === true) {
if (req.body.ephemeralAgent?.execute_code === true) {
tools.push(Tools.execute_code);
}
if (ephemeralAgent?.web_search === true) {
tools.push(Tools.web_search);
}
if (mcpServers.size > 0) {
for (const toolName of Object.keys(availableTools)) {
@@ -120,8 +103,6 @@ const loadAgent = async ({ req, agent_id, endpoint, model_parameters }) => {
return null;
}
agent.version = agent.versions ? agent.versions.length : 0;
if (agent.author.toString() === req.user.id) {
return agent;
}
@@ -146,155 +127,18 @@ const loadAgent = async ({ req, agent_id, endpoint, model_parameters }) => {
}
};
/**
* Check if a version already exists in the versions array, excluding timestamp and author fields
* @param {Object} updateData - The update data to compare
* @param {Object} currentData - The current agent data to merge with the direct updates
* @param {Array} versions - The existing versions array
* @returns {Object|null} - The matching version if found, null otherwise
*/
const isDuplicateVersion = (updateData, currentData, versions) => {
if (!versions || versions.length === 0) {
return null;
}
const excludeFields = [
'_id',
'id',
'createdAt',
'updatedAt',
'author',
'updatedBy',
'created_at',
'updated_at',
'__v',
'agent_ids',
'versions',
];
const { $push, $pull, $addToSet, ...directUpdates } = updateData;
if (Object.keys(directUpdates).length === 0) {
return null;
}
const wouldBeVersion = { ...currentData, ...directUpdates };
const lastVersion = versions[versions.length - 1];
const allFields = new Set([...Object.keys(wouldBeVersion), ...Object.keys(lastVersion)]);
const importantFields = Array.from(allFields).filter((field) => !excludeFields.includes(field));
let isMatch = true;
for (const field of importantFields) {
if (!wouldBeVersion[field] && !lastVersion[field]) {
continue;
}
if (Array.isArray(wouldBeVersion[field]) && Array.isArray(lastVersion[field])) {
if (wouldBeVersion[field].length !== lastVersion[field].length) {
isMatch = false;
break;
}
// Special handling for projectIds (MongoDB ObjectIds)
if (field === 'projectIds') {
const wouldBeIds = wouldBeVersion[field].map((id) => id.toString()).sort();
const versionIds = lastVersion[field].map((id) => id.toString()).sort();
if (!wouldBeIds.every((id, i) => id === versionIds[i])) {
isMatch = false;
break;
}
}
// Handle arrays of objects like tool_kwargs
else if (typeof wouldBeVersion[field][0] === 'object' && wouldBeVersion[field][0] !== null) {
const sortedWouldBe = [...wouldBeVersion[field]].map((item) => JSON.stringify(item)).sort();
const sortedVersion = [...lastVersion[field]].map((item) => JSON.stringify(item)).sort();
if (!sortedWouldBe.every((item, i) => item === sortedVersion[i])) {
isMatch = false;
break;
}
} else {
const sortedWouldBe = [...wouldBeVersion[field]].sort();
const sortedVersion = [...lastVersion[field]].sort();
if (!sortedWouldBe.every((item, i) => item === sortedVersion[i])) {
isMatch = false;
break;
}
}
} else if (field === 'model_parameters') {
const wouldBeParams = wouldBeVersion[field] || {};
const lastVersionParams = lastVersion[field] || {};
if (JSON.stringify(wouldBeParams) !== JSON.stringify(lastVersionParams)) {
isMatch = false;
break;
}
} else if (wouldBeVersion[field] !== lastVersion[field]) {
isMatch = false;
break;
}
}
return isMatch ? lastVersion : null;
};
/**
* Update an agent with new data without overwriting existing
* properties, or create a new agent if it doesn't exist.
* When an agent is updated, a copy of the current state will be saved to the versions array.
*
* @param {Object} searchParameter - The search parameters to find the agent to update.
* @param {string} searchParameter.id - The ID of the agent to update.
* @param {string} [searchParameter.author] - The user ID of the agent's author.
* @param {Object} updateData - An object containing the properties to update.
* @param {string} [updatingUserId] - The ID of the user performing the update (used for tracking non-author updates).
* @returns {Promise<Agent>} The updated or newly created agent document as a plain object.
* @throws {Error} If the update would create a duplicate version
*/
const updateAgent = async (searchParameter, updateData, updatingUserId = null) => {
const updateAgent = async (searchParameter, updateData) => {
const options = { new: true, upsert: false };
const currentAgent = await Agent.findOne(searchParameter);
if (currentAgent) {
const { __v, _id, id, versions, author, ...versionData } = currentAgent.toObject();
const { $push, $pull, $addToSet, ...directUpdates } = updateData;
if (Object.keys(directUpdates).length > 0 && versions && versions.length > 0) {
const duplicateVersion = isDuplicateVersion(updateData, versionData, versions);
if (duplicateVersion) {
const error = new Error(
'Duplicate version: This would create a version identical to an existing one',
);
error.statusCode = 409;
error.details = {
duplicateVersion,
versionIndex: versions.findIndex(
(v) => JSON.stringify(duplicateVersion) === JSON.stringify(v),
),
};
throw error;
}
}
const versionEntry = {
...versionData,
...directUpdates,
updatedAt: new Date(),
};
// Always store updatedBy field to track who made the change
if (updatingUserId) {
versionEntry.updatedBy = new mongoose.Types.ObjectId(updatingUserId);
}
updateData.$push = {
...($push || {}),
versions: versionEntry,
};
}
return Agent.findOneAndUpdate(searchParameter, updateData, options).lean();
};
@@ -307,7 +151,7 @@ const updateAgent = async (searchParameter, updateData, updatingUserId = null) =
* @param {string} params.file_id
* @returns {Promise<Agent>} The updated agent.
*/
const addAgentResourceFile = async ({ req, agent_id, tool_resource, file_id }) => {
const addAgentResourceFile = async ({ agent_id, tool_resource, file_id }) => {
const searchParameter = { id: agent_id };
let agent = await getAgent(searchParameter);
if (!agent) {
@@ -333,7 +177,7 @@ const addAgentResourceFile = async ({ req, agent_id, tool_resource, file_id }) =
},
};
const updatedAgent = await updateAgent(searchParameter, updateData, req?.user?.id);
const updatedAgent = await updateAgent(searchParameter, updateData);
if (updatedAgent) {
return updatedAgent;
} else {
@@ -497,7 +341,7 @@ const updateAgentProjects = async ({ user, agentId, projectIds, removeProjectIds
delete updateQuery.author;
}
const updatedAgent = await updateAgent(updateQuery, updateOps, user.id);
const updatedAgent = await updateAgent(updateQuery, updateOps);
if (updatedAgent) {
return updatedAgent;
}
@@ -514,40 +358,6 @@ const updateAgentProjects = async ({ user, agentId, projectIds, removeProjectIds
return await getAgent({ id: agentId });
};
/**
* Reverts an agent to a specific version in its version history.
* @param {Object} searchParameter - The search parameters to find the agent to revert.
* @param {string} searchParameter.id - The ID of the agent to revert.
* @param {string} [searchParameter.author] - The user ID of the agent's author.
* @param {number} versionIndex - The index of the version to revert to in the versions array.
* @returns {Promise<MongoAgent>} The updated agent document after reverting.
* @throws {Error} If the agent is not found or the specified version does not exist.
*/
const revertAgentVersion = async (searchParameter, versionIndex) => {
const agent = await Agent.findOne(searchParameter);
if (!agent) {
throw new Error('Agent not found');
}
if (!agent.versions || !agent.versions[versionIndex]) {
throw new Error(`Version ${versionIndex} not found`);
}
const revertToVersion = agent.versions[versionIndex];
const updateData = {
...revertToVersion,
};
delete updateData._id;
delete updateData.id;
delete updateData.versions;
delete updateData.author;
delete updateData.updatedBy;
return Agent.findOneAndUpdate(searchParameter, updateData, { new: true }).lean();
};
module.exports = {
Agent,
getAgent,
@@ -559,5 +369,4 @@ module.exports = {
updateAgentProjects,
addAgentResourceFile,
removeAgentResourceFiles,
revertAgentVersion,
};

View File

@@ -1,25 +1,7 @@
const originalEnv = {
CREDS_KEY: process.env.CREDS_KEY,
CREDS_IV: process.env.CREDS_IV,
};
process.env.CREDS_KEY = '0123456789abcdef0123456789abcdef';
process.env.CREDS_IV = '0123456789abcdef';
const mongoose = require('mongoose');
const { v4: uuidv4 } = require('uuid');
const { MongoMemoryServer } = require('mongodb-memory-server');
const {
Agent,
addAgentResourceFile,
removeAgentResourceFiles,
createAgent,
updateAgent,
getAgent,
deleteAgent,
getListAgents,
updateAgentProjects,
} = require('./Agent');
const { Agent, addAgentResourceFile, removeAgentResourceFiles } = require('./Agent');
describe('Agent Resource File Operations', () => {
let mongoServer;
@@ -33,8 +15,6 @@ describe('Agent Resource File Operations', () => {
afterAll(async () => {
await mongoose.disconnect();
await mongoServer.stop();
process.env.CREDS_KEY = originalEnv.CREDS_KEY;
process.env.CREDS_IV = originalEnv.CREDS_IV;
});
beforeEach(async () => {
@@ -352,674 +332,3 @@ describe('Agent Resource File Operations', () => {
expect(finalFileIds).toHaveLength(0);
});
});
describe('Agent CRUD Operations', () => {
let mongoServer;
beforeAll(async () => {
mongoServer = await MongoMemoryServer.create();
const mongoUri = mongoServer.getUri();
await mongoose.connect(mongoUri);
});
afterAll(async () => {
await mongoose.disconnect();
await mongoServer.stop();
});
beforeEach(async () => {
await Agent.deleteMany({});
});
test('should create and get an agent', async () => {
const agentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
const newAgent = await createAgent({
id: agentId,
name: 'Test Agent',
provider: 'test',
model: 'test-model',
author: authorId,
description: 'Test description',
});
expect(newAgent).toBeDefined();
expect(newAgent.id).toBe(agentId);
expect(newAgent.name).toBe('Test Agent');
const retrievedAgent = await getAgent({ id: agentId });
expect(retrievedAgent).toBeDefined();
expect(retrievedAgent.id).toBe(agentId);
expect(retrievedAgent.name).toBe('Test Agent');
expect(retrievedAgent.description).toBe('Test description');
});
test('should delete an agent', async () => {
const agentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'Agent To Delete',
provider: 'test',
model: 'test-model',
author: authorId,
});
const agentBeforeDelete = await getAgent({ id: agentId });
expect(agentBeforeDelete).toBeDefined();
await deleteAgent({ id: agentId });
const agentAfterDelete = await getAgent({ id: agentId });
expect(agentAfterDelete).toBeNull();
});
test('should list agents by author', async () => {
const authorId = new mongoose.Types.ObjectId();
const otherAuthorId = new mongoose.Types.ObjectId();
const agentIds = [];
for (let i = 0; i < 5; i++) {
const id = `agent_${uuidv4()}`;
agentIds.push(id);
await createAgent({
id,
name: `Agent ${i}`,
provider: 'test',
model: 'test-model',
author: authorId,
});
}
for (let i = 0; i < 3; i++) {
await createAgent({
id: `other_agent_${uuidv4()}`,
name: `Other Agent ${i}`,
provider: 'test',
model: 'test-model',
author: otherAuthorId,
});
}
const result = await getListAgents({ author: authorId.toString() });
expect(result).toBeDefined();
expect(result.data).toBeDefined();
expect(result.data).toHaveLength(5);
expect(result.has_more).toBe(true);
for (const agent of result.data) {
expect(agent.author).toBe(authorId.toString());
}
});
test('should update agent projects', async () => {
const agentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
const projectId1 = new mongoose.Types.ObjectId();
const projectId2 = new mongoose.Types.ObjectId();
const projectId3 = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'Project Test Agent',
provider: 'test',
model: 'test-model',
author: authorId,
projectIds: [projectId1],
});
await updateAgent(
{ id: agentId },
{ $addToSet: { projectIds: { $each: [projectId2, projectId3] } } },
);
await updateAgent({ id: agentId }, { $pull: { projectIds: projectId1 } });
await updateAgent({ id: agentId }, { projectIds: [projectId2, projectId3] });
const updatedAgent = await getAgent({ id: agentId });
expect(updatedAgent.projectIds).toHaveLength(2);
expect(updatedAgent.projectIds.map((id) => id.toString())).toContain(projectId2.toString());
expect(updatedAgent.projectIds.map((id) => id.toString())).toContain(projectId3.toString());
expect(updatedAgent.projectIds.map((id) => id.toString())).not.toContain(projectId1.toString());
await updateAgent({ id: agentId }, { projectIds: [] });
const emptyProjectsAgent = await getAgent({ id: agentId });
expect(emptyProjectsAgent.projectIds).toHaveLength(0);
const nonExistentId = `agent_${uuidv4()}`;
await expect(
updateAgentProjects({
id: nonExistentId,
projectIds: [projectId1],
}),
).rejects.toThrow();
});
test('should handle ephemeral agent loading', async () => {
const agentId = 'ephemeral_test';
const endpoint = 'openai';
const originalModule = jest.requireActual('librechat-data-provider');
const mockDataProvider = {
...originalModule,
Constants: {
...originalModule.Constants,
EPHEMERAL_AGENT_ID: 'ephemeral_test',
},
};
jest.doMock('librechat-data-provider', () => mockDataProvider);
const mockReq = {
user: { id: 'user123' },
body: {
promptPrefix: 'This is a test instruction',
ephemeralAgent: {
execute_code: true,
mcp: ['server1', 'server2'],
},
},
app: {
locals: {
availableTools: {
tool__server1: {},
tool__server2: {},
another_tool: {},
},
},
},
};
const params = {
req: mockReq,
agent_id: agentId,
endpoint,
model_parameters: {
model: 'gpt-4',
temperature: 0.7,
},
};
expect(agentId).toBeDefined();
expect(endpoint).toBeDefined();
jest.dontMock('librechat-data-provider');
});
test('should handle loadAgent functionality and errors', async () => {
const agentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'Test Load Agent',
provider: 'test',
model: 'test-model',
author: authorId,
tools: ['tool1', 'tool2'],
});
const agent = await getAgent({ id: agentId });
expect(agent).toBeDefined();
expect(agent.id).toBe(agentId);
expect(agent.name).toBe('Test Load Agent');
expect(agent.tools).toEqual(expect.arrayContaining(['tool1', 'tool2']));
const mockLoadAgent = jest.fn().mockResolvedValue(agent);
const loadedAgent = await mockLoadAgent();
expect(loadedAgent).toBeDefined();
expect(loadedAgent.id).toBe(agentId);
const nonExistentId = `agent_${uuidv4()}`;
const nonExistentAgent = await getAgent({ id: nonExistentId });
expect(nonExistentAgent).toBeNull();
const mockLoadAgentError = jest.fn().mockRejectedValue(new Error('No agent found with ID'));
await expect(mockLoadAgentError()).rejects.toThrow('No agent found with ID');
});
});
describe('Agent Version History', () => {
let mongoServer;
beforeAll(async () => {
mongoServer = await MongoMemoryServer.create();
const mongoUri = mongoServer.getUri();
await mongoose.connect(mongoUri);
});
afterAll(async () => {
await mongoose.disconnect();
await mongoServer.stop();
});
beforeEach(async () => {
await Agent.deleteMany({});
});
test('should create an agent with a single entry in versions array', async () => {
const agentId = `agent_${uuidv4()}`;
const agent = await createAgent({
id: agentId,
name: 'Test Agent',
provider: 'test',
model: 'test-model',
author: new mongoose.Types.ObjectId(),
});
expect(agent.versions).toBeDefined();
expect(Array.isArray(agent.versions)).toBe(true);
expect(agent.versions).toHaveLength(1);
expect(agent.versions[0].name).toBe('Test Agent');
expect(agent.versions[0].provider).toBe('test');
expect(agent.versions[0].model).toBe('test-model');
});
test('should accumulate version history across multiple updates', async () => {
const agentId = `agent_${uuidv4()}`;
const author = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'First Name',
provider: 'test',
model: 'test-model',
author,
description: 'First description',
});
await updateAgent({ id: agentId }, { name: 'Second Name', description: 'Second description' });
await updateAgent({ id: agentId }, { name: 'Third Name', model: 'new-model' });
const finalAgent = await updateAgent({ id: agentId }, { description: 'Final description' });
expect(finalAgent.versions).toBeDefined();
expect(Array.isArray(finalAgent.versions)).toBe(true);
expect(finalAgent.versions).toHaveLength(4);
expect(finalAgent.versions[0].name).toBe('First Name');
expect(finalAgent.versions[0].description).toBe('First description');
expect(finalAgent.versions[0].model).toBe('test-model');
expect(finalAgent.versions[1].name).toBe('Second Name');
expect(finalAgent.versions[1].description).toBe('Second description');
expect(finalAgent.versions[1].model).toBe('test-model');
expect(finalAgent.versions[2].name).toBe('Third Name');
expect(finalAgent.versions[2].description).toBe('Second description');
expect(finalAgent.versions[2].model).toBe('new-model');
expect(finalAgent.versions[3].name).toBe('Third Name');
expect(finalAgent.versions[3].description).toBe('Final description');
expect(finalAgent.versions[3].model).toBe('new-model');
expect(finalAgent.name).toBe('Third Name');
expect(finalAgent.description).toBe('Final description');
expect(finalAgent.model).toBe('new-model');
});
test('should not include metadata fields in version history', async () => {
const agentId = `agent_${uuidv4()}`;
await createAgent({
id: agentId,
name: 'Test Agent',
provider: 'test',
model: 'test-model',
author: new mongoose.Types.ObjectId(),
});
const updatedAgent = await updateAgent({ id: agentId }, { description: 'New description' });
expect(updatedAgent.versions).toHaveLength(2);
expect(updatedAgent.versions[0]._id).toBeUndefined();
expect(updatedAgent.versions[0].__v).toBeUndefined();
expect(updatedAgent.versions[0].name).toBe('Test Agent');
expect(updatedAgent.versions[0].author).toBeUndefined();
expect(updatedAgent.versions[1]._id).toBeUndefined();
expect(updatedAgent.versions[1].__v).toBeUndefined();
});
test('should not recursively include previous versions', async () => {
const agentId = `agent_${uuidv4()}`;
await createAgent({
id: agentId,
name: 'Test Agent',
provider: 'test',
model: 'test-model',
author: new mongoose.Types.ObjectId(),
});
await updateAgent({ id: agentId }, { name: 'Updated Name 1' });
await updateAgent({ id: agentId }, { name: 'Updated Name 2' });
const finalAgent = await updateAgent({ id: agentId }, { name: 'Updated Name 3' });
expect(finalAgent.versions).toHaveLength(4);
finalAgent.versions.forEach((version) => {
expect(version.versions).toBeUndefined();
});
});
test('should handle MongoDB operators and field updates correctly', async () => {
const agentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
const projectId = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'MongoDB Operator Test',
provider: 'test',
model: 'test-model',
author: authorId,
tools: ['tool1'],
});
await updateAgent(
{ id: agentId },
{
description: 'Updated description',
$push: { tools: 'tool2' },
$addToSet: { projectIds: projectId },
},
);
const firstUpdate = await getAgent({ id: agentId });
expect(firstUpdate.description).toBe('Updated description');
expect(firstUpdate.tools).toContain('tool1');
expect(firstUpdate.tools).toContain('tool2');
expect(firstUpdate.projectIds.map((id) => id.toString())).toContain(projectId.toString());
expect(firstUpdate.versions).toHaveLength(2);
await updateAgent(
{ id: agentId },
{
tools: ['tool2', 'tool3'],
},
);
const secondUpdate = await getAgent({ id: agentId });
expect(secondUpdate.tools).toHaveLength(2);
expect(secondUpdate.tools).toContain('tool2');
expect(secondUpdate.tools).toContain('tool3');
expect(secondUpdate.tools).not.toContain('tool1');
expect(secondUpdate.versions).toHaveLength(3);
await updateAgent(
{ id: agentId },
{
$push: { tools: 'tool3' },
},
);
const thirdUpdate = await getAgent({ id: agentId });
const toolCount = thirdUpdate.tools.filter((t) => t === 'tool3').length;
expect(toolCount).toBe(2);
expect(thirdUpdate.versions).toHaveLength(4);
});
test('should handle parameter objects correctly', async () => {
const agentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'Parameters Test',
provider: 'test',
model: 'test-model',
author: authorId,
model_parameters: { temperature: 0.7 },
});
const updatedAgent = await updateAgent(
{ id: agentId },
{ model_parameters: { temperature: 0.8 } },
);
expect(updatedAgent.versions).toHaveLength(2);
expect(updatedAgent.model_parameters.temperature).toBe(0.8);
await updateAgent(
{ id: agentId },
{
model_parameters: {
temperature: 0.8,
max_tokens: 1000,
},
},
);
const complexAgent = await getAgent({ id: agentId });
expect(complexAgent.versions).toHaveLength(3);
expect(complexAgent.model_parameters.temperature).toBe(0.8);
expect(complexAgent.model_parameters.max_tokens).toBe(1000);
await updateAgent({ id: agentId }, { model_parameters: {} });
const emptyParamsAgent = await getAgent({ id: agentId });
expect(emptyParamsAgent.versions).toHaveLength(4);
expect(emptyParamsAgent.model_parameters).toEqual({});
});
test('should detect duplicate versions and reject updates', async () => {
const originalConsoleError = console.error;
console.error = jest.fn();
try {
const agentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
const projectId1 = new mongoose.Types.ObjectId();
const projectId2 = new mongoose.Types.ObjectId();
const testCases = [
{
name: 'simple field update',
initial: {
name: 'Test Agent',
description: 'Initial description',
},
update: { name: 'Updated Name' },
duplicate: { name: 'Updated Name' },
},
{
name: 'object field update',
initial: {
model_parameters: { temperature: 0.7 },
},
update: { model_parameters: { temperature: 0.8 } },
duplicate: { model_parameters: { temperature: 0.8 } },
},
{
name: 'array field update',
initial: {
tools: ['tool1', 'tool2'],
},
update: { tools: ['tool2', 'tool3'] },
duplicate: { tools: ['tool2', 'tool3'] },
},
{
name: 'projectIds update',
initial: {
projectIds: [projectId1],
},
update: { projectIds: [projectId1, projectId2] },
duplicate: { projectIds: [projectId2, projectId1] },
},
];
for (const testCase of testCases) {
const testAgentId = `agent_${uuidv4()}`;
await createAgent({
id: testAgentId,
provider: 'test',
model: 'test-model',
author: authorId,
...testCase.initial,
});
await updateAgent({ id: testAgentId }, testCase.update);
let error;
try {
await updateAgent({ id: testAgentId }, testCase.duplicate);
} catch (e) {
error = e;
}
expect(error).toBeDefined();
expect(error.message).toContain('Duplicate version');
expect(error.statusCode).toBe(409);
expect(error.details).toBeDefined();
expect(error.details.duplicateVersion).toBeDefined();
const agent = await getAgent({ id: testAgentId });
expect(agent.versions).toHaveLength(2);
}
} finally {
console.error = originalConsoleError;
}
});
test('should track updatedBy when a different user updates an agent', async () => {
const agentId = `agent_${uuidv4()}`;
const originalAuthor = new mongoose.Types.ObjectId();
const updatingUser = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'Original Agent',
provider: 'test',
model: 'test-model',
author: originalAuthor,
description: 'Original description',
});
const updatedAgent = await updateAgent(
{ id: agentId },
{ name: 'Updated Agent', description: 'Updated description' },
updatingUser.toString(),
);
expect(updatedAgent.versions).toHaveLength(2);
expect(updatedAgent.versions[1].updatedBy.toString()).toBe(updatingUser.toString());
expect(updatedAgent.author.toString()).toBe(originalAuthor.toString());
});
test('should include updatedBy even when the original author updates the agent', async () => {
const agentId = `agent_${uuidv4()}`;
const originalAuthor = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'Original Agent',
provider: 'test',
model: 'test-model',
author: originalAuthor,
description: 'Original description',
});
const updatedAgent = await updateAgent(
{ id: agentId },
{ name: 'Updated Agent', description: 'Updated description' },
originalAuthor.toString(),
);
expect(updatedAgent.versions).toHaveLength(2);
expect(updatedAgent.versions[1].updatedBy.toString()).toBe(originalAuthor.toString());
expect(updatedAgent.author.toString()).toBe(originalAuthor.toString());
});
test('should track multiple different users updating the same agent', async () => {
const agentId = `agent_${uuidv4()}`;
const originalAuthor = new mongoose.Types.ObjectId();
const user1 = new mongoose.Types.ObjectId();
const user2 = new mongoose.Types.ObjectId();
const user3 = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'Original Agent',
provider: 'test',
model: 'test-model',
author: originalAuthor,
description: 'Original description',
});
// User 1 makes an update
await updateAgent(
{ id: agentId },
{ name: 'Updated by User 1', description: 'First update' },
user1.toString(),
);
// Original author makes an update
await updateAgent(
{ id: agentId },
{ description: 'Updated by original author' },
originalAuthor.toString(),
);
// User 2 makes an update
await updateAgent(
{ id: agentId },
{ name: 'Updated by User 2', model: 'new-model' },
user2.toString(),
);
// User 3 makes an update
const finalAgent = await updateAgent(
{ id: agentId },
{ description: 'Final update by User 3' },
user3.toString(),
);
expect(finalAgent.versions).toHaveLength(5);
expect(finalAgent.author.toString()).toBe(originalAuthor.toString());
// Check that each version has the correct updatedBy
expect(finalAgent.versions[0].updatedBy).toBeUndefined(); // Initial creation has no updatedBy
expect(finalAgent.versions[1].updatedBy.toString()).toBe(user1.toString());
expect(finalAgent.versions[2].updatedBy.toString()).toBe(originalAuthor.toString());
expect(finalAgent.versions[3].updatedBy.toString()).toBe(user2.toString());
expect(finalAgent.versions[4].updatedBy.toString()).toBe(user3.toString());
// Verify the final state
expect(finalAgent.name).toBe('Updated by User 2');
expect(finalAgent.description).toBe('Final update by User 3');
expect(finalAgent.model).toBe('new-model');
});
test('should preserve original author during agent restoration', async () => {
const agentId = `agent_${uuidv4()}`;
const originalAuthor = new mongoose.Types.ObjectId();
const updatingUser = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'Original Agent',
provider: 'test',
model: 'test-model',
author: originalAuthor,
description: 'Original description',
});
await updateAgent(
{ id: agentId },
{ name: 'Updated Agent', description: 'Updated description' },
updatingUser.toString(),
);
const { revertAgentVersion } = require('./Agent');
const revertedAgent = await revertAgentVersion({ id: agentId }, 0);
expect(revertedAgent.author.toString()).toBe(originalAuthor.toString());
expect(revertedAgent.name).toBe('Original Agent');
expect(revertedAgent.description).toBe('Original description');
});
});
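The tests above pin down the versioning contract: every update appends an entry to versions, updatedBy records the acting user (and is absent on the creation entry), and author never changes, even across reverts. A minimal sketch of that contract, assuming a Mongoose Agent model with a versions array (updateAgentVersioned is a hypothetical name, not the project's implementation):

  const mongoose = require('mongoose');

  // Hypothetical sketch of the behavior exercised by the tests above.
  async function updateAgentVersioned(Agent, query, updateData, updatingUserId) {
    const agent = await Agent.findOne(query);
    agent.set(updateData);
    // Snapshot the new state; the creation entry carries no updatedBy.
    agent.versions.push({
      ...updateData,
      ...(updatingUserId && { updatedBy: new mongoose.Types.ObjectId(updatingUserId) }),
    });
    return agent.save(); // author is never touched here
  }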

View File

@@ -100,8 +100,6 @@ const tokenValues = Object.assign(
'claude-3-5-haiku': { prompt: 0.8, completion: 4 },
'claude-3.5-haiku': { prompt: 0.8, completion: 4 },
'claude-3-haiku': { prompt: 0.25, completion: 1.25 },
'claude-sonnet-4': { prompt: 3, completion: 15 },
'claude-opus-4': { prompt: 15, completion: 75 },
'claude-2.1': { prompt: 8, completion: 24 },
'claude-2': { prompt: 8, completion: 24 },
'claude-instant': { prompt: 0.8, completion: 2.4 },
@@ -164,8 +162,6 @@ const cacheTokenValues = {
'claude-3.5-haiku': { write: 1, read: 0.08 },
'claude-3-5-haiku': { write: 1, read: 0.08 },
'claude-3-haiku': { write: 0.3, read: 0.03 },
'claude-sonnet-4': { write: 3.75, read: 0.3 },
'claude-opus-4': { write: 18.75, read: 1.5 },
};
/**

View File

@@ -664,97 +664,3 @@ describe('Grok Model Tests - Pricing', () => {
});
});
});
describe('Claude Model Tests', () => {
it('should return correct prompt and completion rates for Claude 4 models', () => {
expect(getMultiplier({ model: 'claude-sonnet-4', tokenType: 'prompt' })).toBe(
tokenValues['claude-sonnet-4'].prompt,
);
expect(getMultiplier({ model: 'claude-sonnet-4', tokenType: 'completion' })).toBe(
tokenValues['claude-sonnet-4'].completion,
);
expect(getMultiplier({ model: 'claude-opus-4', tokenType: 'prompt' })).toBe(
tokenValues['claude-opus-4'].prompt,
);
expect(getMultiplier({ model: 'claude-opus-4', tokenType: 'completion' })).toBe(
tokenValues['claude-opus-4'].completion,
);
});
it('should handle Claude 4 model name variations with different prefixes and suffixes', () => {
const modelVariations = [
'claude-sonnet-4',
'claude-sonnet-4-20240229',
'claude-sonnet-4-latest',
'anthropic/claude-sonnet-4',
'claude-sonnet-4/anthropic',
'claude-sonnet-4-preview',
'claude-sonnet-4-20240229-preview',
'claude-opus-4',
'claude-opus-4-20240229',
'claude-opus-4-latest',
'anthropic/claude-opus-4',
'claude-opus-4/anthropic',
'claude-opus-4-preview',
'claude-opus-4-20240229-preview',
];
modelVariations.forEach((model) => {
const valueKey = getValueKey(model);
const isSonnet = model.includes('sonnet');
const expectedKey = isSonnet ? 'claude-sonnet-4' : 'claude-opus-4';
expect(valueKey).toBe(expectedKey);
expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(tokenValues[expectedKey].prompt);
expect(getMultiplier({ model, tokenType: 'completion' })).toBe(
tokenValues[expectedKey].completion,
);
});
});
it('should return correct cache rates for Claude 4 models', () => {
expect(getCacheMultiplier({ model: 'claude-sonnet-4', cacheType: 'write' })).toBe(
cacheTokenValues['claude-sonnet-4'].write,
);
expect(getCacheMultiplier({ model: 'claude-sonnet-4', cacheType: 'read' })).toBe(
cacheTokenValues['claude-sonnet-4'].read,
);
expect(getCacheMultiplier({ model: 'claude-opus-4', cacheType: 'write' })).toBe(
cacheTokenValues['claude-opus-4'].write,
);
expect(getCacheMultiplier({ model: 'claude-opus-4', cacheType: 'read' })).toBe(
cacheTokenValues['claude-opus-4'].read,
);
});
it('should handle Claude 4 model cache rates with different prefixes and suffixes', () => {
const modelVariations = [
'claude-sonnet-4',
'claude-sonnet-4-20240229',
'claude-sonnet-4-latest',
'anthropic/claude-sonnet-4',
'claude-sonnet-4/anthropic',
'claude-sonnet-4-preview',
'claude-sonnet-4-20240229-preview',
'claude-opus-4',
'claude-opus-4-20240229',
'claude-opus-4-latest',
'anthropic/claude-opus-4',
'claude-opus-4/anthropic',
'claude-opus-4-preview',
'claude-opus-4-20240229-preview',
];
modelVariations.forEach((model) => {
const isSonnet = model.includes('sonnet');
const expectedKey = isSonnet ? 'claude-sonnet-4' : 'claude-opus-4';
expect(getCacheMultiplier({ model, cacheType: 'write' })).toBe(
cacheTokenValues[expectedKey].write,
);
expect(getCacheMultiplier({ model, cacheType: 'read' })).toBe(
cacheTokenValues[expectedKey].read,
);
});
});
});
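The variation tests above assume a normalizer that maps any provider-prefixed or date-suffixed model string onto a canonical pricing key. A rough sketch of that idea (assumed logic, not necessarily how getValueKey is implemented):

  // Hypothetical: resolve a raw model string to the first known pricing key it contains.
  function normalizeModelKey(model, knownKeys) {
    return knownKeys.find((key) => model.includes(key));
  }

  normalizeModelKey('anthropic/claude-opus-4-latest', ['claude-sonnet-4', 'claude-opus-4']);
  // => 'claude-opus-4'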

View File

@@ -43,14 +43,15 @@
"@google/generative-ai": "^0.23.0",
"@googleapis/youtube": "^20.0.0",
"@keyv/redis": "^4.3.3",
"@langchain/community": "^0.3.44",
"@langchain/core": "^0.3.57",
"@langchain/google-genai": "^0.2.9",
"@langchain/google-vertexai": "^0.2.9",
"@langchain/community": "^0.3.42",
"@langchain/core": "^0.3.55",
"@langchain/google-genai": "^0.2.8",
"@langchain/google-vertexai": "^0.2.8",
"@langchain/textsplitters": "^0.1.0",
"@librechat/agents": "^2.4.37",
"@librechat/agents": "^2.4.317",
"@librechat/data-schemas": "*",
"@waylaidwanderer/fetch-event-source": "^3.0.1",
"@microsoft/microsoft-graph-client": "^3.0.7",
"axios": "^1.8.2",
"bcryptjs": "^2.4.3",
"cohere-ai": "^7.9.1",
@@ -75,7 +76,6 @@
"ioredis": "^5.3.2",
"js-yaml": "^4.1.0",
"jsonwebtoken": "^9.0.0",
"jwks-rsa": "^3.2.0",
"keyv": "^5.3.2",
"keyv-file": "^5.1.2",
"klona": "^2.0.6",
@@ -87,13 +87,13 @@
"mime": "^3.0.0",
"module-alias": "^2.2.3",
"mongoose": "^8.12.1",
"multer": "^2.0.0",
"multer": "^1.4.5-lts.1",
"nanoid": "^3.3.7",
"nodemailer": "^6.9.15",
"ollama": "^0.5.0",
"openai": "^4.96.2",
"openai-chat-tokens": "^0.2.8",
"openid-client": "^6.5.0",
"openid-client": "^5.7.1",
"passport": "^0.6.0",
"passport-apple": "^2.0.2",
"passport-discord": "^0.1.4",

View File

@@ -16,17 +16,17 @@ const FinalizationRegistry = global.FinalizationRegistry || null;
*/
const clientRegistry = FinalizationRegistry
? new FinalizationRegistry((heldValue) => {
      try {
        // This will run when the client is garbage collected
        if (heldValue && heldValue.userId) {
          logger.debug(`[FinalizationRegistry] Cleaning up client for user ${heldValue.userId}`);
        } else {
          logger.debug('[FinalizationRegistry] Cleaning up client');
        }
      } catch (e) {
        // Ignore errors
      }
    })
: null;
/**
@@ -134,8 +134,8 @@ function disposeClient(client) {
if (client.message_delta) {
client.message_delta = null;
}
if (client.isClaudeLatest !== undefined) {
client.isClaudeLatest = null;
if (client.isClaude3 !== undefined) {
client.isClaude3 = null;
}
if (client.useMessages !== undefined) {
client.useMessages = null;

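For context on the FinalizationRegistry block above: the registry callback fires only after a registered object has been garbage collected, so it can only do logging or bookkeeping. A self-contained sketch of the registration side (assumed usage; the actual register call lives elsewhere in this file):

  const registry = new FinalizationRegistry((heldValue) => {
    console.log(`cleaned up client for user ${heldValue.userId}`);
  });

  function trackClient(client, userId) {
    // The held value must not reference the client itself,
    // otherwise the client could never be collected.
    registry.register(client, { userId });
  }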
View File

@@ -1,4 +1,3 @@
const openIdClient = require('openid-client');
const cookies = require('cookie');
const jwt = require('jsonwebtoken');
const {
@@ -6,12 +5,9 @@ const {
resetPassword,
setAuthTokens,
requestPasswordReset,
setOpenIDAuthTokens,
} = require('~/server/services/AuthService');
const { findSession, getUserById, deleteAllUserSessions, findUser } = require('~/models');
const { getOpenIdConfig } = require('~/strategies');
const { findSession, getUserById, deleteAllUserSessions } = require('~/models');
const { logger } = require('~/config');
const { isEnabled } = require('~/server/utils');
const registrationController = async (req, res) => {
try {
@@ -59,28 +55,10 @@ const resetPasswordController = async (req, res) => {
const refreshController = async (req, res) => {
const refreshToken = req.headers.cookie ? cookies.parse(req.headers.cookie).refreshToken : null;
const token_provider = req.headers.cookie
? cookies.parse(req.headers.cookie).token_provider
: null;
if (!refreshToken) {
return res.status(200).send('Refresh token not provided');
}
if (token_provider === 'openid' && isEnabled(process.env.OPENID_REUSE_TOKENS) === true) {
try {
const openIdConfig = getOpenIdConfig();
const tokenset = await openIdClient.refreshTokenGrant(openIdConfig, refreshToken);
const claims = tokenset.claims();
const user = await findUser({ email: claims.email });
if (!user) {
return res.status(401).redirect('/login');
}
const token = setOpenIDAuthTokens(tokenset, res);
return res.status(200).send({ token, user });
} catch (error) {
logger.error('[refreshController] OpenID token refresh error', error);
return res.status(403).send('Invalid OpenID refresh token');
}
}
try {
const payload = jwt.verify(refreshToken, process.env.JWT_REFRESH_SECRET);
const user = await getUserById(payload.id, '-password -__v -totpSecret');

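The surviving branch of refreshController is a standard jsonwebtoken flow; condensed to its essentials (assuming JWT_REFRESH_SECRET is set, error handling elided):

  const jwt = require('jsonwebtoken');

  function getUserIdFromRefreshToken(refreshToken) {
    // Throws if the token is expired or its signature does not match the secret.
    const payload = jwt.verify(refreshToken, process.env.JWT_REFRESH_SECRET);
    return payload.id; // user id embedded when the token was signed
  }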
View File

@@ -1,10 +1,4 @@
const {
Tools,
Constants,
FileSources,
webSearchKeys,
extractWebSearchEnvVars,
} = require('librechat-data-provider');
const { FileSources } = require('librechat-data-provider');
const {
Balance,
getFiles,
@@ -89,6 +83,7 @@ const deleteUserFiles = async (req) => {
const updateUserPluginsController = async (req, res) => {
const { user } = req;
const { pluginKey, action, auth, isEntityTool } = req.body;
let authService;
try {
if (!isEntityTool) {
const userPluginsService = await updateUserPluginsService(user, pluginKey, action);
@@ -100,55 +95,32 @@ const updateUserPluginsController = async (req, res) => {
}
}
if (auth == null) {
return res.status(200).send();
}
let keys = Object.keys(auth);
if (keys.length === 0 && pluginKey !== Tools.web_search) {
return res.status(200).send();
}
const values = Object.values(auth);
/** @type {number} */
let status = 200;
/** @type {string} */
let message;
/** @type {IPluginAuth | Error} */
let authService;
if (pluginKey === Tools.web_search) {
/** @type {TCustomConfig['webSearch']} */
const webSearchConfig = req.app.locals?.webSearch;
keys = extractWebSearchEnvVars({
keys: action === 'install' ? keys : webSearchKeys,
config: webSearchConfig,
});
}
if (action === 'install') {
for (let i = 0; i < keys.length; i++) {
authService = await updateUserPluginAuth(user.id, keys[i], pluginKey, values[i]);
if (authService instanceof Error) {
logger.error('[authService]', authService);
({ status, message } = authService);
if (auth) {
const keys = Object.keys(auth);
const values = Object.values(auth);
if (action === 'install' && keys.length > 0) {
for (let i = 0; i < keys.length; i++) {
authService = await updateUserPluginAuth(user.id, keys[i], pluginKey, values[i]);
if (authService instanceof Error) {
logger.error('[authService]', authService);
const { status, message } = authService;
res.status(status).send({ message });
}
}
}
} else if (action === 'uninstall') {
for (let i = 0; i < keys.length; i++) {
authService = await deleteUserPluginAuth(user.id, keys[i]);
if (authService instanceof Error) {
logger.error('[authService]', authService);
({ status, message } = authService);
if (action === 'uninstall' && keys.length > 0) {
for (let i = 0; i < keys.length; i++) {
authService = await deleteUserPluginAuth(user.id, keys[i]);
if (authService instanceof Error) {
logger.error('[authService]', authService);
const { status, message } = authService;
res.status(status).send({ message });
}
}
}
}
if (status === 200) {
return res.status(status).send();
}
res.status(status).send({ message });
res.status(200).send();
} catch (err) {
logger.error('[updateUserPluginsController]', err);
return res.status(500).json({ message: 'Something went wrong.' });

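Both branches above walk the auth map key by key and stop reporting success on the first Error result; the same loop in isolation (hypothetical helper mirroring the controller's shape):

  // Hypothetical: apply one auth entry at a time, surfacing the first failure.
  async function applyPluginAuth(userId, pluginKey, auth, updateUserPluginAuth) {
    for (const [key, value] of Object.entries(auth)) {
      const result = await updateUserPluginAuth(userId, key, pluginKey, value);
      if (result instanceof Error) {
        return { status: result.status, message: result.message };
      }
    }
    return { status: 200 };
  }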
View File

@@ -237,30 +237,6 @@ function createToolEndCallback({ req, res, artifactPromises }) {
return;
}
if (output.artifact[Tools.web_search]) {
artifactPromises.push(
(async () => {
const name = `${output.name}_${output.tool_call_id}_${nanoid()}`;
const attachment = {
name,
type: Tools.web_search,
messageId: metadata.run_id,
toolCallId: output.tool_call_id,
conversationId: metadata.thread_id,
[Tools.web_search]: { ...output.artifact[Tools.web_search] },
};
if (!res.headersSent) {
return attachment;
}
res.write(`event: attachment\ndata: ${JSON.stringify(attachment)}\n\n`);
return attachment;
})().catch((error) => {
logger.error('Error processing artifact content:', error);
return null;
}),
);
}
if (output.artifact.content) {
/** @type {FormattedContent[]} */
const content = output.artifact.content;

View File

@@ -39,6 +39,9 @@ const BaseClient = require('~/app/clients/BaseClient');
const { logger, sendEvent } = require('~/config');
const { createRun } = require('./run');
/** @typedef {import('@librechat/agents').MessageContentComplex} MessageContentComplex */
/** @typedef {import('@langchain/core/runnables').RunnableConfig} RunnableConfig */
/**
* @param {ServerRequest} req
* @param {Agent} agent
@@ -540,7 +543,7 @@ class AgentClient extends BaseClient {
}
async chatCompletion({ payload, abortController = null }) {
/** @type {Partial<GraphRunnableConfig>} */
/** @type {Partial<RunnableConfig> & { version: 'v1' | 'v2'; run_id?: string; streamMode: string }} */
let config;
/** @type {ReturnType<createRun>} */
let run;

View File

@@ -23,7 +23,6 @@ const { updateAction, getActions } = require('~/models/Action');
const { updateAgentProjects } = require('~/models/Agent');
const { getProjectByName } = require('~/models/Project');
const { deleteFileByFilter } = require('~/models/File');
const { revertAgentVersion } = require('~/models/Agent');
const { logger } = require('~/config');
const systemTools = {
@@ -105,13 +104,11 @@ const getAgentHandler = async (req, res) => {
return res.status(404).json({ error: 'Agent not found' });
}
agent.version = agent.versions ? agent.versions.length : 0;
if (agent.avatar && agent.avatar?.source === FileSources.s3) {
const originalUrl = agent.avatar.filepath;
agent.avatar.filepath = await refreshS3Url(agent.avatar);
if (originalUrl !== agent.avatar.filepath) {
await updateAgent({ id }, { avatar: agent.avatar }, req.user.id);
await updateAgent({ id }, { avatar: agent.avatar });
}
}
@@ -130,7 +127,6 @@ const getAgentHandler = async (req, res) => {
author: agent.author,
projectIds: agent.projectIds,
isCollaborative: agent.isCollaborative,
version: agent.version,
});
}
return res.status(200).json(agent);
@@ -169,9 +165,7 @@ const updateAgentHandler = async (req, res) => {
}
let updatedAgent =
Object.keys(updateData).length > 0
? await updateAgent({ id }, updateData, req.user.id)
: existingAgent;
Object.keys(updateData).length > 0 ? await updateAgent({ id }, updateData) : existingAgent;
if (projectIds || removeProjectIds) {
updatedAgent = await updateAgentProjects({
@@ -193,14 +187,6 @@ const updateAgentHandler = async (req, res) => {
return res.json(updatedAgent);
} catch (error) {
logger.error('[/Agents/:id] Error updating Agent', error);
if (error.statusCode === 409) {
return res.status(409).json({
error: error.message,
details: error.details,
});
}
res.status(500).json({ error: error.message });
}
};
@@ -407,7 +393,7 @@ const uploadAgentAvatarHandler = async (req, res) => {
},
};
promises.push(await updateAgent({ id: agent_id, author: req.user.id }, data, req.user.id));
promises.push(await updateAgent({ id: agent_id, author: req.user.id }, data));
const resolved = await Promise.all(promises);
res.status(201).json(resolved[0]);
@@ -425,66 +411,6 @@ const uploadAgentAvatarHandler = async (req, res) => {
}
};
/**
* Reverts an agent to a previous version from its version history.
* @route PATCH /agents/:id/revert
* @param {object} req - Express Request object
* @param {object} req.params - Request parameters
* @param {string} req.params.id - The ID of the agent to revert
* @param {object} req.body - Request body
* @param {number} req.body.version_index - The index of the version to revert to
* @param {object} req.user - Authenticated user information
* @param {string} req.user.id - User ID
* @param {string} req.user.role - User role
* @param {ServerResponse} res - Express Response object
* @returns {Promise<Agent>} 200 - The updated agent after reverting to the specified version
* @throws {Error} 400 - If version_index is missing
* @throws {Error} 403 - If user doesn't have permission to modify the agent
* @throws {Error} 404 - If agent not found
* @throws {Error} 500 - If there's an internal server error during the reversion process
*/
const revertAgentVersionHandler = async (req, res) => {
try {
const { id } = req.params;
const { version_index } = req.body;
if (version_index === undefined) {
return res.status(400).json({ error: 'version_index is required' });
}
const isAdmin = req.user.role === SystemRoles.ADMIN;
const existingAgent = await getAgent({ id });
if (!existingAgent) {
return res.status(404).json({ error: 'Agent not found' });
}
const isAuthor = existingAgent.author.toString() === req.user.id;
const hasEditPermission = existingAgent.isCollaborative || isAdmin || isAuthor;
if (!hasEditPermission) {
return res.status(403).json({
error: 'You do not have permission to modify this non-collaborative agent',
});
}
const updatedAgent = await revertAgentVersion({ id }, version_index);
if (updatedAgent.author) {
updatedAgent.author = updatedAgent.author.toString();
}
if (updatedAgent.author !== req.user.id) {
delete updatedAgent.author;
}
return res.json(updatedAgent);
} catch (error) {
logger.error('[/agents/:id/revert] Error reverting Agent version', error);
res.status(500).json({ error: error.message });
}
};
module.exports = {
createAgent: createAgentHandler,
getAgent: getAgentHandler,
@@ -493,5 +419,4 @@ module.exports = {
deleteAgent: deleteAgentHandler,
getListAgents: getListAgentsHandler,
uploadAgentAvatar: uploadAgentAvatarHandler,
revertAgentVersion: revertAgentVersionHandler,
};
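The removed revert endpoint documented above took a version index in the request body; a sketch of how a client would have called it (the URL shape follows the route registration; the fetch details and /api prefix are assumptions):

  async function revertAgent(agentId, versionIndex, token) {
    const res = await fetch(`/api/agents/${agentId}/revert`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json', Authorization: `Bearer ${token}` },
      body: JSON.stringify({ version_index: versionIndex }),
    });
    if (!res.ok) {
      throw new Error(`Revert failed with status ${res.status}`);
    }
    return res.json();
  }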

View File

@@ -326,15 +326,8 @@ const chatV1 = async (req, res) => {
file_ids = files.map(({ file_id }) => file_id);
if (file_ids.length || thread_file_ids.length) {
userMessage.file_ids = file_ids;
attachedFileIds = new Set([...file_ids, ...thread_file_ids]);
if (endpoint === EModelEndpoint.azureAssistants) {
userMessage.attachments = Array.from(attachedFileIds).map((file_id) => ({
file_id,
tools: [{ type: 'file_search' }],
}));
} else {
userMessage.file_ids = Array.from(attachedFileIds);
}
}
};

View File

@@ -1,5 +1,5 @@
const cookies = require('cookie');
const { getOpenIdConfig } = require('~/strategies');
const { Issuer } = require('openid-client');
const { logoutUser } = require('~/server/services/AuthService');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');
@@ -10,29 +10,20 @@ const logoutController = async (req, res) => {
const logout = await logoutUser(req, refreshToken);
const { status, message } = logout;
res.clearCookie('refreshToken');
res.clearCookie('token_provider');
const response = { message };
if (
req.user.openidId != null &&
isEnabled(process.env.OPENID_USE_END_SESSION_ENDPOINT) &&
process.env.OPENID_ISSUER
) {
const openIdConfig = getOpenIdConfig();
if (!openIdConfig) {
  logger.warn(
    '[logoutController] OpenID config not found. Please verify that the open id configuration and initialization are correct.',
  );
} else {
  const endSessionEndpoint = openIdConfig
    ? openIdConfig.serverMetadata().end_session_endpoint
    : null;
  if (endSessionEndpoint) {
    response.redirect = endSessionEndpoint;
  } else {
    logger.warn(
      '[logoutController] end_session_endpoint not found in OpenID issuer metadata. Please verify that the issuer is correct.',
    );
  }
}
const issuer = await Issuer.discover(process.env.OPENID_ISSUER);
const redirect = issuer.metadata.end_session_endpoint;
if (!redirect) {
  logger.warn(
    '[logoutController] end_session_endpoint not found in OpenID issuer metadata. Please verify that the issuer is correct.',
  );
} else {
  response.redirect = redirect;
}
}
return res.status(status).send(response);

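The restored logout branch relies on openid-client v5 discovery; reduced to a sketch (issuer URL is a placeholder):

  const { Issuer } = require('openid-client');

  async function getEndSessionEndpoint(issuerUrl) {
    const issuer = await Issuer.discover(issuerUrl);
    // Undefined when the provider does not advertise RP-initiated logout.
    return issuer.metadata.end_session_endpoint;
  }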
View File

@@ -6,7 +6,6 @@ const {
Permissions,
ToolCallTypes,
PermissionTypes,
loadWebSearchAuth,
} = require('librechat-data-provider');
const { processFileURL, uploadImageBuffer } = require('~/server/services/Files/process');
const { processCodeOutput } = require('~/server/services/Files/Code/process');
@@ -25,36 +24,6 @@ const toolAccessPermType = {
[Tools.execute_code]: PermissionTypes.RUN_CODE,
};
/**
* Verifies web search authentication, ensuring each category has at least
* one fully authenticated service.
*
* @param {ServerRequest} req - The request object
* @param {ServerResponse} res - The response object
* @returns {Promise<void>} A promise that resolves when the function has completed
*/
const verifyWebSearchAuth = async (req, res) => {
try {
const userId = req.user.id;
/** @type {TCustomConfig['webSearch']} */
const webSearchConfig = req.app.locals?.webSearch || {};
const result = await loadWebSearchAuth({
userId,
loadAuthValues,
webSearchConfig,
throwError: false,
});
return res.status(200).json({
authenticated: result.authenticated,
authTypes: result.authTypes,
});
} catch (error) {
console.error('Error in verifyWebSearchAuth:', error);
return res.status(500).json({ message: error.message });
}
};
/**
* @param {ServerRequest} req - The request object, containing information about the HTTP request.
* @param {ServerResponse} res - The response object, used to send back the desired HTTP response.
@@ -63,9 +32,6 @@ const verifyWebSearchAuth = async (req, res) => {
const verifyToolAuth = async (req, res) => {
try {
const { toolId } = req.params;
if (toolId === Tools.web_search) {
return await verifyWebSearchAuth(req, res);
}
const authFields = fieldsMap[toolId];
if (!authFields) {
res.status(404).json({ message: 'Tool not found' });

View File

@@ -24,13 +24,10 @@ const routes = require('./routes');
const { PORT, HOST, ALLOW_SOCIAL_LOGIN, DISABLE_COMPRESSION, TRUST_PROXY } = process.env ?? {};
// Allow PORT=0 to be used for automatic free port assignment
const port = isNaN(Number(PORT)) ? 3080 : Number(PORT);
const port = Number(PORT) || 3080;
const host = HOST || 'localhost';
const trusted_proxy = Number(TRUST_PROXY) || 1; /* trust first proxy by default */
const app = express();
const startServer = async () => {
if (typeof Bun !== 'undefined') {
axios.defaults.headers.common['Accept-Encoding'] = 'gzip';
@@ -39,9 +36,8 @@ const startServer = async () => {
logger.info('Connected to MongoDB');
await indexSync();
const app = express();
app.disable('x-powered-by');
app.set('trust proxy', trusted_proxy);
await AppService(app);
const indexPath = path.join(app.locals.paths.dist, 'index.html');
@@ -53,29 +49,28 @@ const startServer = async () => {
app.use(noIndex);
app.use(errorController);
app.use(express.json({ limit: '3mb' }));
app.use(express.urlencoded({ extended: true, limit: '3mb' }));
app.use(mongoSanitize());
app.use(express.urlencoded({ extended: true, limit: '3mb' }));
app.use(staticCache(app.locals.paths.dist));
app.use(staticCache(app.locals.paths.fonts));
app.use(staticCache(app.locals.paths.assets));
app.set('trust proxy', trusted_proxy);
app.use(cors());
app.use(cookieParser());
if (!isEnabled(DISABLE_COMPRESSION)) {
app.use(compression());
} else {
console.warn('Response compression has been disabled via DISABLE_COMPRESSION.');
}
// Serve static assets with aggressive caching
app.use(staticCache(app.locals.paths.dist));
app.use(staticCache(app.locals.paths.fonts));
app.use(staticCache(app.locals.paths.assets));
if (!ALLOW_SOCIAL_LOGIN) {
console.warn('Social logins are disabled. Set ALLOW_SOCIAL_LOGIN=true to enable them.');
console.warn(
'Social logins are disabled. Set Environment Variable "ALLOW_SOCIAL_LOGIN" to true to enable them.',
);
}
/* OAUTH */
app.use(passport.initialize());
passport.use(jwtLogin());
passport.use(await jwtLogin());
passport.use(passportLogin());
/* LDAP Auth */
@@ -84,7 +79,7 @@ const startServer = async () => {
}
if (isEnabled(ALLOW_SOCIAL_LOGIN)) {
await configureSocialLogins(app);
configureSocialLogins(app);
}
app.use('/oauth', routes.oauth);
@@ -133,7 +128,7 @@ const startServer = async () => {
});
app.listen(port, host, () => {
if (host === '0.0.0.0') {
if (host == '0.0.0.0') {
logger.info(
`Server listening on all interfaces at port ${port}. Use http://localhost:${port} to access it`,
);
@@ -181,6 +176,3 @@ process.on('uncaughtException', (err) => {
process.exit(1);
});
// export app for easier testing purposes
module.exports = app;

View File

@@ -1,78 +0,0 @@
const fs = require('fs');
const path = require('path');
const request = require('supertest');
const { MongoMemoryServer } = require('mongodb-memory-server');
const mongoose = require('mongoose');
describe('Server Configuration', () => {
// Increase the default timeout to allow for Mongo cleanup
jest.setTimeout(30_000);
let mongoServer;
let app;
/** Mocked fs.readFileSync for index.html */
const originalReadFileSync = fs.readFileSync;
beforeAll(() => {
fs.readFileSync = function (filepath, options) {
if (filepath.includes('index.html')) {
return '<!DOCTYPE html><html><head><title>LibreChat</title></head><body><div id="root"></div></body></html>';
}
return originalReadFileSync(filepath, options);
};
});
afterAll(() => {
// Restore original fs.readFileSync
fs.readFileSync = originalReadFileSync;
});
beforeAll(async () => {
mongoServer = await MongoMemoryServer.create();
process.env.MONGO_URI = mongoServer.getUri();
process.env.PORT = '0'; // Use a random available port
app = require('~/server');
// Wait for the app to be healthy
await healthCheckPoll(app);
});
afterAll(async () => {
await mongoServer.stop();
await mongoose.disconnect();
});
it('should return OK for /health', async () => {
const response = await request(app).get('/health');
expect(response.status).toBe(200);
expect(response.text).toBe('OK');
});
it('should not cache index page', async () => {
const response = await request(app).get('/');
expect(response.status).toBe(200);
expect(response.headers['cache-control']).toBe('no-cache, no-store, must-revalidate');
expect(response.headers['pragma']).toBe('no-cache');
expect(response.headers['expires']).toBe('0');
});
});
// Polls the /health endpoint every 30ms for up to 10 seconds to wait for the server to start completely
async function healthCheckPoll(app, retries = 0) {
const maxRetries = Math.floor(10000 / 30); // 10 seconds / 30ms
try {
const response = await request(app).get('/health');
if (response.status === 200) {
return; // App is healthy
}
} catch (error) {
// Ignore connection errors during polling
}
if (retries < maxRetries) {
await new Promise((resolve) => setTimeout(resolve, 30));
await healthCheckPoll(app, retries + 1);
} else {
throw new Error('App did not become healthy within 10 seconds.');
}
}

View File

@@ -1,13 +1,9 @@
const cookies = require('cookie');
const { isEnabled } = require('~/server/utils');
const passport = require('passport');
// This middleware does not require authentication,
// but if the user is authenticated, it will set the user object.
const optionalJwtAuth = (req, res, next) => {
const cookieHeader = req.headers.cookie;
const tokenProvider = cookieHeader ? cookies.parse(cookieHeader).token_provider : null;
const callback = (err, user) => {
passport.authenticate('jwt', { session: false }, (err, user) => {
if (err) {
return next(err);
}
@@ -15,11 +11,7 @@ const optionalJwtAuth = (req, res, next) => {
req.user = user;
}
next();
};
if (tokenProvider === 'openid' && isEnabled(process.env.OPENID_REUSE_TOKENS)) {
return passport.authenticate('openidJwt', { session: false }, callback)(req, res, next);
}
passport.authenticate('jwt', { session: false }, callback)(req, res, next);
})(req, res, next);
};
module.exports = optionalJwtAuth;

View File

@@ -1,23 +1,5 @@
const passport = require('passport');
const cookies = require('cookie');
const { isEnabled } = require('~/server/utils');
/**
* Custom Middleware to handle JWT authentication, with support for OpenID token reuse
* Switches between JWT and OpenID authentication based on cookies and environment settings
*/
const requireJwtAuth = (req, res, next) => {
// Check if token provider is specified in cookies
const cookieHeader = req.headers.cookie;
const tokenProvider = cookieHeader ? cookies.parse(cookieHeader).token_provider : null;
// Use OpenID authentication if token provider is OpenID and OPENID_REUSE_TOKENS is enabled
if (tokenProvider === 'openid' && isEnabled(process.env.OPENID_REUSE_TOKENS)) {
return passport.authenticate('openidJwt', { session: false })(req, res, next);
}
// Default to standard JWT authentication
return passport.authenticate('jwt', { session: false })(req, res, next);
};
const requireJwtAuth = passport.authenticate('jwt', { session: false });
module.exports = requireJwtAuth;
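After the simplification above, requireJwtAuth is a plain passport middleware again; typical usage on a route (illustrative handler):

  const express = require('express');
  const requireJwtAuth = require('~/server/middleware/requireJwtAuth');

  const router = express.Router();
  router.get('/me', requireJwtAuth, (req, res) => {
    // req.user is populated by the jwt strategy when the token is valid
    res.json({ id: req.user.id });
  });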

View File

@@ -1,11 +1,11 @@
jest.mock('~/cache/getLogStores');
const request = require('supertest');
const express = require('express');
const configRoute = require('../config');
const routes = require('../');
// file deepcode ignore UseCsurfForExpress/test: test
const app = express();
app.disable('x-powered-by');
app.use('/api/config', configRoute);
app.use('/api/config', routes.config);
afterEach(() => {
delete process.env.APP_TITLE;

View File

@@ -107,7 +107,7 @@ router.post('/:agent_id', async (req, res) => {
.filter((tool) => !(tool && (tool.includes(domain) || tool.includes(action_id))))
.concat(functions.map((tool) => `${tool.function.name}${actionDelimiter}${domain}`));
const updatedAgent = await updateAgent(agentQuery, { tools, actions }, req.user.id);
const updatedAgent = await updateAgent(agentQuery, { tools, actions });
// Only update user field for new actions
const actionUpdateData = { metadata, agent_id };
@@ -172,7 +172,7 @@ router.delete('/:agent_id/:action_id', async (req, res) => {
const updatedTools = tools.filter((tool) => !(tool && tool.includes(domain)));
await updateAgent(agentQuery, { tools: updatedTools, actions: updatedActions }, req.user.id);
await updateAgent(agentQuery, { tools: updatedTools, actions: updatedActions });
// If admin, can delete any action, otherwise only user's actions
const actionQuery = admin ? { action_id } : { action_id, user: req.user.id };
await deleteAction(actionQuery);

View File

@@ -78,15 +78,6 @@ router.post('/:id/duplicate', checkAgentCreate, v1.duplicateAgent);
*/
router.delete('/:id', checkAgentCreate, v1.deleteAgent);
/**
* Reverts an agent to a previous version.
* @route POST /agents/:id/revert
* @param {string} req.params.id - Agent identifier.
* @param {number} req.body.version_index - Index of the version to revert to.
* @returns {Agent} 200 - success response - application/json
*/
router.post('/:id/revert', checkGlobalAgentShare, v1.revertAgentVersion);
/**
* Returns a list of agents.
* @route GET /agents

View File

@@ -85,26 +85,6 @@ router.get('/', async function (req, res) {
bundlerURL: process.env.SANDPACK_BUNDLER_URL,
staticBundlerURL: process.env.SANDPACK_STATIC_BUNDLER_URL,
};
/** @type {TCustomConfig['webSearch']} */
const webSearchConfig = req.app.locals.webSearch;
if (
webSearchConfig != null &&
(webSearchConfig.searchProvider ||
webSearchConfig.scraperType ||
webSearchConfig.rerankerType)
) {
payload.webSearch = {};
}
if (webSearchConfig?.searchProvider) {
payload.webSearch.searchProvider = webSearchConfig.searchProvider;
}
if (webSearchConfig?.scraperType) {
payload.webSearch.scraperType = webSearchConfig.scraperType;
}
if (webSearchConfig?.rerankerType) {
payload.webSearch.rerankerType = webSearchConfig.rerankerType;
}
if (ldap) {
payload.ldap = ldap;

View File

@@ -74,7 +74,7 @@ router.post('/gen_title', async (req, res) => {
res.status(200).json({ title });
} else {
res.status(404).json({
message: "Title not found or method not implemented for the conversation's endpoint",
message: 'Title not found or method not implemented for the conversation\'s endpoint',
});
}
});

View File

@@ -121,14 +121,6 @@ router.delete('/', async (req, res) => {
await processDeleteRequest({ req, files: assistantFiles });
res.status(200).json({ message: 'File associations removed successfully from assistant' });
return;
} else if (
req.body.assistant_id &&
req.body.files?.[0]?.filepath === EModelEndpoint.azureAssistants
) {
await processDeleteRequest({ req, files: req.body.files });
return res
.status(200)
.json({ message: 'File associations removed successfully from Azure Assistant' });
}
await processDeleteRequest({ req, files: dbFiles });

View File

@@ -8,9 +8,8 @@ const {
setBalanceConfig,
checkDomainAllowed,
} = require('~/server/middleware');
const { setAuthTokens, setOpenIDAuthTokens } = require('~/server/services/AuthService');
const { setAuthTokens } = require('~/server/services/AuthService');
const { logger } = require('~/config');
const { isEnabled } = require('~/server/utils');
const router = express.Router();
@@ -29,15 +28,7 @@ const oauthHandler = async (req, res) => {
if (req.banned) {
return;
}
if (
req.user &&
req.user.provider == 'openid' &&
isEnabled(process.env.OPENID_REUSE_TOKENS) === true
) {
setOpenIDAuthTokens(req.user.tokenset, res);
} else {
await setAuthTokens(req.user._id, res);
}
await setAuthTokens(req.user._id, res);
res.redirect(domains.client);
} catch (err) {
logger.error('Error in setting authentication tokens:', err);

View File

@@ -25,7 +25,6 @@ jest.mock('./start/checks', () => ({
checkHealth: jest.fn(),
checkConfig: jest.fn(),
checkAzureVariables: jest.fn(),
checkWebSearchConfig: jest.fn(),
}));
const AppService = require('./AppService');

View File

@@ -1,18 +1,11 @@
const {
FileSources,
EModelEndpoint,
loadOCRConfig,
processMCPEnv,
EModelEndpoint,
getConfigDefaults,
loadWebSearchConfig,
} = require('librechat-data-provider');
const {
checkHealth,
checkConfig,
checkVariables,
checkAzureVariables,
checkWebSearchConfig,
} = require('./start/checks');
const { checkVariables, checkHealth, checkConfig, checkAzureVariables } = require('./start/checks');
const { azureAssistantsDefaults, assistantsConfigSetup } = require('./start/assistants');
const { initializeAzureBlobService } = require('./Files/Azure/initialize');
const { initializeFirebase } = require('./Files/Firebase/initialize');
@@ -42,8 +35,6 @@ const AppService = async (app) => {
const configDefaults = getConfigDefaults();
const ocr = loadOCRConfig(config.ocr);
const webSearch = loadWebSearchConfig(config.webSearch);
checkWebSearchConfig(webSearch);
const filteredTools = config.filteredTools;
const includedTools = config.includedTools;
const fileStrategy = config.fileStrategy ?? configDefaults.fileStrategy;
@@ -88,7 +79,6 @@ const AppService = async (app) => {
const defaultLocals = {
ocr,
paths,
webSearch,
fileStrategy,
socialLogins,
filteredTools,

View File

@@ -141,14 +141,6 @@ describe('AppService', () => {
balance: { enabled: true },
filteredTools: undefined,
includedTools: undefined,
webSearch: {
cohereApiKey: '${COHERE_API_KEY}',
firecrawlApiKey: '${FIRECRAWL_API_KEY}',
firecrawlApiUrl: '${FIRECRAWL_API_URL}',
jinaApiKey: '${JINA_API_KEY}',
safeSearch: 1,
serperApiKey: '${SERPER_API_KEY}',
},
});
});
@@ -545,7 +537,7 @@ describe('AppService updating app.locals and issuing warnings', () => {
const { logger } = require('~/config');
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining(
"The 'assistants' endpoint has both 'supportedIds' and 'excludedIds' defined.",
'The \'assistants\' endpoint has both \'supportedIds\' and \'excludedIds\' defined.',
),
);
});
@@ -567,7 +559,7 @@ describe('AppService updating app.locals and issuing warnings', () => {
const { logger } = require('~/config');
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining(
"The 'assistants' endpoint has both 'privateAssistants' and 'supportedIds' or 'excludedIds' defined.",
'The \'assistants\' endpoint has both \'privateAssistants\' and \'supportedIds\' or \'excludedIds\' defined.',
),
);
});

View File

@@ -377,62 +377,13 @@ const setAuthTokens = async (userId, res, sessionId = null) => {
secure: isProduction,
sameSite: 'strict',
});
res.cookie('token_provider', 'librechat', {
expires: new Date(refreshTokenExpires),
httpOnly: true,
secure: isProduction,
sameSite: 'strict',
});
return token;
} catch (error) {
logger.error('[setAuthTokens] Error in setting authentication tokens:', error);
throw error;
}
};
/**
* @function setOpenIDAuthTokens
* Set OpenID Authentication Tokens
* //type tokenset from openid-client
* @param {import('openid-client').TokenEndpointResponse & import('openid-client').TokenEndpointResponseHelpers} tokenset
* - The tokenset object containing access and refresh tokens
* @param {Object} res - response object
* @returns {String} - access token
*/
const setOpenIDAuthTokens = (tokenset, res) => {
try {
if (!tokenset) {
logger.error('[setOpenIDAuthTokens] No tokenset found in request');
return;
}
const { REFRESH_TOKEN_EXPIRY } = process.env ?? {};
const expiryInMilliseconds = eval(REFRESH_TOKEN_EXPIRY) ?? 1000 * 60 * 60 * 24 * 7; // 7 days default
const expirationDate = new Date(Date.now() + expiryInMilliseconds);
if (tokenset == null) {
logger.error('[setOpenIDAuthTokens] No tokenset found in request');
return;
}
if (!tokenset.access_token || !tokenset.refresh_token) {
logger.error('[setOpenIDAuthTokens] No access or refresh token found in tokenset');
return;
}
res.cookie('refreshToken', tokenset.refresh_token, {
expires: expirationDate,
httpOnly: true,
secure: isProduction,
sameSite: 'strict',
});
res.cookie('token_provider', 'openid', {
expires: expirationDate,
httpOnly: true,
secure: isProduction,
sameSite: 'strict',
});
return tokenset.access_token;
} catch (error) {
logger.error('[setOpenIDAuthTokens] Error in setting authentication tokens:', error);
throw error;
}
};
/**
* Resend Verification Email
@@ -501,5 +452,4 @@ module.exports = {
resetPassword,
requestPasswordReset,
resendVerificationEmail,
setOpenIDAuthTokens,
};
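The removed helper above derived cookie expiry with eval(REFRESH_TOKEN_EXPIRY); the same default can be computed without eval (assuming the variable holds a plain number of milliseconds rather than an arithmetic expression):

  function getRefreshTokenExpiryMs() {
    const parsed = Number(process.env.REFRESH_TOKEN_EXPIRY);
    // Fall back to 7 days, matching the removed code's default.
    return Number.isFinite(parsed) && parsed > 0 ? parsed : 1000 * 60 * 60 * 24 * 7;
  }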

View File

@@ -10,7 +10,17 @@ const getLogStores = require('~/cache/getLogStores');
* */
async function getCustomConfig() {
const cache = getLogStores(CacheKeys.CONFIG_STORE);
return (await cache.get(CacheKeys.CUSTOM_CONFIG)) || (await loadCustomConfig());
let customConfig = await cache.get(CacheKeys.CUSTOM_CONFIG);
if (!customConfig) {
customConfig = await loadCustomConfig();
}
if (!customConfig) {
return null;
}
return customConfig;
}
/**

View File

@@ -29,14 +29,7 @@ async function loadConfigEndpoints(req) {
for (let i = 0; i < customEndpoints.length; i++) {
const endpoint = customEndpoints[i];
const {
baseURL,
apiKey,
name: configName,
iconURL,
modelDisplayLabel,
customParams,
} = endpoint;
const { baseURL, apiKey, name: configName, iconURL, modelDisplayLabel } = endpoint;
const name = normalizeEndpointName(configName);
const resolvedApiKey = extractEnvVariable(apiKey);
@@ -48,7 +41,6 @@ async function loadConfigEndpoints(req) {
userProvideURL: isUserProvided(resolvedBaseURL),
modelDisplayLabel,
iconURL,
customParams,
};
}
}

View File

@@ -1,18 +1,10 @@
const path = require('path');
const {
CacheKeys,
configSchema,
EImageOutputType,
validateSettingDefinitions,
agentParamSettings,
paramSettings,
} = require('librechat-data-provider');
const { CacheKeys, configSchema, EImageOutputType } = require('librechat-data-provider');
const getLogStores = require('~/cache/getLogStores');
const loadYaml = require('~/utils/loadYaml');
const { logger } = require('~/config');
const axios = require('axios');
const yaml = require('js-yaml');
const keyBy = require('lodash/keyBy');
const projectRoot = path.resolve(__dirname, '..', '..', '..', '..');
const defaultConfigPath = path.resolve(projectRoot, 'librechat.yaml');
@@ -113,10 +105,6 @@ https://www.librechat.ai/docs/configuration/stt_tts`);
logger.debug('Custom config:', customConfig);
}
(customConfig.endpoints?.custom ?? [])
.filter((endpoint) => endpoint.customParams)
.forEach((endpoint) => parseCustomParams(endpoint.name, endpoint.customParams));
if (customConfig.cache) {
const cache = getLogStores(CacheKeys.CONFIG_STORE);
await cache.set(CacheKeys.CUSTOM_CONFIG, customConfig);
@@ -129,52 +117,4 @@ https://www.librechat.ai/docs/configuration/stt_tts`);
return customConfig;
}
// Validate and fill out missing values for custom parameters
function parseCustomParams(endpointName, customParams) {
const paramEndpoint = customParams.defaultParamsEndpoint;
customParams.paramDefinitions = customParams.paramDefinitions || [];
// Checks if `defaultParamsEndpoint` is a key in `paramSettings`.
const validEndpoints = new Set([
...Object.keys(paramSettings),
...Object.keys(agentParamSettings),
]);
if (!validEndpoints.has(paramEndpoint)) {
throw new Error(
`defaultParamsEndpoint of "${endpointName}" endpoint is invalid. ` +
`Valid options are ${Array.from(validEndpoints).join(', ')}`,
);
}
// creates default param maps
const regularParams = paramSettings[paramEndpoint] ?? [];
const agentParams = agentParamSettings[paramEndpoint] ?? [];
const defaultParams = regularParams.concat(agentParams);
const defaultParamsMap = keyBy(defaultParams, 'key');
// TODO: Remove this check once we support new parameters not part of default parameters.
// Checks if every key in `paramDefinitions` is valid.
const validKeys = new Set(Object.keys(defaultParamsMap));
const paramKeys = customParams.paramDefinitions.map((param) => param.key);
if (paramKeys.some((key) => !validKeys.has(key))) {
throw new Error(
`paramDefinitions of "${endpointName}" endpoint contains invalid key(s). ` +
`Valid parameter keys are ${Array.from(validKeys).join(', ')}`,
);
}
// Fill out missing values for custom param definitions
customParams.paramDefinitions = customParams.paramDefinitions.map((param) => {
return { ...defaultParamsMap[param.key], ...param, optionType: 'custom' };
});
try {
validateSettingDefinitions(customParams.paramDefinitions);
} catch (e) {
throw new Error(
`Custom parameter definitions for "${endpointName}" endpoint is malformed: ${e.message}`,
);
}
}
module.exports = loadCustomConfig;
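parseCustomParams above validates a customParams block against the endpoint's known parameter settings and fills in missing fields; the input shape it expects looks roughly like this (values are illustrative, drawn from the tests below):

  const customParams = {
    defaultParamsEndpoint: 'google', // must be a key of paramSettings or agentParamSettings
    paramDefinitions: [
      // Each key must exist among the endpoint's default parameters;
      // missing fields are merged in from the defaults and optionType is forced to 'custom'.
      { key: 'temperature', default: 0.7, range: { min: 0.1, max: 0.9, step: 0.1 } },
    ],
  };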

View File

@@ -1,34 +1,6 @@
jest.mock('axios');
jest.mock('~/cache/getLogStores');
jest.mock('~/utils/loadYaml');
jest.mock('librechat-data-provider', () => {
const actual = jest.requireActual('librechat-data-provider');
return {
...actual,
paramSettings: { foo: {}, bar: {}, custom: {} },
agentParamSettings: {
custom: [],
google: [
{
key: 'pressure',
type: 'string',
component: 'input',
},
{
key: 'temperature',
type: 'number',
component: 'slider',
default: 0.5,
range: {
min: 0,
max: 2,
step: 0.01,
},
},
],
},
};
});
const axios = require('axios');
const loadCustomConfig = require('./loadCustomConfig');
@@ -178,126 +150,4 @@ describe('loadCustomConfig', () => {
expect(logger.info).toHaveBeenCalledWith(JSON.stringify(mockConfig, null, 2));
expect(logger.debug).toHaveBeenCalledWith('Custom config:', mockConfig);
});
describe('parseCustomParams', () => {
const mockConfig = {
version: '1.0',
cache: false,
endpoints: {
custom: [
{
name: 'Google',
apiKey: 'user_provided',
customParams: {},
},
],
},
};
async function loadCustomParams(customParams) {
mockConfig.endpoints.custom[0].customParams = customParams;
loadYaml.mockReturnValue(mockConfig);
return await loadCustomConfig();
}
beforeEach(() => {
jest.resetAllMocks();
process.env.CONFIG_PATH = 'validConfig.yaml';
});
it('returns no error when customParams is undefined', async () => {
const result = await loadCustomParams(undefined);
expect(result).toEqual(mockConfig);
});
it('returns no error when customParams is valid', async () => {
const result = await loadCustomParams({
defaultParamsEndpoint: 'google',
paramDefinitions: [
{
key: 'temperature',
default: 0.5,
},
],
});
expect(result).toEqual(mockConfig);
});
it('throws an error when paramDefinitions contain unsupported keys', async () => {
const malformedCustomParams = {
defaultParamsEndpoint: 'google',
paramDefinitions: [
{ key: 'temperature', default: 0.5 },
{ key: 'unsupportedKey', range: 0.5 },
],
};
await expect(loadCustomParams(malformedCustomParams)).rejects.toThrow(
'paramDefinitions of "Google" endpoint contains invalid key(s). Valid parameter keys are pressure, temperature',
);
});
it('throws an error when paramDefinitions is malformed', async () => {
const malformedCustomParams = {
defaultParamsEndpoint: 'google',
paramDefinitions: [
{
key: 'temperature',
type: 'noomba',
component: 'inpoot',
optionType: 'custom',
},
],
};
await expect(loadCustomParams(malformedCustomParams)).rejects.toThrow(
/Custom parameter definitions for "Google" endpoint is malformed:/,
);
});
it('throws an error when defaultParamsEndpoint is not provided', async () => {
const malformedCustomParams = { defaultParamsEndpoint: undefined };
await expect(loadCustomParams(malformedCustomParams)).rejects.toThrow(
'defaultParamsEndpoint of "Google" endpoint is invalid. Valid options are foo, bar, custom, google',
);
});
it('fills the paramDefinitions with missing values', async () => {
const customParams = {
defaultParamsEndpoint: 'google',
paramDefinitions: [
{ key: 'temperature', default: 0.7, range: { min: 0.1, max: 0.9, step: 0.1 } },
{ key: 'pressure', component: 'textarea' },
],
};
const parsedConfig = await loadCustomParams(customParams);
const paramDefinitions = parsedConfig.endpoints.custom[0].customParams.paramDefinitions;
expect(paramDefinitions).toEqual([
{
columnSpan: 1,
component: 'slider',
default: 0.7, // overridden
includeInput: true,
key: 'temperature',
label: 'temperature',
optionType: 'custom',
range: {
// overridden
max: 0.9,
min: 0.1,
step: 0.1,
},
type: 'number',
},
{
columnSpan: 1,
component: 'textarea', // overridden
key: 'pressure',
label: 'pressure',
optionType: 'custom',
placeholder: '',
type: 'string',
},
]);
});
});
});

View File

@@ -15,14 +15,20 @@ function checkPromptCacheSupport(modelName) {
return false;
}
return (
/claude-3[-.]7/.test(modelMatch) ||
/claude-3[-.]5-(?:sonnet|haiku)/.test(modelMatch) ||
/claude-3-(?:sonnet|haiku|opus)?/.test(modelMatch) ||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(modelMatch) ||
/claude-[4-9]-(?:sonnet|opus|haiku)?/.test(modelMatch) ||
/claude-4(?:-(?:sonnet|opus|haiku))?/.test(modelMatch)
);
if (
modelMatch === 'claude-3-7-sonnet' ||
modelMatch === 'claude-3-5-sonnet' ||
modelMatch === 'claude-3-5-haiku' ||
modelMatch === 'claude-3-haiku' ||
modelMatch === 'claude-3-opus' ||
modelMatch === 'claude-3.7-sonnet' ||
modelMatch === 'claude-3.5-sonnet' ||
modelMatch === 'claude-3.5-haiku'
) {
return true;
}
return false;
}
/**
@@ -45,14 +51,6 @@ function getClaudeHeaders(model, supportsCacheControl) {
'anthropic-beta':
'token-efficient-tools-2025-02-19,output-128k-2025-02-19,prompt-caching-2024-07-31',
};
} else if (
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(model) ||
/claude-[4-9]-(?:sonnet|opus|haiku)?/.test(model) ||
/claude-4(?:-(?:sonnet|opus|haiku))?/.test(model)
) {
return {
'anthropic-beta': 'prompt-caching-2024-07-31',
};
} else {
return {
'anthropic-beta': 'prompt-caching-2024-07-31',
@@ -74,8 +72,7 @@ function configureReasoning(anthropicInput, extendedOptions = {}) {
if (
extendedOptions.thinking &&
updatedOptions?.model &&
(/claude-3[-.]7/.test(updatedOptions.model) ||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(updatedOptions.model))
/claude-3[-.]7/.test(updatedOptions.model)
) {
updatedOptions.thinking = {
type: 'enabled',

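To illustrate the restored matching above (assuming the module's internal matcher strips date suffixes the way the exact-match list expects):

  checkPromptCacheSupport('claude-3-5-sonnet'); // true: exact match in the list
  checkPromptCacheSupport('claude-2.1'); // false: no prompt-cache support
  getClaudeHeaders('claude-3-7-sonnet', true); // includes the token-efficient-tools beta flags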
View File

@@ -25,10 +25,10 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {
let credentials = isUserProvided
? await getUserKey({ userId: req.user.id, name: EModelEndpoint.bedrock })
: {
accessKeyId: BEDROCK_AWS_ACCESS_KEY_ID,
secretAccessKey: BEDROCK_AWS_SECRET_ACCESS_KEY,
...(BEDROCK_AWS_SESSION_TOKEN && { sessionToken: BEDROCK_AWS_SESSION_TOKEN }),
};
if (!credentials) {
throw new Error('Bedrock credentials not provided. Please provide them again.');

View File

@@ -105,7 +105,6 @@ const initializeClient = async ({ req, res, endpointOption, optionsOnly, overrid
headers: resolvedHeaders,
addParams: endpointConfig.addParams,
dropParams: endpointConfig.dropParams,
customParams: endpointConfig.customParams,
titleConvo: endpointConfig.titleConvo,
titleModel: endpointConfig.titleModel,
forcePrompt: endpointConfig.forcePrompt,

View File

@@ -1,74 +0,0 @@
const axios = require('axios');
const fs = require('fs');
const { logger } = require('~/config');
/**
* Uploads a document to Azure Document Intelligence API and returns the Markdown result.
*
* @param {Object} params - The parameters for the Azure Document Intelligence request.
* @param {string} params.filePath - The path to the file on disk.
* @param {string} params.apiKey - Azure API key.
* @param {string} params.endpoint - Azure Document Intelligence endpoint.
* @param {string} params.modelId - The model ID to use for analysis.
* @returns {Promise<Object>} - The Document Intelligence result.
*/
async function uploadAzureDocumentIntelligence({ filePath, apiKey, endpoint, modelId }) {
// Read and encode file
const fileBuffer = fs.readFileSync(filePath);
const base64Source = fileBuffer.toString('base64');
// Build URL (ensure no trailing slash on endpoint)
const url = `${endpoint.replace(/\/+$/, '')}/documentModels/${modelId}:analyze?outputContentFormat=markdown`;
try {
// Kick off the analysis
const response = await axios.post(
url,
{ base64Source },
{
headers: {
'Ocp-Apim-Subscription-Key': apiKey,
'Content-Type': 'application/json',
},
},
);
// Axios lower-cases header keys, but allow either form
const headers = response.headers || {};
const operationLocation = headers['operation-location'] || headers['Operation-Location'];
if (!operationLocation) {
throw new Error('Missing Operation-Location header in Azure response.');
}
// Poll until done
let resultContent;
while (true) {
const pollResponse = await axios.get(operationLocation, {
headers: { 'Ocp-Apim-Subscription-Key': apiKey },
});
const { status, resultUrl } = pollResponse.data;
if (status === 'succeeded') {
const final = await axios.get(resultUrl, {
headers: { 'Ocp-Apim-Subscription-Key': apiKey },
});
resultContent = final.data.analyzeResult.content;
break;
}
if (status === 'failed') {
throw new Error('Azure Document Intelligence processing failed.');
}
// Wait 2s before retry
await new Promise((r) => setTimeout(r, 2000));
}
return resultContent;
} catch (error) {
logger.error('Error performing Azure Document Intelligence:', error.message);
throw error;
}
}
module.exports = {
uploadAzureDocumentIntelligence,
};
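The removed helper above was called with a local file path plus Azure credentials and resolved to the Markdown content; a sketch of a call site (all values are placeholders):

  // Inside an async function:
  const markdown = await uploadAzureDocumentIntelligence({
    filePath: '/tmp/contract.pdf',
    apiKey: process.env.AZURE_DOC_INTEL_KEY,
    endpoint: 'https://example.cognitiveservices.azure.com/documentintelligence',
    modelId: 'prebuilt-layout',
  });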

View File

@@ -1,103 +0,0 @@
const fs = require('fs');
const mockAxios = {
interceptors: {
request: { use: jest.fn(), eject: jest.fn() },
response: { use: jest.fn(), eject: jest.fn() },
},
create: jest.fn().mockReturnValue({
defaults: { proxy: null },
get: jest.fn().mockResolvedValue({ data: {} }),
post: jest.fn().mockResolvedValue({ data: {} }),
put: jest.fn().mockResolvedValue({ data: {} }),
delete: jest.fn().mockResolvedValue({ data: {} }),
}),
get: jest.fn().mockResolvedValue({ data: {} }),
post: jest.fn().mockResolvedValue({ data: {} }),
put: jest.fn().mockResolvedValue({ data: {} }),
delete: jest.fn().mockResolvedValue({ data: {} }),
reset: jest.fn().mockImplementation(function () {
this.get.mockClear();
this.post.mockClear();
this.put.mockClear();
this.delete.mockClear();
this.create.mockClear();
}),
};
jest.mock('axios', () => mockAxios);
jest.mock('fs');
jest.mock('~/config', () => ({
logger: { error: jest.fn() },
}));
const { uploadAzureDocumentIntelligence } = require('./crud');
describe('AzureDocumentIntelligence Service', () => {
beforeEach(() => {
mockAxios.reset();
fs.readFileSync.mockReset();
});
it('should upload and poll until it gets the Markdown result', async () => {
const mockFileBuffer = Buffer.from('test file content');
const mockBase64 = mockFileBuffer.toString('base64');
const mockOpLocation = 'https://azure-ocr-endpoint.com/operations/123';
const mockResultUrl = 'https://azure-ocr-endpoint.com/results/123';
const mockFinal = { analyzeResult: { content: 'Final analysis result' } };
// fs.readFileSync returns our buffer
fs.readFileSync.mockReturnValue(mockFileBuffer);
// First axios.post => returns Operation-Location header
mockAxios.post.mockResolvedValueOnce({
headers: { 'Operation-Location': mockOpLocation },
});
// First axios.get => poll success, returns status + resultUrl
// Second axios.get => fetch final result
mockAxios.get
.mockResolvedValueOnce({ data: { status: 'succeeded', resultUrl: mockResultUrl } })
.mockResolvedValueOnce({ data: mockFinal });
const result = await uploadAzureDocumentIntelligence({
filePath: '/path/to/test.pdf',
apiKey: 'azure-api-key',
endpoint: 'https://azure-ocr-endpoint.com/',
modelId: 'prebuilt-layout',
});
// Validate read
expect(fs.readFileSync).toHaveBeenCalledWith('/path/to/test.pdf');
// Validate initial POST
expect(mockAxios.post).toHaveBeenCalledWith(
'https://azure-ocr-endpoint.com/documentModels/prebuilt-layout:analyze?outputContentFormat=markdown',
{ base64Source: mockBase64 },
expect.objectContaining({
headers: expect.objectContaining({
'Ocp-Apim-Subscription-Key': 'azure-api-key',
'Content-Type': 'application/json',
}),
}),
);
// Validate polling GET
expect(mockAxios.get).toHaveBeenCalledWith(
mockOpLocation,
expect.objectContaining({
headers: expect.objectContaining({ 'Ocp-Apim-Subscription-Key': 'azure-api-key' }),
}),
);
// Validate final fetch GET
expect(mockAxios.get).toHaveBeenCalledWith(
mockResultUrl,
expect.objectContaining({
headers: expect.objectContaining({ 'Ocp-Apim-Subscription-Key': 'azure-api-key' }),
}),
);
expect(result).toEqual('Final analysis result');
});
});

View File

@@ -1,5 +0,0 @@
const crud = require('./crud');
module.exports = {
...crud,
};

View File

@@ -2,12 +2,7 @@
const fs = require('fs');
const path = require('path');
const FormData = require('form-data');
const {
FileSources,
envVarRegex,
extractEnvVariable,
extractVariableName,
} = require('librechat-data-provider');
const { FileSources, envVarRegex, extractEnvVariable } = require('librechat-data-provider');
const { loadAuthValues } = require('~/server/services/Tools/credentials');
const { logger, createAxiosInstance } = require('~/config');
const { logAxiosError } = require('~/utils/axios');
@@ -113,6 +108,11 @@ async function performOCR({
});
}
function extractVariableName(str) {
const match = str.match(envVarRegex);
return match ? match[1] : null;
}
/**
* Uploads a file to the Mistral OCR API and processes the OCR result.
*

View File

@@ -54,7 +54,7 @@ async function deleteOpenAIFile(req, file, openai) {
throw new Error('OpenAI returned `false` for deleted status');
}
logger.debug(
`[deleteOpenAIFile] User ${req.user.id} successfully deleted file "${file.file_id}" from OpenAI`,
`[deleteOpenAIFile] User ${req.user.id} successfully deleted ${file.file_id} from OpenAI`,
);
} catch (error) {
logger.error('[deleteOpenAIFile] Error deleting file from OpenAI: ' + error.message);

View File

@@ -5,10 +5,9 @@ const { EModelEndpoint } = require('librechat-data-provider');
* Resizes an image from a given buffer based on the specified resolution.
*
* @param {Buffer} inputBuffer - The buffer of the image to be resized.
* @param {'low' | 'high' | {percentage?: number, px?: number}} resolution - The resolution to resize the image to.
* @param {'low' | 'high'} resolution - The resolution to resize the image to.
* 'low' for a maximum of 512x512 resolution,
* 'high' for a maximum of 768x2000 resolution,
* or a custom object with percentage or px values.
* 'high' for a maximum of 768x2000 resolution.
* @param {EModelEndpoint} endpoint - Identifier for specific endpoint handling
* @returns {Promise<{buffer: Buffer, width: number, height: number}>} An object containing the resized image buffer and its dimensions.
* @throws Will throw an error if the resolution parameter is invalid.
@@ -18,32 +17,10 @@ async function resizeImageBuffer(inputBuffer, resolution, endpoint) {
const maxShortSideHighRes = 768;
const maxLongSideHighRes = endpoint === EModelEndpoint.anthropic ? 1568 : 2000;
let customPercent, customPx;
if (resolution && typeof resolution === 'object') {
if (typeof resolution.percentage === 'number') {
customPercent = resolution.percentage;
} else if (typeof resolution.px === 'number') {
customPx = resolution.px;
}
}
let newWidth, newHeight;
let resizeOptions = { fit: 'inside', withoutEnlargement: true };
if (customPercent != null || customPx != null) {
// percentage-based resize
const metadata = await sharp(inputBuffer).metadata();
if (customPercent != null) {
newWidth = Math.round(metadata.width * (customPercent / 100));
newHeight = Math.round(metadata.height * (customPercent / 100));
} else {
// pixel max on both sides
newWidth = Math.min(metadata.width, customPx);
newHeight = Math.min(metadata.height, customPx);
}
resizeOptions.width = newWidth;
resizeOptions.height = newHeight;
} else if (resolution === 'low') {
if (resolution === 'low') {
resizeOptions.width = maxLowRes;
resizeOptions.height = maxLowRes;
} else if (resolution === 'high') {

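For reference, a runnable sketch of the retained 'low'/'high' paths using sharp's resize options as they appear above; it deliberately skips the orientation-dependent short/long-side swap the real function performs:

const sharp = require('sharp');

// 'low' caps both sides at 512px; 'high' caps at 768 x maxLongSide (2000, or 1568 for Anthropic).
async function resizeLowOrHigh(inputBuffer, resolution, maxLongSide = 2000) {
  const resizeOptions = { fit: 'inside', withoutEnlargement: true };
  if (resolution === 'low') {
    resizeOptions.width = 512;
    resizeOptions.height = 512;
  } else if (resolution === 'high') {
    resizeOptions.width = 768;
    resizeOptions.height = maxLongSide;
  } else {
    throw new Error('Invalid resolution parameter');
  }
  const { data, info } = await sharp(inputBuffer).resize(resizeOptions).toBuffer({ resolveWithObject: true });
  return { buffer: data, width: info.width, height: info.height };
}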
View File

@@ -137,13 +137,11 @@ const processDeleteRequest = async ({ req, files }) => {
/** @type {Record<string, OpenAI | undefined>} */
const client = { [FileSources.openai]: undefined, [FileSources.azure]: undefined };
const initializeClients = async () => {
if (req.app.locals[EModelEndpoint.assistants]) {
const openAIClient = await getOpenAIClient({
req,
overrideEndpoint: EModelEndpoint.assistants,
});
client[FileSources.openai] = openAIClient.openai;
}
const openAIClient = await getOpenAIClient({
req,
overrideEndpoint: EModelEndpoint.assistants,
});
client[FileSources.openai] = openAIClient.openai;
if (!req.app.locals[EModelEndpoint.azureOpenAI]?.assistants) {
return;
@@ -695,7 +693,7 @@ const processOpenAIFile = async ({
const processOpenAIImageOutput = async ({ req, buffer, file_id, filename, fileExt }) => {
const currentDate = new Date();
const formattedDate = currentDate.toISOString();
const _file = await convertImage(req, buffer, undefined, `${file_id}${fileExt}`);
const _file = await convertImage(req, buffer, 'high', `${file_id}${fileExt}`);
const file = {
..._file,
usage: 1,
@@ -840,9 +838,8 @@ function base64ToBuffer(base64String) {
async function saveBase64Image(
url,
{ req, file_id: _file_id, filename: _filename, endpoint, context, resolution },
{ req, file_id: _file_id, filename: _filename, endpoint, context, resolution = 'high' },
) {
const effectiveResolution = resolution ?? req.app.locals.fileConfig?.imageGeneration ?? 'high';
const file_id = _file_id ?? v4();
let filename = `${file_id}-${_filename}`;
const { buffer: inputBuffer, type } = base64ToBuffer(url);
@@ -855,7 +852,7 @@ async function saveBase64Image(
}
}
const image = await resizeImageBuffer(inputBuffer, effectiveResolution, endpoint);
const image = await resizeImageBuffer(inputBuffer, resolution, endpoint);
const source = req.app.locals.fileStrategy;
const { saveBuffer } = getStrategyFunctions(source);
const filepath = await saveBuffer({

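The two sides of this hunk resolve the fallback differently: a `resolution = 'high'` default parameter only covers `undefined`, while the `??` chain also consults a per-deployment config value first. A sketch of the chained version (the real lookup reads req.app.locals.fileConfig):

function pickResolution(resolution, locals = {}) {
  return resolution ?? locals.fileConfig?.imageGeneration ?? 'high';
}

console.log(pickResolution('low', {})); // 'low' — explicit argument wins
console.log(pickResolution(undefined, { fileConfig: { imageGeneration: 'low' } })); // 'low' — config fallback
console.log(pickResolution(undefined, {})); // 'high' — hard-coded default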
View File

@@ -47,7 +47,6 @@ const { uploadOpenAIFile, deleteOpenAIFile, getOpenAIFileStream } = require('./O
const { getCodeOutputDownloadStream, uploadCodeEnvFile } = require('./Code');
const { uploadVectors, deleteVectors } = require('./VectorDB');
const { uploadMistralOCR } = require('./MistralOCR');
const { uploadAzureDocumentIntelligence } = require('./AzureDocumentIntelligence'); // Import the function
/**
* Firebase Storage Strategy Functions
@@ -203,26 +202,6 @@ const mistralOCRStrategy = () => ({
handleFileUpload: uploadMistralOCR,
});
const azureOCRStrategy = () => ({
/** @type {typeof saveFileFromURL | null} */
saveURL: null,
/** @type {typeof saveFileFromURL | null} */
getFileURL: null,
/** @type {typeof saveFileFromURL | null} */
saveBuffer: null,
/** @type {typeof saveFileFromURL | null} */
processAvatar: null,
/** @type {typeof saveFileFromURL | null} */
handleImageUpload: null,
/** @type {typeof saveFileFromURL | null} */
prepareImagePayload: null,
/** @type {typeof saveFileFromURL | null} */
deleteFile: null,
handleFileUpload: uploadAzureDocumentIntelligence,
/** @type {typeof saveFileFromURL | null} */
getDownloadStream: null,
});
// Strategy Selector
const getStrategyFunctions = (fileSource) => {
if (fileSource === FileSources.firebase) {
@@ -243,8 +222,6 @@ const getStrategyFunctions = (fileSource) => {
return codeOutputStrategy();
} else if (fileSource === FileSources.mistral_ocr) {
return mistralOCRStrategy();
} else if (fileSource === FileSources.azure_ocr) {
return azureOCRStrategy();
} else {
throw new Error('Invalid file source');
}

View File

@@ -1,6 +1,5 @@
const { z } = require('zod');
const { tool } = require('@langchain/core/tools');
const { normalizeServerName } = require('librechat-mcp');
const { Constants: AgentConstants, Providers } = require('@librechat/agents');
const {
Constants,
@@ -39,7 +38,6 @@ async function createMCPTool({ req, toolKey, provider: _provider }) {
}
const [toolName, serverName] = toolKey.split(Constants.mcp_delimiter);
const normalizedToolKey = `${toolName}${Constants.mcp_delimiter}${normalizeServerName(serverName)}`;
if (!req.user?.id) {
logger.error(
@@ -85,7 +83,7 @@ async function createMCPTool({ req, toolKey, provider: _provider }) {
const toolInstance = tool(_call, {
schema,
name: normalizedToolKey,
name: toolKey,
description: description || '',
responseFormat: AgentConstants.CONTENT_AND_ARTIFACT,
});

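The normalization removed here splits the composite tool key on the MCP delimiter and sanitizes the server half. A sketch under assumed values — the real delimiter is Constants.mcp_delimiter and the real sanitizer lives in librechat-mcp:

const MCP_DELIMITER = '_mcp_'; // assumption for illustration only

// Assumed sanitizer: providers often reject spaces and punctuation in tool names.
function normalizeServerName(serverName) {
  return serverName.replace(/[^\w-]/g, '_');
}

function buildNormalizedToolKey(toolKey) {
  const [toolName, serverName] = toolKey.split(MCP_DELIMITER);
  return `${toolName}${MCP_DELIMITER}${normalizeServerName(serverName)}`;
}

console.log(buildNormalizedToolKey(`search${MCP_DELIMITER}my server`)); // 'search_mcp_my_server'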
View File

@@ -66,26 +66,16 @@ const getUserPluginAuthValue = async (userId, authField, throwError = true) => {
// }
// };
/**
*
* @async
* @param {string} userId
* @param {string} authField
* @param {string} pluginKey
* @param {string} value
* @returns {Promise<IPluginAuth>}
* @throws {Error}
*/
const updateUserPluginAuth = async (userId, authField, pluginKey, value) => {
try {
const encryptedValue = await encrypt(value);
const pluginAuth = await PluginAuth.findOne({ userId, authField }).lean();
if (pluginAuth) {
return await PluginAuth.findOneAndUpdate(
const pluginAuth = await PluginAuth.updateOne(
{ userId, authField },
{ $set: { value: encryptedValue } },
{ new: true, upsert: true },
).lean();
);
return pluginAuth;
} else {
const newPluginAuth = await new PluginAuth({
userId,
@@ -94,7 +84,7 @@ const updateUserPluginAuth = async (userId, authField, pluginKey, value) => {
pluginKey,
});
await newPluginAuth.save();
return newPluginAuth.toObject();
return newPluginAuth;
}
} catch (err) {
logger.error('[updateUserPluginAuth]', err);
@@ -102,14 +92,6 @@ const updateUserPluginAuth = async (userId, authField, pluginKey, value) => {
}
};
/**
* @async
* @param {string} userId
* @param {string} authField
* @param {boolean} [all]
* @returns {Promise<import('mongoose').DeleteResult>}
* @throws {Error}
*/
const deleteUserPluginAuth = async (userId, authField, all = false) => {
if (all) {
try {

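One behavioral note on this hunk: in Mongoose, findOneAndUpdate with { new: true } resolves to the stored document, while updateOne resolves to a write result, so the two versions hand different shapes back to callers. A sketch of the difference, with PluginAuth standing in for the model above:

async function compareReturnShapes(PluginAuth, userId, authField, encryptedValue) {
  const doc = await PluginAuth.findOneAndUpdate(
    { userId, authField },
    { $set: { value: encryptedValue } },
    { new: true, upsert: true },
  ).lean(); // resolves to the updated (or upserted) document

  const result = await PluginAuth.updateOne(
    { userId, authField },
    { $set: { value: encryptedValue } },
  ); // resolves to { acknowledged, matchedCount, modifiedCount, ... }, not the document

  return { doc, result };
}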
View File

@@ -1,8 +1,8 @@
const fs = require('fs');
const path = require('path');
const { zodToJsonSchema } = require('zod-to-json-schema');
const { Calculator } = require('@langchain/community/tools/calculator');
const { tool: toolFn, Tool, DynamicStructuredTool } = require('@langchain/core/tools');
const { Calculator } = require('@langchain/community/tools/calculator');
const {
Tools,
ErrorTypes,
@@ -29,7 +29,6 @@ const {
toolkits,
} = require('~/app/clients/tools');
const { processFileURL, uploadImageBuffer } = require('~/server/services/Files/process');
const { createOnSearchResults } = require('~/server/services/Tools/search');
const { isActionDomainAllowed } = require('~/server/services/domains');
const { getEndpointsConfig } = require('~/server/services/Config');
const { recordUsage } = require('~/server/services/Threads');
@@ -505,15 +504,11 @@ async function loadAgentTools({ req, res, agent, tool_resources, openAIApiKey })
const checkCapability = (capability) => enabledCapabilities.has(capability);
const areToolsEnabled = checkCapability(AgentCapabilities.tools);
let includesWebSearch = false;
const _agentTools = agent.tools?.filter((tool) => {
if (tool === Tools.file_search) {
return checkCapability(AgentCapabilities.file_search);
} else if (tool === Tools.execute_code) {
return checkCapability(AgentCapabilities.execute_code);
} else if (tool === Tools.web_search) {
includesWebSearch = checkCapability(AgentCapabilities.web_search);
return includesWebSearch;
} else if (!areToolsEnabled && !tool.includes(actionDelimiter)) {
return false;
}
@@ -523,11 +518,7 @@ async function loadAgentTools({ req, res, agent, tool_resources, openAIApiKey })
if (!_agentTools || _agentTools.length === 0) {
return {};
}
/** @type {ReturnType<createOnSearchResults>} */
let webSearchCallbacks;
if (includesWebSearch) {
webSearchCallbacks = createOnSearchResults(res);
}
const { loadedTools, toolContextMap } = await loadTools({
agent,
functions: true,
@@ -541,7 +532,6 @@ async function loadAgentTools({ req, res, agent, tool_resources, openAIApiKey })
uploadImageBuffer,
returnMetadata: true,
fileStrategy: req.app.locals.fileStrategy,
[Tools.web_search]: webSearchCallbacks,
},
});

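The filter above gates each agent tool on its capability flag; web search additionally records whether it survived the check so the search callbacks are only created when needed. A condensed sketch with an illustrative capability set (the real one comes from the endpoint config):

const enabledCapabilities = new Set(['file_search', 'execute_code']);
const checkCapability = (capability) => enabledCapabilities.has(capability);

let includesWebSearch = false;
const agentTools = ['file_search', 'web_search'].filter((tool) => {
  if (tool === 'web_search') {
    includesWebSearch = checkCapability('web_search');
    return includesWebSearch;
  }
  return checkCapability(tool);
});

console.log(agentTools, includesWebSearch); // ['file_search'] false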
View File

@@ -1,122 +0,0 @@
const { nanoid } = require('nanoid');
const { Tools } = require('librechat-data-provider');
const { logger } = require('~/config');
/**
* Creates a function to handle search results and stream them as attachments
* @param {import('http').ServerResponse} res - The HTTP server response object
 * @returns {{ onSearchResults: function(SearchResult, GraphRunnableConfig): void; onGetHighlights: function(string): void}} - Handlers that take search results and return or stream attachments
*/
function createOnSearchResults(res) {
const context = {
sourceMap: new Map(),
searchResultData: undefined,
toolCallId: undefined,
attachmentName: undefined,
messageId: undefined,
conversationId: undefined,
};
/**
* @param {SearchResult} results
* @param {GraphRunnableConfig} runnableConfig
*/
function onSearchResults(results, runnableConfig) {
logger.info(
`[onSearchResults] user: ${runnableConfig.metadata.user_id} | thread_id: ${runnableConfig.metadata.thread_id} | run_id: ${runnableConfig.metadata.run_id}`,
results,
);
if (!results.success) {
logger.error(
`[onSearchResults] user: ${runnableConfig.metadata.user_id} | thread_id: ${runnableConfig.metadata.thread_id} | run_id: ${runnableConfig.metadata.run_id} | error: ${results.error}`,
);
return;
}
const turn = runnableConfig.toolCall?.turn ?? 0;
const data = { turn, ...structuredClone(results.data ?? {}) };
context.searchResultData = data;
// Map sources to links
for (let i = 0; i < data.organic.length; i++) {
const source = data.organic[i];
if (source.link) {
context.sourceMap.set(source.link, {
type: 'organic',
index: i,
turn,
});
}
}
for (let i = 0; i < data.topStories.length; i++) {
const source = data.topStories[i];
if (source.link) {
context.sourceMap.set(source.link, {
type: 'topStories',
index: i,
turn,
});
}
}
context.toolCallId = runnableConfig.toolCall.id;
context.messageId = runnableConfig.metadata.run_id;
context.conversationId = runnableConfig.metadata.thread_id;
context.attachmentName = `${runnableConfig.toolCall.name}_${context.toolCallId}_${nanoid()}`;
const attachment = buildAttachment(context);
if (!res.headersSent) {
return attachment;
}
res.write(`event: attachment\ndata: ${JSON.stringify(attachment)}\n\n`);
}
/**
* @param {string} link
* @returns {void}
*/
function onGetHighlights(link) {
const source = context.sourceMap.get(link);
if (!source) {
return;
}
const { type, index } = source;
const data = context.searchResultData;
if (!data) {
return;
}
if (data[type][index] != null) {
data[type][index].processed = true;
}
const attachment = buildAttachment(context);
res.write(`event: attachment\ndata: ${JSON.stringify(attachment)}\n\n`);
}
return {
onSearchResults,
onGetHighlights,
};
}
/**
* Helper function to build an attachment object
* @param {object} context - The context containing attachment data
* @returns {object} - The attachment object
*/
function buildAttachment(context) {
return {
messageId: context.messageId,
toolCallId: context.toolCallId,
conversationId: context.conversationId,
name: context.attachmentName,
type: Tools.web_search,
[Tools.web_search]: context.searchResultData,
};
}
module.exports = {
createOnSearchResults,
};

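A note on the wire format used by these handlers: each attachment goes out as a single server-sent-events frame, one `event:` line plus one `data:` line terminated by a blank line. A minimal sketch:

// res is assumed to be an http.ServerResponse already streaming text/event-stream.
function writeAttachmentEvent(res, attachment) {
  res.write(`event: attachment\ndata: ${JSON.stringify(attachment)}\n\n`);
}

Before headers are flushed, onSearchResults returns the attachment to the caller instead of writing to the stream, matching the headersSent check above.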
View File

@@ -1,9 +1,7 @@
const {
Constants,
webSearchKeys,
deprecatedAzureVariables,
conflictingAzureVariables,
extractVariableName,
} = require('librechat-data-provider');
const { isEnabled, checkEmailConfig } = require('~/server/utils');
const { logger } = require('~/config');
@@ -143,56 +141,4 @@ function checkPasswordReset() {
}
}
/**
* Checks web search configuration values to ensure they are environment variable references.
* Warns if actual API keys or URLs are used instead of environment variable references.
* Logs debug information for properly configured environment variable references.
* @param {Object} webSearchConfig - The loaded web search configuration object.
*/
function checkWebSearchConfig(webSearchConfig) {
if (!webSearchConfig) {
return;
}
webSearchKeys.forEach((key) => {
const value = webSearchConfig[key];
if (typeof value === 'string') {
const varName = extractVariableName(value);
if (varName) {
// This is a proper environment variable reference
const actualValue = process.env[varName];
if (actualValue) {
logger.debug(`Web search ${key}: Using environment variable ${varName} with value set`);
} else {
logger.debug(
`Web search ${key}: Using environment variable ${varName} (not set in environment, user provided value)`,
);
}
} else {
// This is not an environment variable reference - warn user
logger.warn(
`❗ Web search configuration error: ${key} contains an actual value instead of an environment variable reference.
Current value: "${value.substring(0, 10)}..."
This is incorrect! You should use environment variable references in your librechat.yaml file, such as:
${key}: "\${YOUR_ENV_VAR_NAME}"
Then set the actual API key in your .env file or environment variables.
More info: https://www.librechat.ai/docs/configuration/librechat_yaml/web_search`,
);
}
}
});
}
module.exports = {
checkHealth,
checkConfig,
checkVariables,
checkAzureVariables,
checkWebSearchConfig,
};
module.exports = { checkVariables, checkHealth, checkConfig, checkAzureVariables };

View File

@@ -1,203 +0,0 @@
// Mock librechat-data-provider
jest.mock('librechat-data-provider', () => ({
...jest.requireActual('librechat-data-provider'),
extractVariableName: jest.fn(),
}));
// Mock the config logger
jest.mock('~/config', () => ({
logger: {
debug: jest.fn(),
warn: jest.fn(),
},
}));
const { checkWebSearchConfig } = require('./checks');
const { logger } = require('~/config');
const { extractVariableName } = require('librechat-data-provider');
describe('checkWebSearchConfig', () => {
let originalEnv;
beforeEach(() => {
// Clear all mocks
jest.clearAllMocks();
// Store original environment
originalEnv = process.env;
// Reset process.env
process.env = { ...originalEnv };
});
afterEach(() => {
// Restore original environment
process.env = originalEnv;
});
describe('when webSearchConfig is undefined or null', () => {
it('should return early without logging when config is undefined', () => {
checkWebSearchConfig(undefined);
expect(logger.debug).not.toHaveBeenCalled();
expect(logger.warn).not.toHaveBeenCalled();
});
it('should return early without logging when config is null', () => {
checkWebSearchConfig(null);
expect(logger.debug).not.toHaveBeenCalled();
expect(logger.warn).not.toHaveBeenCalled();
});
});
describe('when config values are proper environment variable references', () => {
it('should log debug message for each valid environment variable with value set', () => {
const config = {
serperApiKey: '${SERPER_API_KEY}',
jinaApiKey: '${JINA_API_KEY}',
};
extractVariableName.mockReturnValueOnce('SERPER_API_KEY').mockReturnValueOnce('JINA_API_KEY');
process.env.SERPER_API_KEY = 'test-serper-key';
process.env.JINA_API_KEY = 'test-jina-key';
checkWebSearchConfig(config);
expect(extractVariableName).toHaveBeenCalledWith('${SERPER_API_KEY}');
expect(extractVariableName).toHaveBeenCalledWith('${JINA_API_KEY}');
expect(logger.debug).toHaveBeenCalledWith(
'Web search serperApiKey: Using environment variable SERPER_API_KEY with value set',
);
expect(logger.debug).toHaveBeenCalledWith(
'Web search jinaApiKey: Using environment variable JINA_API_KEY with value set',
);
expect(logger.warn).not.toHaveBeenCalled();
});
it('should log debug message for environment variables not set in environment', () => {
const config = {
cohereApiKey: '${COHERE_API_KEY}',
};
extractVariableName.mockReturnValue('COHERE_API_KEY');
delete process.env.COHERE_API_KEY;
checkWebSearchConfig(config);
expect(logger.debug).toHaveBeenCalledWith(
'Web search cohereApiKey: Using environment variable COHERE_API_KEY (not set in environment, user provided value)',
);
expect(logger.warn).not.toHaveBeenCalled();
});
});
describe('when config values are actual values instead of environment variable references', () => {
it('should warn when serperApiKey contains actual API key', () => {
const config = {
serperApiKey: 'sk-1234567890abcdef',
};
extractVariableName.mockReturnValue(null);
checkWebSearchConfig(config);
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining(
'❗ Web search configuration error: serperApiKey contains an actual value',
),
);
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining('Current value: "sk-1234567..."'),
);
expect(logger.debug).not.toHaveBeenCalled();
});
it('should warn when firecrawlApiUrl contains actual URL', () => {
const config = {
firecrawlApiUrl: 'https://api.firecrawl.dev',
};
extractVariableName.mockReturnValue(null);
checkWebSearchConfig(config);
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining(
'❗ Web search configuration error: firecrawlApiUrl contains an actual value',
),
);
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining('Current value: "https://ap..."'),
);
});
it('should include documentation link in warning message', () => {
const config = {
firecrawlApiKey: 'fc-actual-key',
};
extractVariableName.mockReturnValue(null);
checkWebSearchConfig(config);
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining(
'More info: https://www.librechat.ai/docs/configuration/librechat_yaml/web_search',
),
);
});
});
describe('when config contains mixed value types', () => {
it('should only process string values and ignore non-string values', () => {
const config = {
serperApiKey: '${SERPER_API_KEY}',
safeSearch: 1,
scraperTimeout: 7500,
jinaApiKey: 'actual-key',
};
extractVariableName.mockReturnValueOnce('SERPER_API_KEY').mockReturnValueOnce(null);
process.env.SERPER_API_KEY = 'test-key';
checkWebSearchConfig(config);
expect(extractVariableName).toHaveBeenCalledTimes(2);
expect(logger.debug).toHaveBeenCalledTimes(1);
expect(logger.warn).toHaveBeenCalledTimes(1);
});
});
describe('edge cases', () => {
it('should handle config with no web search keys', () => {
const config = {
someOtherKey: 'value',
anotherKey: '${SOME_VAR}',
};
checkWebSearchConfig(config);
expect(extractVariableName).not.toHaveBeenCalled();
expect(logger.debug).not.toHaveBeenCalled();
expect(logger.warn).not.toHaveBeenCalled();
});
it('should truncate long values in warning messages', () => {
const config = {
serperApiKey: 'this-is-a-very-long-api-key-that-should-be-truncated-in-the-warning-message',
};
extractVariableName.mockReturnValue(null);
checkWebSearchConfig(config);
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining('Current value: "this-is-a-..."'),
);
});
});
});

View File

@@ -38,7 +38,6 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
agents: interfaceConfig?.agents ?? defaults.agents,
temporaryChat: interfaceConfig?.temporaryChat ?? defaults.temporaryChat,
runCode: interfaceConfig?.runCode ?? defaults.runCode,
webSearch: interfaceConfig?.webSearch ?? defaults.webSearch,
customWelcome: interfaceConfig?.customWelcome ?? defaults.customWelcome,
});
@@ -49,7 +48,6 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
[PermissionTypes.AGENTS]: { [Permissions.USE]: loadedInterface.agents },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: loadedInterface.temporaryChat },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: loadedInterface.runCode },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: loadedInterface.webSearch },
});
await updateAccessPermissions(SystemRoles.ADMIN, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: loadedInterface.prompts },
@@ -58,7 +56,6 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
[PermissionTypes.AGENTS]: { [Permissions.USE]: loadedInterface.agents },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: loadedInterface.temporaryChat },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: loadedInterface.runCode },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: loadedInterface.webSearch },
});
let i = 0;
@@ -77,7 +74,7 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
// warn about config.modelSpecs.prioritize if true and presets are enabled, that default presets will conflict with prioritizing model specs.
if (config?.modelSpecs?.prioritize && loadedInterface.presets) {
logger.warn(
"Note: Prioritizing model specs can conflict with default presets if a default preset is set. It's recommended to disable presets from the interface or disable use of a default preset.",
'Note: Prioritizing model specs can conflict with default presets if a default preset is set. It\'s recommended to disable presets from the interface or disable use of a default preset.',
);
i === 0 && i++;
}
@@ -91,14 +88,14 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
loadedInterface.parameters)
) {
logger.warn(
"Note: Enforcing model specs can conflict with the interface options: endpointsMenu, modelSelect, presets, and parameters. It's recommended to disable these options from the interface or disable enforcing model specs.",
'Note: Enforcing model specs can conflict with the interface options: endpointsMenu, modelSelect, presets, and parameters. It\'s recommended to disable these options from the interface or disable enforcing model specs.',
);
i === 0 && i++;
}
// warn if enforce is true and prioritize is not, that enforcing model specs without prioritizing them can lead to unexpected behavior.
if (config?.modelSpecs?.enforce && !config?.modelSpecs?.prioritize) {
logger.warn(
"Note: Enforcing model specs without prioritizing them can lead to unexpected behavior. It's recommended to enable prioritizing model specs if enforcing them.",
'Note: Enforcing model specs without prioritizing them can lead to unexpected behavior. It\'s recommended to enable prioritizing model specs if enforcing them.',
);
i === 0 && i++;
}

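The interface defaults in this file rely on `??` rather than `||`, so an explicit `false` from the admin config is preserved instead of being swallowed by the packaged default. A two-line illustration:

const resolveFlag = (configValue, defaultValue) => configValue ?? defaultValue;

console.log(resolveFlag(false, true)); // false — explicit opt-out survives
console.log(resolveFlag(undefined, true)); // true — default applies only when unset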
View File

@@ -16,7 +16,6 @@ describe('loadDefaultInterface', () => {
agents: true,
temporaryChat: true,
runCode: true,
webSearch: true,
},
};
const configDefaults = { interface: {} };
@@ -30,7 +29,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: true },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: true },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: true },
});
});
@@ -43,7 +41,6 @@ describe('loadDefaultInterface', () => {
agents: false,
temporaryChat: false,
runCode: false,
webSearch: false,
},
};
const configDefaults = { interface: {} };
@@ -57,7 +54,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: false },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: false },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: false },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: false },
});
});
@@ -74,7 +70,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: undefined },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});
@@ -87,7 +82,6 @@ describe('loadDefaultInterface', () => {
agents: undefined,
temporaryChat: undefined,
runCode: undefined,
webSearch: undefined,
},
};
const configDefaults = { interface: {} };
@@ -101,7 +95,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: undefined },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});
@@ -114,7 +107,6 @@ describe('loadDefaultInterface', () => {
agents: true,
temporaryChat: undefined,
runCode: false,
webSearch: true,
},
};
const configDefaults = { interface: {} };
@@ -128,7 +120,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: true },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: false },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: true },
});
});
@@ -142,7 +133,6 @@ describe('loadDefaultInterface', () => {
agents: true,
temporaryChat: true,
runCode: true,
webSearch: true,
},
};
@@ -155,7 +145,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: true },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: true },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: true },
});
});
@@ -172,7 +161,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: undefined },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});
@@ -189,7 +177,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: undefined },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});
@@ -206,7 +193,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: undefined },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});
@@ -232,7 +218,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: false },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: false },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});
@@ -246,7 +231,6 @@ describe('loadDefaultInterface', () => {
agents: undefined,
temporaryChat: undefined,
runCode: undefined,
webSearch: undefined,
},
};
@@ -259,33 +243,6 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: undefined },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});
it('should call updateAccessPermissions with the correct parameters when WEB_SEARCH is undefined', async () => {
const config = {
interface: {
prompts: true,
bookmarks: false,
multiConvo: true,
agents: false,
temporaryChat: true,
runCode: false,
},
};
const configDefaults = { interface: {} };
await loadDefaultInterface(config, configDefaults);
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: true },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: false },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: true },
[PermissionTypes.AGENTS]: { [Permissions.USE]: false },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: false },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});
});

View File

@@ -26,16 +26,7 @@ function loadTurnstileConfig(config, configDefaults) {
options: customTurnstile.options ?? defaults.options,
});
const enabled = Boolean(loadedTurnstile.siteKey);
if (enabled) {
logger.info(
'Turnstile is ENABLED with configuration:\n' + JSON.stringify(loadedTurnstile, null, 2),
);
} else {
logger.info('Turnstile is DISABLED (no siteKey provided).');
}
logger.info('Turnstile configuration loaded:\n' + JSON.stringify(loadedTurnstile, null, 2));
return loadedTurnstile;
}

View File

@@ -10,7 +10,6 @@ const {
discordLogin,
facebookLogin,
appleLogin,
openIdJwtLogin,
} = require('~/strategies');
const { isEnabled } = require('~/server/utils');
const keyvRedis = require('~/cache/keyvRedis');
@@ -20,7 +19,7 @@ const { logger } = require('~/config');
*
* @param {Express.Application} app
*/
const configureSocialLogins = async (app) => {
const configureSocialLogins = (app) => {
logger.info('Configuring social logins...');
if (process.env.GOOGLE_CLIENT_ID && process.env.GOOGLE_CLIENT_SECRET) {
@@ -63,11 +62,8 @@ const configureSocialLogins = async (app) => {
}
app.use(session(sessionOptions));
app.use(passport.session());
const config = await setupOpenId();
if (isEnabled(process.env.OPENID_REUSE_TOKENS)) {
logger.info('OpenID token reuse is enabled.');
passport.use('openidJwt', openIdJwtLogin(config));
}
setupOpenId();
logger.info('OpenID Connect configured.');
}
};

View File

@@ -200,12 +200,11 @@ function generateConfig(key, baseURL, endpoint) {
config.capabilities = [
AgentCapabilities.execute_code,
AgentCapabilities.file_search,
AgentCapabilities.web_search,
AgentCapabilities.artifacts,
AgentCapabilities.actions,
AgentCapabilities.tools,
AgentCapabilities.chain,
AgentCapabilities.ocr,
AgentCapabilities.chain,
];
}

View File

@@ -1,5 +1,6 @@
const fs = require('fs').promises;
const { getImporter } = require('./importers');
const { indexSync } = require('~/lib/db');
const { logger } = require('~/config');
/**
@@ -14,6 +15,8 @@ const importConversations = async (job) => {
const jsonData = JSON.parse(fileData);
const importer = getImporter(jsonData);
await importer(jsonData, requestUserId);
// Sync Meilisearch index
await indexSync();
logger.debug(`user: ${requestUserId} | Finished importing conversations`);
} catch (error) {
logger.error(`user: ${requestUserId} | Failed to import conversation: `, error);

View File

@@ -84,14 +84,14 @@ describe('importChatGptConvo', () => {
const { parent } = jsonData[0].mapping[id];
const expectedParentId = parent
? (idToUUIDMap.get(parent) ?? Constants.NO_PARENT)
? idToUUIDMap.get(parent) ?? Constants.NO_PARENT
: Constants.NO_PARENT;
const actualMessageId = idToUUIDMap.get(id);
const actualParentId = actualMessageId
? importBatchBuilder.saveMessage.mock.calls.find(
(call) => call[0].messageId === actualMessageId,
)[0].parentMessageId
(call) => call[0].messageId === actualMessageId,
)[0].parentMessageId
: Constants.NO_PARENT;
expect(actualParentId).toBe(expectedParentId);
@@ -544,7 +544,7 @@ describe('processAssistantMessage', () => {
// Expected output should have all citations replaced with markdown links
const expectedOutput =
"Signal Sciences is a web application security company that was founded on March 10, 2014, by Andrew Peterson, Nick Galbreath, and Zane Lackey. It operates as a for-profit company with its legal name being Signal Sciences Corp. The company has achieved significant growth and is recognized as the fastest-growing web application security company in the world. Signal Sciences developed a next-gen web application firewall (NGWAF) and runtime application self-protection (RASP) technologies designed to increase security and maintain reliability without compromising the performance of modern web applications distributed across cloud, on-premise, edge, or hybrid environments ([Signal Sciences - Crunchbase Company Profile & Funding](https://www.crunchbase.com/organization/signal-sciences)) ([Demand More from Your WAF - Signal Sciences now part of Fastly](https://www.signalsciences.com/)).\n\nIn a major development, Fastly, Inc., a provider of an edge cloud platform, announced the completion of its acquisition of Signal Sciences on October 1, 2020. This acquisition was valued at approximately $775 million in cash and stock. By integrating Signal Sciences' powerful web application and API security solutions with Fastly's edge cloud platform and existing security offerings, they aimed to form a unified suite of security solutions. The merger was aimed at expanding Fastly's security portfolio, particularly at a time when digital security has become paramount for businesses operating online ([Fastly Completes Acquisition of Signal Sciences | Fastly](https://www.fastly.com/press/press-releases/fastly-completes-acquisition-signal-sciences)) ([Fastly Agrees to Acquire Signal Sciences for $775 Million - Cooley](https://www.cooley.com/news/coverage/2020/2020-08-27-fastly-agrees-to-acquire-signal-sciences-for-775-million)).";
'Signal Sciences is a web application security company that was founded on March 10, 2014, by Andrew Peterson, Nick Galbreath, and Zane Lackey. It operates as a for-profit company with its legal name being Signal Sciences Corp. The company has achieved significant growth and is recognized as the fastest-growing web application security company in the world. Signal Sciences developed a next-gen web application firewall (NGWAF) and runtime application self-protection (RASP) technologies designed to increase security and maintain reliability without compromising the performance of modern web applications distributed across cloud, on-premise, edge, or hybrid environments ([Signal Sciences - Crunchbase Company Profile & Funding](https://www.crunchbase.com/organization/signal-sciences)) ([Demand More from Your WAF - Signal Sciences now part of Fastly](https://www.signalsciences.com/)).\n\nIn a major development, Fastly, Inc., a provider of an edge cloud platform, announced the completion of its acquisition of Signal Sciences on October 1, 2020. This acquisition was valued at approximately $775 million in cash and stock. By integrating Signal Sciences\' powerful web application and API security solutions with Fastly\'s edge cloud platform and existing security offerings, they aimed to form a unified suite of security solutions. The merger was aimed at expanding Fastly\'s security portfolio, particularly at a time when digital security has become paramount for businesses operating online ([Fastly Completes Acquisition of Signal Sciences | Fastly](https://www.fastly.com/press/press-releases/fastly-completes-acquisition-signal-sciences)) ([Fastly Agrees to Acquire Signal Sciences for $775 Million - Cooley](https://www.cooley.com/news/coverage/2020/2020-08-27-fastly-agrees-to-acquire-signal-sciences-for-775-million)).';
const result = processAssistantMessage(assistantMessage, messageText);
expect(result).toBe(expectedOutput);
@@ -603,7 +603,7 @@ describe('processAssistantMessage', () => {
// In a ReDoS vulnerability, time would roughly double with each size increase
for (let i = 1; i < results.length; i++) {
const ratio = results[i] / results[i - 1];
expect(ratio).toBeLessThan(3); // Allow for CI environment variability while still catching ReDoS
expect(ratio).toBeLessThan(2); // Processing time should not double
console.log(`Size ${sizes[i]} processing time ratio: ${ratio}`);
}

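The test above doubles the input size and asserts the processing-time ratio stays below a constant; with a ReDoS-prone pattern the ratio would itself keep growing. A standalone sketch of that harness, with a hypothetical linear-time workload in place of the regex under test (timings at small sizes are noisy, which is why the real test allows some headroom):

const { performance } = require('node:perf_hooks');

const runRegex = (text) => text.match(/[a-z]+/g); // substitute the pattern under test

const sizes = [10000, 20000, 40000, 80000];
const times = sizes.map((n) => {
  const input = 'a'.repeat(n);
  const start = performance.now();
  runRegex(input);
  return performance.now() - start;
});

// With linear scaling the ratio hovers near 2; catastrophic backtracking grows far faster.
for (let i = 1; i < times.length; i++) {
  console.assert(times[i] / times[i - 1] < 3, `possible super-linear blow-up at size ${sizes[i]}`);
}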
View File

@@ -14,7 +14,6 @@ const staticCache = (staticPath) =>
res.setHeader('Cache-Control', `public, max-age=${maxAge}, s-maxage=${sMaxAge}`);
}
},
index: false,
});
module.exports = staticCache;

View File

@@ -0,0 +1,165 @@
const fetch = require('node-fetch');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { logger } = require('~/config');
// Microsoft SDK
const { Client: MicrosoftGraphClient } = require('@microsoft/microsoft-graph-client');
/**
* Base class for provider-specific data mappers.
*/
class BaseDataMapper {
/**
* Map custom OpenID data.
* @param {string} accessToken - The access token to authenticate the request.
* @param {string|Array<string>} customQuery - Either a full query string (if it contains operators)
* or an array of fields to select.
* @returns {Promise<Record<string, unknown>>} A promise that resolves to an object of custom fields.
* @throws {Error} Throws an error if not implemented in the subclass.
*/
async mapCustomData(accessToken, customQuery) {
throw new Error('mapCustomData() must be implemented by subclasses');
}
/**
* Optionally handle proxy settings for HTTP requests.
* @returns {Object} Configuration object with proxy settings if PROXY is set.
*/
getProxyOptions() {
if (process.env.PROXY) {
const agent = new HttpsProxyAgent(process.env.PROXY);
return { agent };
}
return {};
}
}
/**
* Microsoft-specific data mapper using the Microsoft Graph SDK.
*/
class MicrosoftDataMapper extends BaseDataMapper {
/**
* Initializes the MicrosoftGraphClient once for reuse.
*/
constructor() {
super();
this.accessToken = null;
this.client = MicrosoftGraphClient.init({
defaultVersion: 'beta',
authProvider: (done) => {
// The authProvider will be called for each request to get the token
if (this.accessToken) {
done(null, this.accessToken);
} else {
done(new Error('Access token is not set.'), null);
}
},
fetch: fetch,
...this.getProxyOptions(),
});
// Bind methods to maintain context
this.mapCustomData = this.mapCustomData.bind(this);
this.cleanData = this.cleanData.bind(this);
}
/**
* Set the access token for the client.
* This method should be called before making any requests.
*
* @param {string} accessToken - The access token.
*/
setAccessToken(accessToken) {
if (!accessToken || typeof accessToken !== 'string') {
throw new Error('[MicrosoftDataMapper] Invalid access token provided.');
}
this.accessToken = accessToken;
}
/**
* Map custom OpenID data using the Microsoft Graph SDK.
*
* @param {string} accessToken - The access token to authenticate the request.
* @param {string|Array<string>} customQuery - Fields to select from the Microsoft Graph API.
* @returns {Promise<Record<string, unknown>>} A promise that resolves to an object of custom fields.
*/
async mapCustomData(accessToken, customQuery) {
try {
this.setAccessToken(accessToken);
if (!customQuery) {
logger.warn('[MicrosoftDataMapper] No customQuery provided.');
return {};
}
// Convert customQuery to a comma-separated string if it's an array
const fields = Array.isArray(customQuery) ? customQuery.join(',') : customQuery;
if (!fields) {
logger.warn('[MicrosoftDataMapper] No fields specified in customQuery.');
return {};
}
const result = await this.client.api('/me').select(fields).get();
// Clean and return the data as a plain object
return this.cleanData(result);
} catch (error) {
// Handle specific Microsoft Graph errors if needed
logger.error(`[MicrosoftDataMapper] Error fetching user data: ${error.message}`, {
stack: error.stack,
});
return {};
}
}
/**
* Recursively remove all keys starting with @odata. from an object or array.
*
* @param {object|Array} obj - The object or array to clean.
* @returns {object|Array} - The cleaned object or array.
*/
cleanData(obj) {
if (Array.isArray(obj)) {
return obj.map(this.cleanData);
} else if (obj && typeof obj === 'object') {
return Object.entries(obj).reduce((acc, [key, value]) => {
if (!key.startsWith('@odata.')) {
acc[key] = this.cleanData(value);
}
return acc;
}, {});
}
return obj;
}
}
/**
* Map provider names to their specific data mappers.
*/
const PROVIDER_MAPPERS = {
microsoft: MicrosoftDataMapper,
};
/**
* Abstraction layer that returns a provider-specific mapper instance.
*/
class OpenIdDataMapper {
/**
* Retrieve an instance of the mapper for the specified provider.
*
* @param {string} provider - The name of the provider (e.g., 'microsoft').
* @returns {BaseDataMapper} An instance of the specific data mapper for the provider.
* @throws {Error} Throws an error if no mapper is found for the specified provider.
*/
static getMapper(provider) {
const MapperClass = PROVIDER_MAPPERS[provider.toLowerCase()];
if (!MapperClass) {
throw new Error(`No mapper found for provider: ${provider}`);
}
return new MapperClass();
}
}
module.exports = OpenIdDataMapper;

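A usage sketch for the mapper, assuming OPENID_CUSTOM_DATA names Microsoft Graph /me fields; the field list below is illustrative, not taken from the source:

const OpenIdDataMapper = require('./OpenId/openidDataMapper');

async function fetchCustomOpenIdData(accessToken) {
  const mapper = OpenIdDataMapper.getMapper('microsoft');
  // Either an array of fields or a comma-separated string is accepted.
  return mapper.mapCustomData(accessToken, ['department', 'jobTitle', 'officeLocation']);
}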
View File

@@ -4,10 +4,9 @@ const googleLogin = require('./googleStrategy');
const githubLogin = require('./githubStrategy');
const discordLogin = require('./discordStrategy');
const facebookLogin = require('./facebookStrategy');
const { setupOpenId, getOpenIdConfig } = require('./openidStrategy');
const setupOpenId = require('./openidStrategy');
const jwtLogin = require('./jwtStrategy');
const ldapLogin = require('./ldapStrategy');
const openIdJwtLogin = require('./openIdJwtStrategy');
module.exports = {
appleLogin,
@@ -18,7 +17,5 @@ module.exports = {
jwtLogin,
facebookLogin,
setupOpenId,
getOpenIdConfig,
ldapLogin,
openIdJwtLogin,
};
};

View File

@@ -4,7 +4,7 @@ const { getUserById, updateUser } = require('~/models');
const { logger } = require('~/config');
// JWT strategy
const jwtLogin = () =>
const jwtLogin = async () =>
new JwtStrategy(
{
jwtFromRequest: ExtractJwt.fromAuthHeaderAsBearerToken(),

View File

@@ -23,7 +23,7 @@ const {
// Check required environment variables
if (!LDAP_URL || !LDAP_USER_SEARCH_BASE) {
module.exports = null;
return null;
}
const searchAttributes = [

View File

@@ -1,52 +0,0 @@
const { SystemRoles } = require('librechat-data-provider');
const { Strategy: JwtStrategy, ExtractJwt } = require('passport-jwt');
const { updateUser, findUser } = require('~/models');
const { logger } = require('~/config');
const jwksRsa = require('jwks-rsa');
const { isEnabled } = require('~/server/utils');
/**
* @function openIdJwtLogin
* @param {import('openid-client').Configuration} openIdConfig - Configuration object for the JWT strategy.
* @returns {JwtStrategy}
* @description This function creates a JWT strategy for OpenID authentication.
* It uses the jwks-rsa library to retrieve the signing key from a JWKS endpoint.
* The strategy extracts the JWT from the Authorization header as a Bearer token.
* The JWT is then verified using the signing key, and the user is retrieved from the database.
*/
const openIdJwtLogin = (openIdConfig) =>
new JwtStrategy(
{
jwtFromRequest: ExtractJwt.fromAuthHeaderAsBearerToken(),
secretOrKeyProvider: jwksRsa.passportJwtSecret({
cache: isEnabled(process.env.OPENID_JWKS_URL_CACHE_ENABLED) || true,
cacheMaxAge: process.env.OPENID_JWKS_URL_CACHE_TIME
? eval(process.env.OPENID_JWKS_URL_CACHE_TIME)
: 60000,
jwksUri: openIdConfig.serverMetadata().jwks_uri,
}),
},
async (payload, done) => {
try {
const user = await findUser({ openidId: payload?.sub });
if (user) {
user.id = user._id.toString();
if (!user.role) {
user.role = SystemRoles.USER;
await updateUser(user.id, { role: user.role });
}
done(null, user);
} else {
logger.warn(
'[openIdJwtLogin] openId JwtStrategy => no user found with the sub claims: ' +
payload?.sub,
);
done(null, false);
}
} catch (err) {
done(err, false);
}
},
);
module.exports = openIdJwtLogin;

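For context, a usage sketch of the strategy above as configureSocialLogins wires it, on the branch that exports both helpers (worth noting in passing that `isEnabled(...) || true` always evaluates to true, so that JWKS cache option is effectively always on):

const passport = require('passport');
const { setupOpenId, openIdJwtLogin } = require('~/strategies');

async function registerOpenIdJwt(app) {
  const openIdConfig = await setupOpenId(); // discovered openid-client Configuration
  passport.use('openidJwt', openIdJwtLogin(openIdConfig));
  // Hypothetical protected route accepting provider-issued bearer tokens:
  app.get('/api/protected', passport.authenticate('openidJwt', { session: false }), (req, res) =>
    res.json({ id: req.user.id, role: req.user.role }),
  );
}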
View File

@@ -1,129 +1,48 @@
const { CacheKeys } = require('librechat-data-provider');
const fetch = require('node-fetch');
const passport = require('passport');
const jwtDecode = require('jsonwebtoken/decode');
const { HttpsProxyAgent } = require('https-proxy-agent');
const client = require('openid-client');
const { Strategy: OpenIDStrategy } = require('openid-client/passport');
const { Issuer, Strategy: OpenIDStrategy, custom } = require('openid-client');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { findUser, createUser, updateUser } = require('~/models/userMethods');
const { hashToken } = require('~/server/utils/crypto');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');
const getLogStores = require('~/cache/getLogStores');
const { SystemRoles } = require('librechat-data-provider');
const OpenIdDataMapper = require('./OpenId/openidDataMapper');
/**
* @typedef {import('openid-client').ClientMetadata} ClientMetadata
* @typedef {import('openid-client').Configuration} Configuration
**/
/** @typedef {Configuration | null} */
let openidConfig = null;
// Overload the currentUrl function because Express 4's buggy req.host doesn't include the port.
// More info: https://github.com/panva/openid-client/pull/713
class CustomOpenIDStrategy extends OpenIDStrategy {
currentUrl(req) {
const hostAndProtocol = process.env.DOMAIN_SERVER;
return new URL(`${hostAndProtocol}${req.originalUrl ?? req.url}`);
}
let crypto;
try {
crypto = require('node:crypto');
} catch (err) {
logger.error('[openidStrategy] crypto support is disabled!', err);
}
/**
* Exchange the access token for a new access token using the on-behalf-of flow if required.
* @param {Configuration} config
* @param {string} accessToken access token to be exchanged if necessary
 * @param {string} sub - The subject identifier of the user, usually found as "sub" in the token claims
* @param {boolean} fromCache - Indicates whether to use cached tokens.
* @returns {Promise<string>} The new access token if exchanged, otherwise the original access token.
*/
const exchangeAccessTokenIfNeeded = async (config, accessToken, sub, fromCache = false) => {
const tokensCache = getLogStores(CacheKeys.OPENID_EXCHANGED_TOKENS);
const onBehalfFlowRequired = isEnabled(process.env.OPENID_ON_BEHALF_FLOW_FOR_USERINFRO_REQUIRED);
if (onBehalfFlowRequired) {
if (fromCache) {
const cachedToken = await tokensCache.get(sub);
if (cachedToken) {
return cachedToken.access_token;
}
}
const grantResponse = await client.genericGrantRequest(
config,
'urn:ietf:params:oauth:grant-type:jwt-bearer',
{
scope: process.env.OPENID_ON_BEHALF_FLOW_USERINFRO_SCOPE || 'user.read',
assertion: accessToken,
requested_token_use: 'on_behalf_of',
},
);
await tokensCache.set(
sub,
{
access_token: grantResponse.access_token,
},
grantResponse.expires_in * 1000,
);
return grantResponse.access_token;
}
return accessToken;
};
/**
* get user info from openid provider
* @param {Configuration} config
* @param {string} accessToken access token
 * @param {string} sub - The subject identifier of the user, usually found as "sub" in the token claims
* @returns {Promise<Object|null>}
*/
const getUserInfo = async (config, accessToken, sub) => {
try {
const exchangedAccessToken = await exchangeAccessTokenIfNeeded(config, accessToken, sub);
return await client.fetchUserInfo(config, exchangedAccessToken, sub);
} catch (error) {
logger.warn(`[openidStrategy] getUserInfo: Error fetching user info: ${error}`);
return null;
}
};
/**
* Downloads an image from a URL using an access token.
* @param {string} url
* @param {Configuration} config
* @param {string} accessToken access token
 * @param {string} sub - The subject identifier of the user, usually found as "sub" in the token claims
* @returns {Promise<Buffer | string>} The image buffer or an empty string if the download fails.
* @param {string} accessToken
* @returns {Promise<Buffer|string>}
*/
const downloadImage = async (url, config, accessToken, sub) => {
const exchangedAccessToken = await exchangeAccessTokenIfNeeded(config, accessToken, sub, true);
const downloadImage = async (url, accessToken) => {
if (!url) {
return '';
}
const options = {
method: 'GET',
headers: { Authorization: `Bearer ${accessToken}` },
...(process.env.PROXY && { agent: new HttpsProxyAgent(process.env.PROXY) }),
};
try {
const options = {
method: 'GET',
headers: {
Authorization: `Bearer ${exchangedAccessToken}`,
},
};
if (process.env.PROXY) {
options.agent = new HttpsProxyAgent(process.env.PROXY);
}
const response = await fetch(url, options);
if (response.ok) {
const buffer = await response.buffer();
return buffer;
} else {
if (!response.ok) {
throw new Error(`${response.statusText} (HTTP ${response.status})`);
}
return await response.buffer();
} catch (error) {
logger.error(
`[openidStrategy] downloadImage: Error downloading image at URL "${url}": ${error}`,
);
logger.error(`[openidStrategy] Error downloading image at URL "${url}": ${error}`);
return '';
}
};
@@ -138,25 +57,21 @@ const downloadImage = async (url, config, accessToken, sub) => {
* @param {string} [userinfo.email] - The user's email address
* @returns {string} The determined full name of the user
*/
function getFullName(userinfo) {
const getFullName = (userinfo) => {
if (process.env.OPENID_NAME_CLAIM) {
return userinfo[process.env.OPENID_NAME_CLAIM];
}
if (userinfo.given_name && userinfo.family_name) {
return `${userinfo.given_name} ${userinfo.family_name}`;
}
if (userinfo.given_name) {
return userinfo.given_name;
}
if (userinfo.family_name) {
return userinfo.family_name;
}
return userinfo.username || userinfo.email;
}
};
/**
* Converts an input into a string suitable for a username.
@@ -168,168 +83,213 @@ function getFullName(userinfo) {
* @param {string} [defaultValue=''] - The default value to return if the input is falsy.
* @returns {string} The processed input as a string suitable for a username.
*/
function convertToUsername(input, defaultValue = '') {
const convertToUsername = (input, defaultValue = '') => {
if (typeof input === 'string') {
return input;
} else if (Array.isArray(input)) {
}
if (Array.isArray(input)) {
return input.join('_');
}
return defaultValue;
}
};
/**
* Sets up the OpenID strategy for authentication.
* This function configures the OpenID client, handles proxy settings,
* and defines the OpenID strategy for Passport.js.
*
* @async
* @function setupOpenId
* @returns {Promise<Configuration | null>} A promise that resolves when the OpenID strategy is set up and returns the openid client config object.
* @throws {Error} If an error occurs during the setup process.
* Safely decodes a JWT token.
* @param {string} token
* @returns {Object|null}
*/
const safeDecode = (token) => {
try {
const decoded = jwtDecode(token);
if (decoded && typeof decoded === 'object') {
return decoded;
}
logger.error('[openidStrategy] Decoded token is not an object.');
} catch (error) {
logger.error('[openidStrategy] Error decoding token:', error);
}
return null;
};
/**
* Extracts roles from a decoded token based on the provided path.
* @param {Object} decodedToken
* @param {string} parameterPath
* @returns {string[]}
*/
const extractRolesFromToken = (decodedToken, parameterPath) => {
if (!decodedToken) {
return [];
}
if (!parameterPath) {
return [];
}
const roles = parameterPath.split('.').reduce((obj, key) => obj?.[key] ?? null, decodedToken);
if (!Array.isArray(roles)) {
logger.error('[openidStrategy] Roles extracted from token are not in array format.');
return [];
}
return roles;
};
/**
* Updates the user's avatar if a valid picture URL is provided.
* @param {Object} user
* @param {string | undefined} pictureUrl - The URL of the user's avatar.
* @param {string} accessToken
* @returns {Promise<Object>} The updated user object.
*/
const updateUserAvatar = async (user, pictureUrl, accessToken) => {
if (!pictureUrl || (user.avatar && user.avatar.includes('manual=true'))) {
return user;
}
const fileName = crypto ? (await hashToken(user.openidId)) + '.png' : `${user.openidId}.png`;
const imageBuffer = await downloadImage(pictureUrl, accessToken);
if (imageBuffer) {
const { saveBuffer } = getStrategyFunctions(process.env.CDN_PROVIDER);
const imagePath = await saveBuffer({
fileName,
userId: user._id.toString(),
buffer: imageBuffer,
});
user.avatar = imagePath ?? '';
}
return user;
};
async function setupOpenId() {
try {
/** @type {ClientMetadata} */
// Configure proxy if defined.
if (process.env.PROXY) {
const proxyAgent = new HttpsProxyAgent(process.env.PROXY);
custom.setHttpOptionsDefaults({ agent: proxyAgent });
logger.info(`[openidStrategy] Proxy agent added: ${process.env.PROXY}`);
}
const issuer = await Issuer.discover(process.env.OPENID_ISSUER);
/* Supported Algorithms, openid-client v5 doesn't set it automatically as discovered from server.
- id_token_signed_response_alg // defaults to 'RS256'
- request_object_signing_alg // defaults to 'RS256'
- userinfo_signed_response_alg // not in v5
- introspection_signed_response_alg // not in v5
- authorization_signed_response_alg // not in v5
*/
/** @type {import('openid-client').ClientMetadata} */
const clientMetadata = {
client_id: process.env.OPENID_CLIENT_ID,
client_secret: process.env.OPENID_CLIENT_SECRET,
redirect_uris: [process.env.DOMAIN_SERVER + process.env.OPENID_CALLBACK_URL],
};
/** @type {Configuration} */
openidConfig = await client.discovery(
new URL(process.env.OPENID_ISSUER),
process.env.OPENID_CLIENT_ID,
clientMetadata,
);
if (process.env.PROXY) {
const proxyAgent = new HttpsProxyAgent(process.env.PROXY);
openidConfig[client.customFetch] = (...args) => {
return fetch(args[0], { ...args[1], agent: proxyAgent });
};
logger.info(`[openidStrategy] proxy agent added: ${process.env.PROXY}`);
if (isEnabled(process.env.OPENID_SET_FIRST_SUPPORTED_ALGORITHM)) {
clientMetadata.id_token_signed_response_alg =
issuer.id_token_signing_alg_values_supported?.[0] || 'RS256';
}
const client = new issuer.Client(clientMetadata);
const requiredRole = process.env.OPENID_REQUIRED_ROLE;
const requiredRoleParameterPath = process.env.OPENID_REQUIRED_ROLE_PARAMETER_PATH;
const requiredRoleTokenKind = process.env.OPENID_REQUIRED_ROLE_TOKEN_KIND;
const usePKCE = isEnabled(process.env.OPENID_USE_PKCE);
const openidLogin = new CustomOpenIDStrategy(
const adminRolesEnv = process.env.OPENID_ADMIN_ROLE;
const adminRoles = adminRolesEnv ? adminRolesEnv.split(',').map((role) => role.trim()) : [];
const openidLogin = new OpenIDStrategy(
{
config: openidConfig,
scope: process.env.OPENID_SCOPE,
callbackURL: process.env.DOMAIN_SERVER + process.env.OPENID_CALLBACK_URL,
usePKCE,
client,
params: { scope: process.env.OPENID_SCOPE },
},
async (tokenset, done) => {
async (tokenset, userinfo, done) => {
try {
const claims = tokenset.claims();
let user = await findUser({ openidId: claims.sub });
logger.info(
`[openidStrategy] user ${user ? 'found' : 'not found'} with openidId: ${claims.sub}`,
logger.info(`[openidStrategy] Verifying login for openidId: ${userinfo.sub}`);
logger.debug('[openidStrategy] Tokenset and userinfo:', { tokenset, userinfo });
// Find an existing user by openidId or email.
let user =
(await findUser({ openidId: userinfo.sub })) ||
(await findUser({ email: userinfo.email }));
const fullName = getFullName(userinfo);
const username = process.env.OPENID_USERNAME_CLAIM
? userinfo[process.env.OPENID_USERNAME_CLAIM]
: convertToUsername(userinfo.username || userinfo.given_name || userinfo.email);
// Use the token specified by configuration to extract roles.
const token =
requiredRoleTokenKind === 'access' ? tokenset.access_token : tokenset.id_token;
const decodedToken = safeDecode(token);
const tokenBasedRoles = extractRolesFromToken(decodedToken, requiredRoleParameterPath);
// Ensure the required role exists.
if (requiredRole && !tokenBasedRoles.includes(requiredRole)) {
return done(null, false, {
message: `You must have the "${requiredRole}" role to log in.`,
});
}
// Determine system role.
const isAdmin = tokenBasedRoles.some((role) => adminRoles.includes(role));
const assignedRole = isAdmin ? SystemRoles.ADMIN : SystemRoles.USER;
logger.debug(
`[openidStrategy] Assigned system role: ${assignedRole} (isAdmin: ${isAdmin})`,
);
// Map custom OpenID data if configured.
let customOpenIdData = {};
if (process.env.OPENID_CUSTOM_DATA) {
const dataMapper = OpenIdDataMapper.getMapper(
process.env.OPENID_PROVIDER.toLowerCase(),
);
customOpenIdData = await dataMapper.mapCustomData(
tokenset.access_token,
process.env.OPENID_CUSTOM_DATA,
);
if (tokenBasedRoles.length) {
customOpenIdData.roles = tokenBasedRoles;
} else {
logger.warn('[openidStrategy] tokenBasedRoles is missing or invalid.');
}
}
// Create or update the user.
if (!user) {
user = await createUser(
{
provider: 'openid',
openidId: userinfo.sub,
username,
email: userinfo.email || '',
emailVerified: userinfo.email_verified || false,
name: fullName,
role: assignedRole,
customOpenIdData,
},
true,
true,
);
} else {
user.provider = 'openid';
user.openidId = userinfo.sub;
user.username = username;
user.name = fullName;
}
// Update the user's avatar if available.
user = await updateUserAvatar(user, userinfo.picture, tokenset.access_token);
// Persist updated user data.
user = await updateUser(user._id, user);
logger.info(
`[openidStrategy] Login success for openidId: ${user.openidId} | email: ${user.email} | username: ${user.username}`,
{
user: {
openidId: user.openidId,
@@ -339,35 +299,18 @@ async function setupOpenId() {
},
},
);
done(null, user);
} catch (err) {
logger.error('[openidStrategy] Login failed', err);
done(err);
}
},
);
passport.use('openid', openidLogin);
} catch (err) {
logger.error('[openidStrategy]', err);
}
}
module.exports = setupOpenId;
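
For context on the new OPENID_CUSTOM_DATA path: the strategy only depends on the mapper exposing a getMapper(provider) lookup and an async mapCustomData(accessToken, fields) that resolves to a plain object. A minimal sketch of a provider mapper consistent with those call sites follows; the class name, endpoint URL, and field handling are assumptions for illustration, not LibreChat's actual implementation.

const fetch = require('node-fetch');

// Hypothetical sketch of a provider mapper; mirrors how the strategy calls
// dataMapper.mapCustomData(tokenset.access_token, process.env.OPENID_CUSTOM_DATA).
class ExampleMicrosoftDataMapper {
  async mapCustomData(accessToken, fields) {
    // OPENID_CUSTOM_DATA is a comma-separated list of field names.
    const selected = fields.split(',').map((field) => field.trim());
    // The endpoint is an assumption for illustration only.
    const response = await fetch('https://graph.microsoft.com/v1.0/me', {
      headers: { Authorization: `Bearer ${accessToken}` },
    });
    const data = await response.json();
    const customData = {};
    for (const field of selected) {
      if (field in data) {
        customData[field] = data[field];
      }
    }
    return customData;
  }
}

module.exports = ExampleMicrosoftDataMapper;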


@@ -1,10 +1,13 @@
const fetch = require('node-fetch');
const jwtDecode = require('jsonwebtoken/decode');
const { Issuer, Strategy: OpenIDStrategy } = require('openid-client');
const { findUser, createUser, updateUser } = require('~/models/userMethods');
const setupOpenId = require('./openidStrategy');
const OpenIdDataMapper = require('./OpenId/openidDataMapper');
// --- Mocks ---
jest.mock('node-fetch');
jest.mock('openid-client');
jest.mock('jsonwebtoken/decode');
jest.mock('~/server/services/Files/strategies', () => ({
getStrategyFunctions: jest.fn(() => ({
@@ -27,66 +30,31 @@ jest.mock('~/config', () => ({
info: jest.fn(),
debug: jest.fn(),
error: jest.fn(),
warn: jest.fn(),
},
}));
jest.mock('~/cache/getLogStores', () =>
jest.fn(() => ({
get: jest.fn(),
set: jest.fn(),
})),
);
jest.mock('librechat-data-provider', () => ({
CacheKeys: {
OPENID_EXCHANGED_TOKENS: 'openid-exchanged-tokens',
},
}));
// Mock Issuer.discover so that setupOpenId gets a fake issuer and client
Issuer.discover = jest.fn().mockResolvedValue({
id_token_signing_alg_values_supported: ['RS256'],
Client: jest.fn().mockImplementation((clientMetadata) => {
return {
metadata: clientMetadata,
};
}),
});
// Capture the verify callback from the strategy via the mock constructor
let verifyCallback;
OpenIDStrategy.mockImplementation((options, verify) => {
verifyCallback = verify;
return { name: 'openid', options, verify };
});
// Mock passport
jest.mock('passport', () => ({
use: jest.fn(),
}));
describe('setupOpenId', () => {
// Helper to wrap the verify callback in a promise
const validate = (tokenset, userinfo) =>
new Promise((resolve, reject) => {
verifyCallback(tokenset, userinfo, (err, user, details) => {
if (err) {
reject(err);
} else {
@@ -98,20 +66,20 @@ describe('setupOpenId', () => {
const tokenset = {
id_token: 'fake_id_token',
access_token: 'fake_access_token',
};
const baseUserinfo = {
sub: '1234',
email: 'test@example.com',
email_verified: true,
given_name: 'First',
family_name: 'Last',
name: 'My Full',
username: 'flast',
picture: 'https://example.com/avatar.png',
};
beforeEach(async () => {
// Clear previous mock calls and reset implementations
jest.clearAllMocks();
// Reset environment variables needed by the strategy
@@ -127,7 +95,8 @@ describe('setupOpenId', () => {
delete process.env.OPENID_USERNAME_CLAIM;
delete process.env.OPENID_NAME_CLAIM;
delete process.env.PROXY;
delete process.env.OPENID_CUSTOM_DATA;
delete process.env.OPENID_PROVIDER;
// Default jwtDecode mock returns a token that includes the required role.
jwtDecode.mockReturnValue({
@@ -152,19 +121,13 @@ describe('setupOpenId', () => {
};
fetch.mockResolvedValue(fakeResponse);
// Finally, call the setup function so that passport.use gets called
await setupOpenId();
});
it('should create a new user with correct username when username claim exists', async () => {
// Arrange: our userinfo already has username 'flast'
const userinfo = { ...baseUserinfo };
const { user } = await validate(tokenset, userinfo);
expect(user.username).toBe(userinfo.username);
expect(createUser).toHaveBeenCalledWith(
expect.objectContaining({
@@ -180,16 +143,10 @@ describe('setupOpenId', () => {
});
it('should use given_name as username when username claim is missing', async () => {
// Arrange: remove username from userinfo
const userinfo = { ...baseUserinfo };
delete userinfo.username;
// Expect the username to be the given name (unchanged case)
const expectUsername = userinfo.given_name;
const { user } = await validate(tokenset, userinfo);
expect(user.username).toBe(expectUsername);
expect(createUser).toHaveBeenCalledWith(
expect.objectContaining({ username: expectUsername }),
@@ -199,16 +156,11 @@ describe('setupOpenId', () => {
});
it('should use email as username when username and given_name are missing', async () => {
// Arrange: remove username and given_name
const userinfo = { ...baseUserinfo };
delete userinfo.username;
delete userinfo.given_name;
const expectUsername = userinfo.email;
const { user } = await validate(tokenset, userinfo);
expect(user.username).toBe(expectUsername);
expect(createUser).toHaveBeenCalledWith(
expect.objectContaining({ username: expectUsername }),
@@ -218,14 +170,9 @@ describe('setupOpenId', () => {
});
it('should override username with OPENID_USERNAME_CLAIM when set', async () => {
// Arrange: set OPENID_USERNAME_CLAIM so that the sub claim is used
process.env.OPENID_USERNAME_CLAIM = 'sub';
const userinfo = { ...baseUserinfo };
const { user } = await validate(tokenset, userinfo);
expect(user.username).toBe(userinfo.sub);
expect(createUser).toHaveBeenCalledWith(
expect.objectContaining({ username: userinfo.sub }),
@@ -235,112 +182,83 @@ describe('setupOpenId', () => {
});
it('should set the full name correctly when given_name and family_name exist', async () => {
// Arrange
const userinfo = { ...baseUserinfo };
const expectedFullName = `${userinfo.given_name} ${userinfo.family_name}`;
const { user } = await validate(tokenset, userinfo);
expect(user.name).toBe(expectedFullName);
});
it('should override full name with OPENID_NAME_CLAIM when set', async () => {
// Arrange: use the name claim as the full name
process.env.OPENID_NAME_CLAIM = 'name';
const userinfo = { ...baseUserinfo, name: 'Custom Name' };
const { user } = await validate(tokenset, userinfo);
expect(user.name).toBe('Custom Name');
});
it('should update an existing user on login', async () => {
// Arrange: simulate that a user already exists
const existingUser = {
_id: 'existingUserId',
provider: 'local',
email: baseUserinfo.email,
openidId: '',
username: '',
name: '',
};
findUser.mockImplementation(async (query) => {
if (query.openidId === baseUserinfo.sub || query.email === baseUserinfo.email) {
return existingUser;
}
return null;
});
const userinfo = { ...baseUserinfo };
await validate(tokenset, userinfo);
expect(updateUser).toHaveBeenCalledWith(
existingUser._id,
expect.objectContaining({
provider: 'openid',
openidId: baseUserinfo.sub,
username: baseUserinfo.username,
name: `${baseUserinfo.given_name} ${baseUserinfo.family_name}`,
}),
);
});
it('should enforce the required role and reject login if missing', async () => {
// Arrange: simulate a token without the required role.
jwtDecode.mockReturnValue({
roles: ['SomeOtherRole'],
});
const userinfo = { ...baseUserinfo };
const { user, details } = await validate(tokenset, userinfo);
expect(user).toBe(false);
expect(details.message).toBe('You must have the "requiredRole" role to log in.');
});
it('should attempt to download and save the avatar if picture is provided', async () => {
// Arrange: ensure userinfo contains a picture URL
const userinfo = { ...baseUserinfo };
const { user } = await validate(tokenset, userinfo);
expect(fetch).toHaveBeenCalled();
// Our mock getStrategyFunctions.saveBuffer returns '/fake/path/to/avatar.png'
expect(user.avatar).toBe('/fake/path/to/avatar.png');
});
it('should not attempt to download avatar if picture is not provided', async () => {
// Arrange: remove picture
const userinfo = { ...baseUserinfo };
delete userinfo.picture;
const { user } = await validate(tokenset, userinfo);
expect(fetch).not.toHaveBeenCalled();
// Depending on your implementation, user.avatar may be undefined or an empty string.
expect(user.avatar).toBeFalsy();
});
it('should map customOpenIdData as an object when OPENID_CUSTOM_DATA is set', async () => {
process.env.OPENID_CUSTOM_DATA = 'some,fields';
process.env.OPENID_PROVIDER = 'microsoft';
const fakeCustomData = { foo: 'bar' };
const fakeDataMapper = { mapCustomData: jest.fn().mockResolvedValue(fakeCustomData) };
OpenIdDataMapper.getMapper = jest.fn(() => fakeDataMapper);
await setupOpenId();
const userinfo = { ...baseUserinfo };
const { user } = await validate(tokenset, userinfo);
expect(OpenIdDataMapper.getMapper).toHaveBeenCalledWith('microsoft');
expect(fakeDataMapper.mapCustomData).toHaveBeenCalledWith(tokenset.access_token, 'some,fields');
expect(user.customOpenIdData).toEqual({ ...fakeCustomData, roles: ['requiredRole'] });
});
});
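
The final test above exercises the custom-data path end to end. In a deployment, the equivalent configuration is just two environment variables; the field list here is illustrative, not a required value:

OPENID_CUSTOM_DATA=department,jobTitle
OPENID_PROVIDER=microsoft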


@@ -7,8 +7,7 @@ const socialLogin =
(provider, getProfileDetails) => async (accessToken, refreshToken, idToken, profile, cb) => {
try {
const { email, id, avatarUrl, username, name, emailVerified } = getProfileDetails({
idToken, profile,
});
const oldUser = await findUser({ email: email.trim() });
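
The destructuring above defines the contract for getProfileDetails: it must return { email, id, avatarUrl, username, name, emailVerified }. One possible implementation is sketched below; the field names on profile assume the common Passport profile shape and are not LibreChat's exact provider code.

// Illustrative only: one possible getProfileDetails for a social provider.
const exampleGetProfileDetails = ({ profile }) => ({
  email: profile.emails?.[0]?.value ?? '',
  id: profile.id,
  avatarUrl: profile.photos?.[0]?.value ?? '',
  username: profile.username ?? '',
  name: profile.displayName ?? '',
  emailVerified: Boolean(profile.emails?.[0]?.verified),
});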


@@ -8,7 +8,6 @@ jest.mock('winston', () => {
mockFormatFunction.printf = jest.fn();
mockFormatFunction.errors = jest.fn();
mockFormatFunction.splat = jest.fn();
mockFormatFunction.json = jest.fn();
return {
format: mockFormatFunction,
createLogger: jest.fn().mockReturnValue({
@@ -20,7 +19,6 @@ jest.mock('winston', () => {
transports: {
Console: jest.fn(),
DailyRotateFile: jest.fn(),
File: jest.fn(),
},
addColors: jest.fn(),
};


@@ -1,6 +0,0 @@
// api/test/__mocks__/openid-client-passport.js
const Strategy = jest.fn().mockImplementation((options, verify) => {
return { name: 'mocked-openid-passport-strategy', options, verify };
});
module.exports = { Strategy };


@@ -1,67 +0,0 @@
// api/test/__mocks__/openid-client.js
module.exports = {
Issuer: {
discover: jest.fn().mockResolvedValue({
Client: jest.fn().mockImplementation(() => ({
authorizationUrl: jest.fn().mockReturnValue('mock_auth_url'),
callback: jest.fn().mockResolvedValue({
access_token: 'mock_access_token',
id_token: 'mock_id_token',
claims: () => ({
sub: 'mock_sub',
email: 'mock@example.com',
}),
}),
userinfo: jest.fn().mockResolvedValue({
sub: 'mock_sub',
email: 'mock@example.com',
}),
})),
}),
},
Strategy: jest.fn().mockImplementation((options, verify) => {
// Store verify to call it if needed, or just mock the strategy behavior
return { name: 'openid-mock-strategy' };
}),
custom: {
setHttpOptionsDefaults: jest.fn(),
},
// Add any other exports from openid-client that are used directly
// For example, if your code uses `client.Issuer.discover`, then mock `Issuer`
// If it uses `new Strategy()`, then mock `Strategy`
// Based on openidStrategy.js, it uses:
// const client = require('openid-client'); -> client.discovery, client.fetchUserInfo, client.genericGrantRequest
// const { Strategy: OpenIDStrategy } = require('openid-client/passport');
// So the mock needs to cover these.
// The provided mock in openidStrategy.spec.js is a good reference.
// Simpler mock based on the spec file:
discovery: jest.fn().mockResolvedValue({
clientId: 'fake_client_id',
clientSecret: 'fake_client_secret',
issuer: 'https://fake-issuer.com',
Client: jest.fn().mockImplementation(() => ({
authorizationUrl: jest.fn().mockReturnValue('mock_auth_url'),
callback: jest.fn().mockResolvedValue({
access_token: 'mock_access_token',
id_token: 'mock_id_token',
claims: () => ({
sub: 'mock_sub',
email: 'mock@example.com',
}),
}),
userinfo: jest.fn().mockResolvedValue({
sub: 'mock_sub',
email: 'mock@example.com',
}),
grant: jest.fn().mockResolvedValue({ access_token: 'mock_grant_token' }), // For genericGrantRequest
})),
}),
fetchUserInfo: jest.fn().mockResolvedValue({
preferred_username: 'preferred_username',
}),
genericGrantRequest: jest
.fn()
.mockResolvedValue({ access_token: 'mock_grant_access_token', expires_in: 3600 }),
customFetch: Symbol('customFetch'),
};


@@ -6,7 +6,3 @@ process.env.BAN_VIOLATIONS = 'true';
process.env.BAN_DURATION = '7200000';
process.env.BAN_INTERVAL = '20';
process.env.CI = 'true';
process.env.JWT_SECRET = 'test';
process.env.JWT_REFRESH_SECRET = 'test';
process.env.CREDS_KEY = 'test';
process.env.CREDS_IV = 'test';


@@ -55,12 +55,6 @@
* @memberof typedefs
*/
/**
* @exports MessageContentComplex
* @typedef {import('@librechat/agents').MessageContentComplex} MessageContentComplex
* @memberof typedefs
*/
/**
* @exports EventHandler
* @typedef {import('@librechat/agents').EventHandler} EventHandler
@@ -192,8 +186,6 @@
* agent_index: number;
* last_agent_index: number;
* hide_sequential_outputs: boolean;
* version?: 'v1' | 'v2';
* streamMode?: string
* }> & {
* toolCall?: LangChainToolCall & { stepId?: string };
* }} GraphRunnableConfig
@@ -481,25 +473,6 @@
* @typedef {import('librechat-data-provider').Agents.MessageContentImageUrl} MessageContentImageUrl
* @memberof typedefs
*/
/** Web Search */
/**
* @exports SearchResult
* @typedef {import('@librechat/agents').SearchResult} SearchResult
* @memberof typedefs
*/
/**
* @exports SearchResultData
* @typedef {import('@librechat/agents').SearchResultData} SearchResultData
* @memberof typedefs
*/
/**
* @exports ValidSource
* @typedef {import('librechat-data-provider').ValidSource} ValidSource
* @memberof typedefs
*/
/** Prompts */
/**
@@ -875,12 +848,6 @@
* @memberof typedefs
*/
/**
* @exports IPluginAuth
* @typedef {import('@librechat/data-schemas').IPluginAuth} IPluginAuth
* @memberof typedefs
*/
/**
* @exports ObjectId
* @typedef {import('mongoose').Types.ObjectId} ObjectId
@@ -1023,18 +990,6 @@
* @memberof typedefs
*/
/**
* @exports TEphemeralAgent
* @typedef {import('librechat-data-provider').TEphemeralAgent} TEphemeralAgent
* @memberof typedefs
*/
/**
* @exports TWebSearchKeys
* @typedef {import('librechat-data-provider').TWebSearchKeys} TWebSearchKeys
* @memberof typedefs
*/
/**
* @exports AgentToolResources
* @typedef {import('librechat-data-provider').AgentToolResources} AgentToolResources


@@ -105,9 +105,6 @@ const anthropicModels = {
'claude-3.7-sonnet': 200000,
'claude-3-5-sonnet-latest': 200000,
'claude-3.5-sonnet-latest': 200000,
'claude-sonnet-4': 200000,
'claude-opus-4': 200000,
'claude-4': 200000,
};
const deepseekModels = {
@@ -249,8 +246,6 @@ const anthropicMaxOutputs = {
'claude-3-haiku': 4096,
'claude-3-sonnet': 4096,
'claude-3-opus': 4096,
'claude-opus-4': 32000,
'claude-sonnet-4': 64000,
'claude-3.5-sonnet': 8192,
'claude-3-5-sonnet': 8192,
'claude-3.7-sonnet': 128000,
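
These maps are keyed by canonical model names, while callers pass arbitrary variants such as 'anthropic/claude-3.7-sonnet-latest', so lookups must match by substring rather than exact key; the Claude 4 tests removed later in this diff exercised exactly that behavior. A simplified sketch of such a lookup follows; the real getModelMaxTokens and matchModelName live elsewhere in the codebase, and this longest-substring rule is an assumption for illustration.

// Sketch: resolve a model-name variant to the longest map key it contains.
function lookupMaxTokens(modelName, tokenMap) {
  if (tokenMap[modelName] !== undefined) {
    return tokenMap[modelName];
  }
  const matches = Object.keys(tokenMap).filter((key) => modelName.includes(key));
  matches.sort((a, b) => b.length - a.length);
  return matches.length ? tokenMap[matches[0]] : undefined;
}

// e.g. lookupMaxTokens('anthropic/claude-3.7-sonnet-latest', anthropicModels) === 200000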


@@ -649,58 +649,3 @@ describe('Grok Model Tests - Tokens', () => {
});
});
});
describe('Claude Model Tests', () => {
it('should return correct context length for Claude 4 models', () => {
expect(getModelMaxTokens('claude-sonnet-4')).toBe(200000);
expect(getModelMaxTokens('claude-opus-4')).toBe(200000);
});
it('should handle Claude 4 model name variations with different prefixes and suffixes', () => {
const modelVariations = [
'claude-sonnet-4',
'claude-sonnet-4-20240229',
'claude-sonnet-4-latest',
'anthropic/claude-sonnet-4',
'claude-sonnet-4/anthropic',
'claude-sonnet-4-preview',
'claude-sonnet-4-20240229-preview',
'claude-opus-4',
'claude-opus-4-20240229',
'claude-opus-4-latest',
'anthropic/claude-opus-4',
'claude-opus-4/anthropic',
'claude-opus-4-preview',
'claude-opus-4-20240229-preview',
];
modelVariations.forEach((model) => {
expect(getModelMaxTokens(model)).toBe(200000);
});
});
it('should match model names correctly for Claude 4 models', () => {
const modelVariations = [
'claude-sonnet-4',
'claude-sonnet-4-20240229',
'claude-sonnet-4-latest',
'anthropic/claude-sonnet-4',
'claude-sonnet-4/anthropic',
'claude-sonnet-4-preview',
'claude-sonnet-4-20240229-preview',
'claude-opus-4',
'claude-opus-4-20240229',
'claude-opus-4-latest',
'anthropic/claude-opus-4',
'claude-opus-4/anthropic',
'claude-opus-4-preview',
'claude-opus-4-20240229-preview',
];
modelVariations.forEach((model) => {
const isSonnet = model.includes('sonnet');
const expectedModel = isSonnet ? 'claude-sonnet-4' : 'claude-opus-4';
expect(matchModelName(model, EModelEndpoint.anthropic)).toBe(expectedModel);
});
});
});


@@ -0,0 +1,4 @@
apiVersion: v2
name: librechat
type: application
version: 1.0.0


@@ -15,7 +15,7 @@
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "librechat.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "librechat.fullname" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "librechat.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT


@@ -1,28 +1,34 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "librechat.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "librechat.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "librechat.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
@@ -40,16 +46,7 @@ app.kubernetes.io/managed-by: {{ .Release.Service }}
Selector labels
*/}}
{{- define "librechat.selectorLabels" -}}
app.kubernetes.io/name: {{ include "librechat.fullname" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
RAG Selector labels
*/}}
{{- define "rag.selectorLabels" -}}
app.kubernetes.io/name: {{ include "librechat.fullname" . }}-rag
app.kubernetes.io/name: {{ include "librechat.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
@@ -63,3 +60,15 @@ Create the name of the service account to use
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Print string from list split by ,
*/}}
{{- define "model.list" -}}
{{- range $idx, $val := $.Values.configEndpoint.models -}}
{{- if $idx }}
{{- print ", " -}}
{{- end -}}
{{- $val -}}
{{- end -}}
{{- end -}}
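
For reference, a hypothetical use of the model.list helper: if configEndpoint.models were set to [gpt-4, gpt-3.5-turbo] in values.yaml (that key is not shown in this diff, so the list is illustrative), a template line such as

models: {{ include "model.list" . | quote }}

would render as

models: "gpt-4, gpt-3.5-turbo"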


@@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "librechat.fullname" . }}-env
labels:
{{- include "librechat.labels" . | nindent 4 }}
data:
{{- range $key, $val := .Values.config.env }}
{{ $key }}: {{ $val | quote }}
{{- end }}
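
Given the config.env map in the values.yaml further down in this diff, this template renders a ConfigMap along the following lines (labels omitted; the name assumes a release named librechat, so librechat.fullname collapses to the release name):

apiVersion: v1
kind: ConfigMap
metadata:
  name: librechat-env
data:
  ALLOW_EMAIL_LOGIN: "true"
  ALLOW_REGISTRATION: "true"
  APP_TITLE: "Librechat"
  # ...one entry per key in config.env, each value quoted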


@@ -0,0 +1,81 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "librechat.fullname" . }}
labels:
{{- include "librechat.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "librechat.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "librechat.selectorLabels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "librechat.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
envFrom:
{{ if .Values.config.envSecrets.secretRef }}
- secretRef:
name: {{ .Values.config.envSecrets.secretRef }}
{{- end }}
- configMapRef:
name: {{ include "librechat.fullname" . }}-env
env:
{{- range $secretKeyRef := .Values.config.envSecrets.secretKeyRef }}
- name: {{ $secretKeyRef.name }}
valueFrom:
secretKeyRef:
name: {{ $secretKeyRef.secretName }}
key: {{ $secretKeyRef.secretKey }}
{{- end }}
ports:
- name: http
containerPort: 3080
protocol: TCP
livenessProbe:
initialDelaySeconds: 5
httpGet:
path: /
port: http
readinessProbe:
initialDelaySeconds: 5
httpGet:
path: /
port: http
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}


@@ -1,28 +1,28 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "librechat.fullname" . }}
labels:
{{- include "librechat.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "librechat.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}


@@ -1,4 +1,5 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "librechat.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
@@ -14,7 +15,7 @@ apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ include "librechat.fullname" $ }}
name: {{ $fullName }}
labels:
{{- include "librechat.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
@@ -48,11 +49,11 @@ spec:
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ include "librechat.fullname" $ }}
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ include "librechat.fullname" $ }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}


@@ -4,8 +4,6 @@ metadata:
name: {{ include "librechat.fullname" . }}
labels:
{{- include "librechat.labels" . | nindent 4 }}
annotations:
{{- toYaml .Values.service.annotations | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:


@@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "librechat.serviceAccountName" . }}
labels:
{{- include "librechat.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}


@@ -0,0 +1,112 @@
# Default values for librechat.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: ghcr.io/danny-avila/librechat
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "latest"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
networkPolicies:
enabled: true
service:
type: LoadBalancer
port: 80
ingress:
enabled: true
className: "nginx"
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chat.example.com
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
config:
envSecrets:
# Use this when using one k8s secret for multiple env secrets
# secretRef: librechat
# Use this when using one k8s secret for each env secret
secretKeyRef: []
# - name: CREDS_IV
# secretName: librechat
# secretKey: CREDS_IV
env:
# Full list of possible values
# https://github.com/danny-avila/LibreChat/blob/main/.env.example
ALLOW_EMAIL_LOGIN: "true"
ALLOW_REGISTRATION: "true"
ALLOW_SOCIAL_LOGIN: "false"
ALLOW_SOCIAL_REGISTRATION: "false"
APP_TITLE: "Librechat"
CUSTOM_FOOTER: "Provided with ❤️"
DEBUG_CONSOLE: "true"
DEBUG_LOGGING: "true"
DEBUG_OPENAI: "true"
DEBUG_PLUGINS: "true"
DOMAIN_CLIENT: ""
DOMAIN_SERVER: ""
ENDPOINTS: "openAI,azureOpenAI,chatGPTBrowser,google,gptPlugins,anthropic"
SEARCH: false


@@ -1,9 +0,0 @@
import { createContext, useContext } from 'react';
import type { SearchResultData } from 'librechat-data-provider';
type SearchContext = {
searchResults?: { [key: string]: SearchResultData };
};
export const SearchContext = createContext<SearchContext>({} as SearchContext);
export const useSearchContext = () => useContext(SearchContext);


@@ -20,4 +20,3 @@ export * from './ArtifactContext';
export * from './CodeBlockContext';
export * from './ToolCallsMapContext';
export * from './SetConvoContext';
export * from './SearchContext';


@@ -10,7 +10,6 @@ export type TAgentOption = OptionWithIcon &
};
export type TAgentCapabilities = {
[AgentCapabilities.web_search]: boolean;
[AgentCapabilities.file_search]: boolean;
[AgentCapabilities.execute_code]: boolean;
[AgentCapabilities.end_after_tools]?: boolean;

Some files were not shown because too many files have changed in this diff.