Compare commits


144 Commits

Author SHA1 Message Date
Dustin Healy
da611a634a feat: add STT support for Upload as Text 2025-08-04 22:34:14 -07:00
Dustin Healy
23945b3434 📤 feat: Add RAG API Endpoint Support for Text Parsing (#8849)
* feat: implement RAG API integration for text parsing with fallback to native parsing

* chore: remove TODO now that placeholder and fallback are implemented
2025-08-04 18:44:02 -07:00
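
A minimal TypeScript sketch of the fallback pattern this commit describes: try the RAG API's text-parsing endpoint first, fall back to native parsing on any failure. The RAG_API_URL variable, the /text route, and parseTextNative are illustrative assumptions, not the project's confirmed API:

    import { readFile } from 'node:fs/promises';

    // Assumed stand-in for the native text parser.
    async function parseTextNative(filePath: string): Promise<string> {
      return readFile(filePath, 'utf8');
    }

    // Try the RAG API first; fall back to native parsing if the service is
    // unconfigured, unreachable, or returns an error.
    export async function parseTextWithFallback(filePath: string): Promise<string> {
      const ragApiUrl = process.env.RAG_API_URL;
      if (ragApiUrl) {
        try {
          const body = await readFile(filePath);
          const res = await fetch(`${ragApiUrl}/text`, { method: 'POST', body });
          if (res.ok) {
            return await res.text();
          }
        } catch {
          // fall through to native parsing
        }
      }
      return parseTextNative(filePath);
    }
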
Dustin Healy
cde9f058af 🪶 feat: Add Support for Uploading Plaintext Files
feat: delineate between OCR and text handling in fileConfig field of config file

- also adds support for passing in mimetypes as just plain file extensions

feat: add showLabel bool to support future synthetic component DynamicDropdownInput

feat: add new combination dropdown-input component in params panel to support file type token limits

refactor: move hovercard to side to align with other hovercards

chore: clean up autogenerated comments

feat: add delineation to file upload path between text and ocr configured filetypes

feat: add token limit checks during file upload

refactor: move textParsing out of ocrEnabled logic

refactor: clean up types for filetype config

refactor: finish decoupling DynamicDropdownInput from fileTokenLimits

fix: move image token cost function into file to fix circular dependency causing unit test to fail, and remove unused var for linter

chore: remove out of scope code following review

refactor: make fileTokenLimit conform to existing styles

chore: remove unused localization string

chore: undo changes to DynamicInput and other strays

feat: add fileTokenLimit to all provider config panels

fix: move textParsing back into ocr tool_resource block for now so that it doesn't interfere with other upload types
2025-08-04 16:39:03 -07:00
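
A hedged sketch of the upload-time token limit check mentioned in the commit above. The fileTokenLimit name comes from the commit messages; the rough 4-characters-per-token estimate is illustrative, not the project's actual tokenizer:

    // Rough token estimate (~4 characters per token); a real tokenizer
    // would be used in practice.
    function estimateTokens(text: string): number {
      return Math.ceil(text.length / 4);
    }

    // Reject plaintext uploads whose estimated token count exceeds the
    // configured per-filetype limit.
    function checkFileTokenLimit(text: string, fileTokenLimit?: number): void {
      if (fileTokenLimit != null && estimateTokens(text) > fileTokenLimit) {
        throw new Error(`File exceeds the ${fileTokenLimit} token limit for this file type`);
      }
    }
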
Danny Avila
4175a3ea19 v0.8.0-rc1 (#8846) 2025-08-04 15:25:07 -04:00
github-actions[bot]
02dc71f4b7 🌍 i18n: Update translation.json with latest translations (#8845)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-08-04 15:24:24 -04:00
github-actions[bot]
a6c99a3267 🌍 i18n: Update translation.json with latest translations (#8828)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-08-04 14:54:07 -04:00
SollalF
fcefc6eedf feat: Add OpenID Audience Parameter (#8837)
* feat: Add OpenID audience parameter support in authorization requests

* Updated .env.example to include OPENID_AUDIENCE variable for configuration.
* Enhanced openidStrategy to set the audience parameter in authorization requests if specified, improving OpenID integration.

* Update .env.example

* Update openidStrategy.js

---------

Co-authored-by: Danny Avila <danacordially@gmail.com>
2025-08-04 14:49:36 -04:00
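
A minimal sketch of the change described above, assuming the strategy forwards extra authorization parameters; the helper name is illustrative:

    // Build authorization params for the OpenID strategy, adding `audience`
    // only when OPENID_AUDIENCE is configured.
    function buildAuthorizationParams(): Record<string, string> {
      const params: Record<string, string> = {};
      if (process.env.OPENID_AUDIENCE) {
        params.audience = process.env.OPENID_AUDIENCE;
      }
      return params;
    }
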
Marco Beretta
dfdafdbd09 🖌️ feat: add animation styles for popovers and tooltips (#8831)
* feat: Update client version to 0.2.2 and add animation styles for popovers and tooltips

* refactor: Remove focus outline styles from Dropdown component

* feat: Update client version to 0.2.3 and add Select component export

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2025-08-04 14:44:00 -04:00
Danny Avila
33834cd484 🧹 feat: Automatic File Cleanup for Mistral OCR Uploads (#8827)
* chore: Handle optional token_endpoint in OAuth metadata discovery

* chore: Simplify permission typing logic in checkAccess function

* feat: Implement `deleteMistralFile` function and integrate file cleanup in `uploadMistralOCR`
2025-08-03 17:11:14 -04:00
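
A hedged sketch of the cleanup step: delete the uploaded file from Mistral's files API once OCR completes. The endpoint shape follows Mistral's public files API as I understand it, and the deletion is deliberately best-effort so a failed delete doesn't break the OCR result:

    // Best-effort deletion of an uploaded OCR file; failures are logged, not thrown.
    async function deleteMistralFile(fileId: string, apiKey: string): Promise<void> {
      try {
        const res = await fetch(`https://api.mistral.ai/v1/files/${fileId}`, {
          method: 'DELETE',
          headers: { Authorization: `Bearer ${apiKey}` },
        });
        if (!res.ok) {
          console.warn(`Mistral file cleanup failed: ${res.status}`);
        }
      } catch (err) {
        console.warn('Mistral file cleanup failed:', err);
      }
    }
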
Danny Avila
7ef2c626e2 🛠️ feat: Add Reset-Meili-Sync Script for MongoDB Flags (#8823) 2025-08-02 18:04:04 -04:00
Danny Avila
bc43423f58 🌍 i18n: Add Tibetan and Ukrainian languages to localization (#8819)
* 🌍 i18n: Add Tibetan and Ukrainian languages to localization

* feat: Update language selector to include Tibetan and Ukrainian options
* feat: Add translation files for Tibetan and Ukrainian languages
* chore: Update English translation.json with new language keys
* docs: Create localization guide for adding new languages

* Update README.md
2025-08-02 12:37:18 -04:00
Danny Avila
863401bcdf 🔧 fix: Assistants API SDK calls to match Updated Arguments (#8818)
* chore: remove comments in agents endpoint error handler

* chore: improve openai sdk typing

* chore: improve typing for azure asst init

* 🔧 fix: Assistants API SDK calls to match Updated Arguments
2025-08-02 12:19:58 -04:00
Danny Avila
33c8b87edd 📦 chore: Bump @modelcontextprotocol/sdk to v1.17.1 (#8809) 2025-08-01 16:10:12 -04:00
github-actions[bot]
077248a8a7 🌍 i18n: Update translation.json with latest translations (#8808)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-08-01 15:55:42 -04:00
Danny Avila
c6fb4686ef 🌐 ci: Bump Locize i18n Action Version 2025-08-01 15:52:00 -04:00
Dustin Healy
f1c6e4d55e 🧪 ci: Unit Tests for MCP Routes (#8803)
* refactor: remove unreachable if checks from mcp routes

* test: add tests for mcp routes
2025-08-01 14:47:00 -04:00
William Kim
e192c99c7d 🔧 fix: Apply Convo Export filename sanitization at export, not input (#8779)
Co-authored-by: Woosub Kim <woosub.kim@navercorp.com>
2025-07-31 07:28:33 -04:00
wartek69
056172f007 🔒 feat: MCP OAuth Config for Metadata Parameters (#8691)
* fix(mcp): add default metadata for pre-configured oauth

* removed lingering comment

* added configurable options & jest unit tests

* Update handler.test.ts

* Update handler.ts

---------

Co-authored-by: Alex <aleksander.chernyavskiy@seafar.eu>
Co-authored-by: Danny Avila <danacordially@gmail.com>
2025-07-31 07:24:49 -04:00
github-actions[bot]
5eed5009e9 🌍 i18n: Update translation.json with latest translations (#8771)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-07-30 15:59:27 -04:00
Danny Avila
6fc9abd4ad ✂️ fix: Remove Image Payloads from Memory Processing (#8770) 2025-07-30 15:54:33 -04:00
github-actions[bot]
03a924eaca 🌍 i18n: Update translation.json with latest translations (#8739)
* 🌍 i18n: Update translation.json with latest translations

* chore: Remove unused translation keys for MCP custom variables

---------

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Danny Avila <danny@librechat.ai>
2025-07-30 15:23:12 -04:00
Danny Avila
25c993d93e 📦 chore: bump @librechat/agents to v2.4.69 (#8769) 2025-07-30 15:18:17 -04:00
Marco Beretta
09659c1040 🔨 style: Improve MCP UI (#8745)
* refactor: Enhance MCP components with improved UI elements and localization updates

* refactor: Clean up MCP components by removing unused imports and improving layout

* refactor: Update server status badge styling for improved UI consistency

* refactor: Move group up a level so 'X' and background highlight occur at same time for cancellation button

* refactor: Remove unused translation keys from the localization file

---------

Co-authored-by: Dustin Healy <dustinhealy1@gmail.com>
2025-07-30 14:56:22 -04:00
Josh Mullin
19a8f5c545 🐦 fix: Prioritize OIDC Username Claims to Prevent First Name Usernames (#8695)
Now prioritizes preferred_username claim, then the nonstandard
username claim, then email.

Removed given_name as a possible username choice to avoid exposing users’ first names as
usernames.

Updated openidStrategy.spec.js to reflect the new claim order.

Fixed mock OpenID server behavior where preferred_username was always
hardcoded, causing test failures.

Adjusted OpenID setup test to align with new username parameter
behavior.
2025-07-30 14:43:42 -04:00
Dustin Healy
1050346915 feat: Add Support for customUserVar Replacement in 'args' Field (#8743) 2025-07-30 14:37:56 -04:00
Marco Beretta
8a1a38f346 🔑 fix: Update Conversation Mutation to use ID from Payload (#8758) 2025-07-30 14:34:30 -04:00
Danny Avila
32081245da 🪵 chore: Remove Unnecessary Comments 2025-07-29 14:59:58 -04:00
Dustin Healy
6fd3b569ac ⚒️ fix: MCP Initialization Flows (#8734)
* fix: add OAuth flow back in to success state

* feat: disable server clicks during initialization to prevent spam

* fix: correct new tab behavior for OAuth between one-click and normal initialization flows

* fix: stop polling on error during OAuth (toasts were popping infinitely because we didn't clear the interval)

* fix: cleanupServerState should be called after successful cancelOauth, not before

* fix: change from completeFlow to failFlow to avoid stale client IDs on OAuth after cancellation

* fix: add logic to differentiate between cancelled and failed flows when checking status for indicators (so the error triangle indicator doesn't show up on cancellation)
2025-07-29 14:54:07 -04:00
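
A sketch of the polling fix called out above: clear the interval on error too, so a failing poll stops instead of repeating toasts. The status values and callback shape are illustrative stand-ins:

    type OAuthStatus = 'pending' | 'connected' | 'failed';

    function pollOAuthStatus(
      serverName: string,
      fetchStatus: (name: string) => Promise<OAuthStatus>,
      onError: (message: string) => void,
    ): void {
      const interval = setInterval(async () => {
        try {
          const status = await fetchStatus(serverName);
          if (status !== 'pending') {
            clearInterval(interval); // stop polling once the flow resolves
          }
        } catch (err) {
          // The missing piece: clear the interval on error too, otherwise the
          // failing poll keeps firing and pops an error toast on every tick.
          clearInterval(interval);
          onError(String(err));
        }
      }, 2000);
    }
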
Jakub Hrozek
6671fcb714 🛂 refactor: Use discoverAuthorizationServerMetadata for MCP OAuth (#8723)
* Use discoverAuthorizationServerMetadata instead of discoverMetadata

Uses the discoverAuthorizationServerMetadata function from the upstream
TS SDK. This has the advantage of falling back to OIDC discovery
metadata if the OAuth discovery metadata doesn't exist, which is the case
with e.g. Keycloak.

* chore: import order

---------

Co-authored-by: Danny Avila <danacordially@gmail.com>
2025-07-29 09:09:52 -04:00
Dustin Healy
c4677ab3fb 🔑 refactor: MCP Settings Rendering Logic for OAuth Servers (#8718)
* feat: add OAuth servers to conditional rendering logic for MCPPanel in SideNav

* feat: add startup flag check to conditional rendering logic

* fix: correct improper handling of failure state in reinitialize endpoint

* fix: change MCP config components to better handle servers without customUserVars

- removes the subtle reinitialize button from config components of servers without customUserVars or OAuth
- adds a placeholder message for components where servers have no customUserVars configured

* style: swap CustomUserVarsSection and ServerInitializationSection positions

* style: fix coloring for light mode and align more with existing design patterns

* chore: remove extraneous comments

* chore: reorder imports and `isEnabled` from api package

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2025-07-29 09:08:46 -04:00
github-actions[bot]
ef9d9b1276 🌍 i18n: Update translation.json with latest translations (#8676)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-07-28 15:15:06 -04:00
Danny Avila
a4ca4b7d9d 🔀 fix: Rerender Edge Cases After Migration to Shared Package (#8713)
* fix: render issues in PromptForm by decoupling nested dependencies as a result of @librechat/client components

* fix: MemoryViewer flicker by moving EditMemoryButton and DeleteMemoryButton outside of rendering

* fix: CategorySelector to use DropdownPopup for improved mobile compatibility

* chore: imports
2025-07-28 15:14:37 -04:00
Danny Avila
8e6eef04ab 🔧 fix: Update Proxy Config for OpenAI Image Tools (#8712)
- Replaced HttpsProxyAgent with ProxyAgent from undici for improved proxy handling in DALLE3.js and OpenAIImageTools.js.
- Updated fetchOptions to use dispatcher for proxy configuration.
- Added new test suite for DALLE3 to verify proxy configuration behavior based on environment variables.
2025-07-28 15:12:29 -04:00
Danny Avila
ec3cbca6e3 feat: Enhance Redis Config and Error Handling (#8709)
* feat: Enhance Redis Config and Error Handling

- Added new Redis configuration options: `REDIS_RETRY_MAX_DELAY`, `REDIS_RETRY_MAX_ATTEMPTS`, `REDIS_CONNECT_TIMEOUT`, and `REDIS_ENABLE_OFFLINE_QUEUE` to improve connection resilience.
- Implemented error handling for Redis cache creation and session store initialization in `cacheFactory.js`.
- Enhanced logging for Redis client events and errors in `redisClients.js`.
- Updated `README.md` to document new Redis configuration options.

* chore: Add JSDoc comments to Redis configuration options in cacheConfig.js for improved clarity and documentation

* ci: update cacheFactory tests

* refactor: remove fallback

* fix: Improve error handling in Redis cache creation, re-throw errors when expected
2025-07-28 14:21:39 -04:00
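
A hedged sketch of how the new variables might map onto ioredis options; connectTimeout, enableOfflineQueue, and retryStrategy are real ioredis settings, but the defaults and backoff curve here are illustrative:

    import Redis from 'ioredis';

    const maxDelay = Number(process.env.REDIS_RETRY_MAX_DELAY ?? 3000);
    const maxAttempts = Number(process.env.REDIS_RETRY_MAX_ATTEMPTS ?? 10);

    const client = new Redis({
      connectTimeout: Number(process.env.REDIS_CONNECT_TIMEOUT ?? 10000),
      enableOfflineQueue: process.env.REDIS_ENABLE_OFFLINE_QUEUE !== 'false',
      // Returning null tells ioredis to stop retrying; otherwise back off
      // linearly up to the configured max delay.
      retryStrategy: (times) => (times > maxAttempts ? null : Math.min(times * 200, maxDelay)),
    });

    client.on('error', (err) => console.error('Redis client error:', err));
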
Dustin Healy
4639dc3255 🐜 fix: Forward Ref to MCPSubMenu and ArtifactsSubMenu (#8696)
ToolsDropdown uses a menu library that passes refs to submenu items. Function components can't receive refs by default, so we got "Function components cannot be given refs" warnings in the console. React.forwardRef() wraps the component and attaches the ref to the outer div element, allowing proper ref forwarding.
2025-07-28 12:26:11 -04:00
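
A minimal TSX sketch of the fix described above, assuming a submenu that only needs its ref attached to the outer div; the props shape is illustrative:

    import React from 'react';

    // forwardRef lets the menu library attach its positioning ref to the
    // outer div instead of warning about refs on a function component.
    const MCPSubMenu = React.forwardRef<HTMLDivElement, { children?: React.ReactNode }>(
      ({ children }, ref) => <div ref={ref}>{children}</div>,
    );
    MCPSubMenu.displayName = 'MCPSubMenu';
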
Dustin Healy
0ef3fefaec 🏹 feat: Concurrent MCP Initialization Support (#8677)
* feat: Enhance MCP Connection Status Management

- Introduced new functions to retrieve and manage connection status for multiple MCP servers, including OAuth flow checks and server-specific status retrieval.
- Refactored the MCP connection status endpoints to support both all servers and individual server queries.
- Replaced the old server initialization hook with a new `useMCPServerManager` hook for improved state management and handling of multiple OAuth flows.
- Updated the MCPPanel component to utilize the new context provider for better state handling and UI updates.
- Fixed a number of UI bugs when initializing servers

* 🗣️ i18n: Remove unused strings from translation.json

* refactor: move helper functions out of the route module into mcp service file

* ci: add tests for newly added functions in mcp service file

* fix: memoize setMCPValues to avoid render loop
2025-07-28 12:25:34 -04:00
ryanh-ai
37aba18a96 🪟 feat: Context Window for amazon.nova-premier (#8689) 2025-07-28 12:24:08 -04:00
Marco Beretta
2ce6ac74f4 📻 feat: radio component (#8692)
* 📦 feat: Add Radio component

* 📦 feat: Integrate localization for 'No options available' message in Radio component

* 📦 feat: Bump version to 0.2.0 in package.json

* 📦 feat: Update client package version to 0.2.0 in package-lock.json
2025-07-28 12:18:59 -04:00
Danny Avila
9fddb0ff6a 🐋 ci: Include packages/client source files in Dockerfile.multi Client Build 2025-07-27 15:11:42 -04:00
Danny Avila
32f7dbd11f 🐳 ci: Build client package for Dockerfile.multi 2025-07-27 13:29:43 -04:00
Danny Avila
79197454f8 📦 feat: Move Shared Components to @librechat/client (#8685)
* feat: init @librechat/client

* feat: Add common types and interfaces for accessibility, agents, artifacts, assistants, and tools

* feat: Add jotai as a peer dependency

* fix build client package

* feat: cleanup unused types from common/index.ts

- Remove 104 unused type exports from packages/client/src/common/index.ts
- Keep only 7 actually used exports (93% reduction)
- Add cleanup script with enhanced import pattern detection
- Support both named imports and namespace imports (* as t)
- Create automatic backups and comprehensive documentation
- Maintain type safety with build verification
- No breaking changes to existing code

Kept exports:
- TShowToast, Option, OptionWithIcon, DropdownValueSetter
- MentionOption, NotificationSeverity, MenuItemProps

Scripts: cleanup-common-types-safe.js, README-CLEANUP.md

* fix: cleanup

* fix: package; refactor: tsconfig

* feat: add back `recoil`

* fix: move dependencies to peerDependencies in client package

* feat: add @librechat/client as a dependency in package.json and package-lock.json

* feat: update client package configuration and dependencies

- Added new dependencies for Rollup plugins and updated existing ones in package.json and package-lock.json.
- Introduced a new Rollup configuration file for building the client package.
- Refactored build scripts to include a dedicated build command for the client.
- Updated TypeScript configuration for improved module resolution and type declaration output.
- Integrated a Toast component from the client package into the main App component.

* feat: enhance Rollup configuration for client package

- Updated terser plugin settings to preserve directives like 'use client'.
- Added custom warning handler to ignore "use client" directive warnings during the build process.

* chore: rename package/client build script command

* feat: update client package dependencies and Rollup configuration

- Added rollup-plugin-postcss to package.json and updated package-lock.json.
- Enhanced Rollup configuration to include postcss plugin for CSS handling.
- Updated index.ts to export all components from the components directory for better modularity.

* feat: add client package directory to update configuration

- Included the 'client' package directory in the update.js configuration to ensure it is recognized during updates.

* feat: export Toast component in client package

- Added export for the Toast component in index.ts to enhance modularity and accessibility of components.

* feat: /client transition to @librechat/client

* chore: fixed formatting issues

* fix: update peer dependencies in @librechat/client to prevent bundling them

* fix: correct useSprings implementation in SplitText component

* fix: circular dependencies in DataTable

* fix: add remaining peer dependencies and match actual versions previously used in `client/package.json`

* fix: correct frontend:ci script to include client package build

* chore: enhance unused package detection for @librechat/client and improve dependency extraction

* fix: add missing peer dependency for @radix-ui/react-collapsible

* chore: include "packages/client" in unused i18next keys detection

* test: update AgentFooter tests to use document.querySelector for spinner checks
test: mock window.matchMedia in setupTests.js for consistent test environment

* feat: add react-hook-form dependency and update FormInput component to use its types

* chore: linting

* refactor: remove unused defaultSelectedValues prop from MCPSelect and MultiSelect components

* chore: linting

* feat: update GitHub Actions workflow to publish @librechat/client

* chore: update GitHub Actions workflow to install and build data-provider and client dependencies

* chore: add missing @testing-library/react dependency to client package

* chore: update tsconfig.json to exclude additional test files

* chore: fix build issues, resolve latest LC changes

* chore: move MCP components outside of `~/components/ui`

* feat: implement dynamic theme system with environment variable support and Tailwind CSS integration

* chore: remove unnecessary logging of sttExternal and ttsExternal in Speech component

* chore: squashed cleanup commits

chore: move @tanstack/react-virtual to dependencies and remove recoil from package.json

chore: move dependencies to peerDependencies in package.json

feat: update package.json and rollup.config.js to include jotai and enhance bundling configuration

feat: update package.json and rollup.config.js to include jotai and enhance bundling configuration

refactor: reorganize exports in index.ts for improved clarity

refactor: remove unused types and interfaces from common files

refactor: update peer dependencies and improve component typings

- Removed duplicate peer dependencies from package.json and organized them.
- Updated rollup.config.js to disable TypeScript checking during the build process.
- Modified AnimatedTabs component to use React.ReactNode for label and content types, and added TypeScript workarounds for compatibility.
- Enhanced Label and Separator components to accept an optional className prop and improved prop spreading.
- Updated Slider component to include an optional className prop and refined prop handling for better type safety.

refactor: clean up client workflow and update package dependencies

refactor: update package dependencies and improve PostCSS and Rollup configurations

chore: bump version to 0.1.2 in package.json

chore: bump client version to 0.1.2 in package-lock.json

chore: bump client version to 0.1.3 and update dependencies

chore: bump client version to 0.1.4 and update @react-spring dependencies

chore: update package version to 0.1.5 and adjust peer dependencies

- Bump version in package.json from 0.1.4 to 0.1.5.
- Update peer dependency for @tanstack/react-query to allow version 5.0.0.
- Add @tanstack/react-table and @tanstack/react-virtual as dependencies.
- Update various dependencies to their latest compatible versions.
- Simplify postcss.config.js by removing unnecessary options.
- Clean up rollup.config.js by removing ignored PostCSS warnings.
- Update CheckboxButton component to cast icon as React JSX element.
- Adjust Combobox component's class names for better styling.
- Change DropdownPopup component to use React's namespace import.
- Modify InputOTP component to use 'any' type for OTPInputContext.
- Ensure displayLabel and value in ModelParameters are converted to strings.
- Update MultiSearch component's placeholder to ensure it's a string.
- Cast selectIcon in MultiSelect as React JSX element for consistency.
- Update OGDialogTemplate to cast selectText as React JSX element.
- Initialize animationRef in PixelCard with undefined for clarity.
- Add TypeScript ignore comments in Select and SelectDropDown components for Radix UI type conflicts.
- Ensure title in SelectDropDown is a string and adjust rendering of options.
- Update useLocalize hook to cast options as any for compatibility.

refactor: code structure; chore: translations cleanup

chore: remove unused imports and clean up code in NewChat component

refactor: enhance Menu component to support custom render functions for menu items

style: update itemClassName in ToolsDropdown for improved UI consistency

fix: merge conflicts

chore: update @radix-ui/react-accordion to version 1.2.11

* refactor: remove unnecessary TypeScript type assertions in AnimatedTabs, Label, Separator, and Slider components

* feat: enhance theme system with localStorage persistence and new theme atoms

* chore: bump version of @librechat/client to 0.1.7

* chore: fix ci/cd warnings/errors related to linting and unused localization keys

* chore: update dependencies for class-variance-authority, clsx, and match-sorter

* chore: bump @librechat/client to v0.1.8

* feat: add utility colors for theme customization and remove unused tailwindConfig

* v0.1.9

---------

Co-authored-by: Marco Beretta <81851188+berry-13@users.noreply.github.com>
2025-07-27 12:19:01 -04:00
Danny Avila
97e1cdd224 🧗 refactor: Replace traverse package with Minimal Traversal for Logging (#8687)
* 📦 chore: Remove `keyv` from peerDependencies in package.json and package-lock.json for data-schemas

* refactor: replace traverse import with custom object-traverse utility for better control and error handling during logging

* chore(data-schemas): bump version to 0.0.15 and remove unused dependencies

* refactor: optimize message construction in debugTraverse

* chore: update Node.js version to 20.x in data-schemas workflow
2025-07-27 11:47:37 -04:00
Dustin Healy
d6a65f5a08 🐛 fix: Temporary Chats Still Visible in Sidebar (#8688)
* 🐛 fix: Fix import error causing temporary chats to still display in sidebar

* refactor: Update import path for `getCustomConfig` in Conversation and Message models

* chore: eslint warnings

* ci: add tests for Conversation and Message models

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2025-07-27 11:42:35 -04:00
Danny Avila
f4facb7d35 🪵 refactor: Dynamic getLogDirectory utility for Loggers (#8686) 2025-07-26 20:11:20 -04:00
Dustin Healy
545a909953 🗂️ refactor: Make MCPSubMenu consistent with MCPSelect (#8650)
- Refactored MCPSelect and MCPSubMenu components to utilize a new custom hook, `useMCPServerManager`, for improved state management and server initialization logic.
- Added functionality to handle simultaneous MCP server initialization requests, including cancellation and user notifications.
- Updated translation files to include new messages for initialization cancellation.
- Improved the configuration dialog handling for MCP servers, streamlining the user experience when managing server settings.
2025-07-25 14:51:42 -04:00
Danny Avila
cd436dc6a8 📦 chore: Update @modelcontextprotocol/sdk to v1.17.0 (#8674)
* 📦 chore: Update `@modelcontextprotocol/sdk` to v1.17.0

* refactor: unused package detection by extracting workspace dependencies in GitHub Actions workflow

* chore: Enhance unused package detection by including peerDependencies extraction in GitHub Actions workflow

* fix: Ensure safe extraction of dependencies and peerDependencies in unused package detection workflow
2025-07-25 14:06:16 -04:00
Danny Avila
e75beb92b3 🗑️ chore: Remove Workflows for Changelogs (#8673) 2025-07-25 13:45:22 -04:00
Danny Avila
5251246313 📱 refactor: Redis Client Error Logging and Ping only when Ready (#8671)
* 📱 refactor: Redis Client Error Logging and Ping only when Ready

* chore: intellisense for warning comment for Keyv Redis client regarding prefix support
2025-07-25 12:33:05 -04:00
Danny Avila
26f23c6aaf 📦 chore: Bump @node-saml/passport-saml to v5.1.0 (#8670) 2025-07-25 11:26:20 -04:00
Danny Avila
1636af1f27 📦 chore: Bump mongodb-memory-server to v10.1.4 (#8669) 2025-07-25 11:23:38 -04:00
Theo N. Truong
b050a0bf1e feat: Add Redis Ping Interval Configuration (#8648)
Co-authored-by: Danny Avila <danny@librechat.ai>
2025-07-25 11:00:02 -04:00
github-actions[bot]
deb928bf80 🌍 i18n: Update translation.json with latest translations (#8664)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-07-25 10:36:14 -04:00
Theo N. Truong
21005b66cc feat: Add support for forced in-memory cache namespaces configuration (#8586)
* feat: Add support for forced in-memory cache keys configuration

* refactor: Update cache keys to use uppercase constants and move the cache for `librechat.yaml` into its own cache namespace (STATIC_CONFIG) with a more descriptive key (LIBRECHAT_YAML_CONFIG)
2025-07-25 10:32:55 -04:00
Dustin Healy
3dc9e85fab 🐛 fix: Display OAuth MCP servers according to Chat Menu Setting (#8643)
* fix: chatMenu not being respected in MCPSelect

* fix: chatMenu not being respected in MCPSubMenu
2025-07-25 10:21:10 -04:00
Sebastien Bruel
ec67cf2d3a 🚇 chore: Remove Overridden Transport Error Listener (#8656) 2025-07-25 10:17:33 -04:00
Dustin Healy
1fe977e48f 🐛 fix: MCP Name Normalization breaking User Provided Variables (#8644) 2025-07-24 10:44:58 -04:00
Danny Avila
01470ef9fd 🔄 refactor: Default Completion Title Prompt and Title Model Selection (#8646)
* refactor: prefer `agent.model` (user-facing value) over `agent.model_parameters.model` to ensure Azure mapping

* chore: update @librechat/agents to version 2.4.68 to use new default title prompt for completion title method
2025-07-24 10:38:26 -04:00
Danny Avila
bef5c26bed v0.7.9 (#8638)
* chore: update version to v0.7.9 across all relevant files

* 🔧 chore: bump @librechat/api version to 1.2.9

* 🔧 chore: update @librechat/data-schemas version to 0.0.12

* 🔧 chore: bump librechat-data-provider version to 0.7.902
2025-07-24 01:46:47 -04:00
github-actions[bot]
9e03fef9db 🌍 i18n: Update translation.json with latest translations (#8639)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-07-24 00:46:42 -04:00
Sebastien Bruel
283c9cff6f ℹ️ fix: Add back Removed Icons for MCP Servers in Tools Dialog (#8636)
* Bug: Fix icons for MCP servers

* Add `OPENAI_API_KEY` to `jestSetup.js` to fix tests
2025-07-24 00:41:06 -04:00
Danny Avila
0aafdc0a86 🔳 fix: Bare Object MCP Tool Schemas as Passthrough (#8637)
* 🔳 fix: Bare Object MCP Tool Schemas as Passthrough

* ci: Add cases for handling complex object schemas in convertJsonSchemaToZod
2025-07-24 00:11:20 -04:00
Danny Avila
365e3bca95 🔁 feat: Allow "http" as Alias for "streamable-http" in MCP Options (#8624)
- Updated StreamableHTTPOptionsSchema to accept "http" alongside "streamable-http".
- Enhanced isStreamableHTTPOptions function to handle both types and validate URLs accordingly.
- Added tests to ensure correct processing of "http" type options and rejection of websocket URLs.
2025-07-23 10:26:40 -04:00
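
A hedged zod sketch of the schema change above: accept "http" alongside "streamable-http" and reject websocket URLs, per the tests the commit mentions. The exact field set of the real schema is larger; this shows only the aliasing and URL validation:

    import { z } from 'zod';

    const StreamableHTTPOptionsSchema = z.object({
      type: z.enum(['http', 'streamable-http']),
      url: z
        .string()
        .url()
        .refine((value) => !value.startsWith('ws://') && !value.startsWith('wss://'), {
          message: 'WebSocket URLs are not valid for streamable-http transports',
        }),
    });
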
Danny Avila
a01536ddb7 🔗 fix: Set Abort Signal for Agent Chain Run if Cleaned Up (#8625) 2025-07-23 10:26:27 -04:00
github-actions[bot]
8a3ff62ee6 🌍 i18n: Update translation.json with latest translations (#8613)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-07-23 09:06:36 -04:00
Danny Avila
74d8a3824c 🔌 feat: MCP Reinitialization and OAuth in UI (#8598)
* feat: Add connection status endpoint for MCP servers

- Implemented a new endpoint to retrieve the connection status of all MCP servers without disconnecting idle connections.
- Enhanced MCPManager class with a method to get all user-specific connections.

* feat: add silencer arg to loadCustomConfig function to conditionally print config details

- Modified loadCustomConfig to accept a printConfig parameter so callers can prevent the entire custom config from being printed every time it is called

* fix: new status endpoint actually works now, changes to manager.ts to support it

- Updated the connection status endpoint to utilize Maps for app and user connections, rather than incorrectly treating them as objects.
- Introduced a new method + variable in MCPManager to track servers requiring OAuth discovered at startup.
- Stopped OAuth flow from continuing once detected during startup for a new connection

* refactor: Remove hasAuthConfig since we can get that on the frontend without needing to use the endpoint

* feat: Add MCP connection status query and query key for new endpoint

- Introduced a new query hook `useMCPConnectionStatusQuery` to fetch the connection status of MCP servers.
- Added request in data-service
- Defined the API endpoint for retrieving MCP connection status in api-endpoints.ts.
- Defined new types for MCP connection status responses in the types module.
- Added mcpConnectionStatus key

* feat: Enhance MCPSelect component with connection status and server configuration

- Added connection status handling for MCP servers using the new `useMCPConnectionStatusQuery` hook.
- Implemented logic to display appropriate status icons based on connection state and authentication configuration.
- Updated the server selection logic to utilize configured MCP servers from the startup configuration.
- Refactored the rendering of configuration buttons and status indicators for improved user interaction.

* refactor: move MCPConfigDialog to its own MCP subdir in ui and update import

* refactor: silence loadCustomConfig in status endpoint

* feat: Add optional pluginKey parameter to getUserPluginAuthValue

* feat: Add MCP authentication values endpoint and related queries

- Implemented a new endpoint to check authentication value flags for specific MCP servers, returning boolean indicators for each custom user variable.
- Added a corresponding query hook `useMCPAuthValuesQuery` to fetch authentication values from the frontend.
- Defined the API endpoint for retrieving MCP authentication values in api-endpoints.ts.
- Updated data-service to include a method for fetching MCP authentication values.
- Introduced new types for MCP authentication values responses in the types module.
- Added a new query key for MCP authentication values.

* feat: Localize MCPSelect component status labels and aria attributes

- Updated the MCPSelect component to use localized strings for connection status labels and aria attributes, enhancing accessibility and internationalization support.
- Added new translation keys for various connection states in the translation.json file.

* feat: Implement filtered MCP values selection based on connection status in MCPSelect

- Added a new `filteredSetMCPValues` function to ensure only connected servers are selectable in the MCPSelect component.
- Updated the rendering logic to visually indicate the connection status of servers by adjusting opacity.
- Enhanced accessibility by localizing the aria-label for the configuration button.

* feat: Add CustomUserVarsSection component for managing user variables

- Introduced a new `CustomUserVarsSection` component to allow users to configure custom variables for MCP servers.
- Integrated localization for user interface elements and added new translation keys for variable management.
- Added functionality to save and revoke user variables, with visual indicators for set/unset states.

* feat: Enhance MCPSelect and MCPConfigDialog with improved state management and UI updates

- Integrated `useQueryClient` to refetch queries for tools, authentication values, and connection status upon successful plugin updates in MCPSelect.
- Simplified plugin key handling by directly using the formatted plugin key in save and revoke operations.
- Updated MCPConfigDialog to include server status indicators and improved dialog content structure for better user experience.
- Added new translation key for active status in the localization files.

* feat: Enhance MCPConfigDialog with dynamic server status badges and localization updates

- Added a helper function to render status badges based on the connection state of the MCP server, improving user feedback on connection status.
- Updated the localization files to include new translation keys for connection states such as "Connecting" and "Offline".
- Refactored the dialog to utilize the new status rendering function for better code organization and readability.

* feat: Implement OAuth handling and server initialization in MCP reinitialize flow

- Added OAuth handling to the MCP reinitialize endpoint, allowing the server to capture and return OAuth URLs when required.
- Updated the MCPConfigDialog to include a new ServerInitializationSection for managing server initialization and OAuth flow.
- Enhanced the user experience by providing feedback on server status and OAuth requirements through localized messages.
- Introduced new translation keys for OAuth-related messages in the localization files.
- Refactored the MCPSelect component to remove unused authentication configuration props.

* feat: Make OAuth actually work / update after OAuth link authorized

- Improved the handling of OAuth flows in the MCP reinitialize process, allowing for immediate return when OAuth is initiated.
- Updated the UserController to extract server names from plugin keys for better logging and connection management.
- Enhanced the MCPSelect component to reflect authentication status based on OAuth requirements.
- Implemented polling for OAuth completion in the ServerInitializationSection to improve user feedback during the connection process.
- Refactored MCPManager to support new OAuth flow initiation logic and connection handling.

* refactor: Simplify MCPPanel component and enhance server status display

- Removed unused imports and state management related to user plugins and server reinitialization.
- Integrated connection status handling directly into the MCPPanel for improved user feedback.
- Updated the rendering logic to display server connection states with visual indicators.
- Refactored the editing view to utilize new components for server initialization and custom user variables management.

* chore: remove comments

* chore: remove unused translation key for MCP panel

* refactor: Rename returnOnOAuthInitiated to returnOnOAuth for clarity

* refactor: attempt initialize on server click

* feat: add cancel OAuth flow functionality and related UI updates

* refactor: move server status icon logic into its own component

* chore: remove old localization strings (it makes more sense for icon labels to just use the configure string, since that's where they lead)

* fix: fix accessibility issues with MCPSelect

* fix: add missing save/revoke mutation logic to MCPPanel

* styling: add margin to checkmark in MultiSelect

* fix: add back in customUserVars check to hide gear config icon for servers without customUserVars

---------

Co-authored-by: Dustin Healy <dustinhealy1@gmail.com>
Co-authored-by: Dustin Healy <54083382+dustinhealy@users.noreply.github.com>
2025-07-22 22:52:45 -04:00
Danny Avila
62c3f135e7 ✔️ fix: Resource field TypeError & Missing Role Permission Type (#8606)
* fix: resource parameter undefined TypeError in log

* chore: Add missing FILE_SEARCH permission type to IRole interface

* chore: Bump version of @librechat/data-schemas to 0.0.11

* fix: Ensure resource is defined and handle potential null values in OAuth flow
2025-07-22 18:22:58 -04:00
Rinor Maloku
baf3b4ad08 🔐 feat: Add Resource Parameter to OAuth Requests per MCP Spec (#8599) 2025-07-22 17:52:55 -04:00
Danny Avila
e5d08ccdf1 🗂️ feat: Add File Search Toggle Permission for Chat Area Badge (#8605) 2025-07-22 17:51:21 -04:00
github-actions[bot]
5178507b1c 🌍 i18n: Update translation.json with latest translations (#8602)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-07-22 15:53:30 -04:00
Danny Avila
f797e90d79 🔀 feat: moonshotai/kimi Context and OpenRouter Endpoint Token Config (#8604)
* feat: Enhance agent initialization with endpoint token configuration and round max context tokens

* feat: recognize moonshot/kimi model context window

* chore: remove unused i18n key
2025-07-22 15:52:54 -04:00
Danny Avila
259224d986 🧼 chore: Clean up Settings by Removing Beta tab and reorganizing imports 2025-07-22 12:05:58 -04:00
Danny Avila
13789ab261 ☁️ fix: 'thinking' parameter default to false for Bedrock Conversations (#8600) 2025-07-22 12:01:18 -04:00
Dustin Healy
faaba30af1 feat: Add MCP Reinitialization to MCPPanel (#8418)
* feat: Add MCP Reinitialization to MCPPanel

- Refactored tool caching to include user-specific tools in various service files.
- Refactored MCPManager class for clarity
- Added a new endpoint for reinitializing MCP servers, allowing for dynamic updates of server configurations.
- Enhanced the MCPPanel component to support server reinitialization with user feedback.

* 🔃 refactor: Simplify Plugin Deduplication and Clear Cache Post-MCP Initialization

- Replaced manual deduplication of tools with the dedicated `filterUniquePlugins` function for improved readability.
- Added back cache clearing for tools after MCP initialization to ensure fresh data is used.
- Removed unused exports from `PluginController.js` to clean up the codebase.
2025-07-21 17:49:19 -04:00
Danny Avila
14660d75ae 🆕 feat: Enhanced Title Generation Config Options (#8580)
* 🏗️ refactor: Extract reasoning key logic into separate function

* refactor: Ensure `overrideProvider` is always defined in `getProviderConfig` result, and only used in `initializeAgent` if different from `agent.provider`

* feat: new title configuration options across services

- titlePrompt
- titleEndpoint
- titlePromptTemplate
- new "completion" titleMethod (new default)

* chore: update @librechat/agents and conform openai version to prevent SDK errors

* chore: add form-data package as a dependency and override to v4.0.4 to address CVE-2025-7783

* feat: add support for 'all' endpoint configuration in AppService and corresponding tests

* refactor: replace HttpsProxyAgent with ProxyAgent from undici for improved proxy handling in assistant initialization

* chore: update frontend review workflow to limit package paths to data-provider

* chore: update backend review workflow to include all package paths
2025-07-21 17:37:37 -04:00
Danny Avila
aec1777a90 📦 chore: bump @librechat/agents to v2.4.63 (#8558) 2025-07-19 14:37:22 -04:00
Danny Avila
90c43dd451 🔒 fix: Address multer CVE-2025-7338 (#8557) 2025-07-19 14:23:20 -04:00
Danny Avila
4c754c1190 🏄‍♂️ fix: Handle SSE Stream Edge Case (#8556)
* refactor: Move draft-related utilities to a new `drafts.ts` file

* refactor: auto-save draft logic to use new get/set functions

* fix: Ensure `getDraft` properly decodes stored draft values

* fix: Handle edge case where stream is cancelled before any response, which creates a blank page
2025-07-19 13:44:02 -04:00
Danny Avila
f70e0cf849 🔒 fix: Address on-headers CVE-2025-7339 (#8553)
* 📦 chore: bump `compression` from 1.7.4 to 1.8.1

* chore: bump `express-session` to v1.18.2

* chore: update `connect-redis` from v7.1.0 to v8.1.0

* chore: update import for `connect-redis` to use named export due to v8.0.0 breaking change
2025-07-19 13:36:59 -04:00
Dustin Healy
d0c958ba33 🔥 feat: Add Firecrawl Scraper Configurability (#8495)
- Added firecrawlOptions configuration field to librechat.yaml
- Refactored web.ts to live in packages/api rather than data-provider
- Updated imports from web.ts to reflect new location
- Added firecrawlOptions to FirecrawlConfig interface
- Added firecrawlOptions to authResult of loadWebSearchAuth so it gets properly passed to agents to be built into firecrawl payload
- Added tests for firecrawlOptions to web.spec.ts
2025-07-18 22:37:57 -04:00
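
A hedged sketch of the config surface described above: a firecrawlOptions pass-through on the Firecrawl config that gets merged into the scraper payload. Apart from firecrawlOptions itself, the field names are illustrative:

    interface FirecrawlConfig {
      apiKey?: string;
      apiUrl?: string;
      /** Arbitrary options forwarded verbatim into the Firecrawl request payload. */
      firecrawlOptions?: Record<string, unknown>;
    }

    function buildFirecrawlPayload(url: string, config: FirecrawlConfig) {
      return { url, ...config.firecrawlOptions };
    }
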
Dustin Healy
0761e65086 🔧 fix: Enhance Responses API Auto-Enable Logic for Compatible Endpoints (#8506)
- Updated the logic to auto-enable the Responses API when web search is enabled, specifically for OpenAI, Azure, and Custom endpoints.
- Added import for EModelEndpoint to facilitate endpoint compatibility checks.
2025-07-18 22:27:56 -04:00
Danny Avila
0bf708915b ♻️ refactor: formatContentStrings to support AI and System messages (#8528)
* ♻️ refactor: `formatContentStrings` to support AI and System messages

* 📦 chore: bump @librechat/api version to 1.2.7
2025-07-17 19:19:37 -04:00
Danny Avila
cf59f1ab45 📦 chore: bump librechat-data-provider to v0.7.900 2025-07-17 18:42:34 -04:00
Danny Avila
445e9eae85 🧩 fix: Human Message Content Handling for Legacy Content (#8525)
* wip: first pass content strings

* 📦 chore: update @langchain/core to v0.3.62 for data-provider dev dep.

* 📦 chore: bump @langchain/core to v0.3.62 for api dep.

* 📦 chore: move @langchain/core to peerDependencies in package.json and package-lock.json

* fix: update formatContentStrings to create HumanMessage directly from formatted content

* chore: import order
2025-07-17 18:34:24 -04:00
Danny Avila
cd9c578907 📦 chore: bump @librechat/agents to v2.4.62 (#8524) 2025-07-17 17:54:25 -04:00
github-actions[bot]
ac94c73f23 🌍 i18n: Update translation.json with latest translations (#8505)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-07-17 11:31:45 -04:00
Danny Avila
dfef7c31d2 ♻️ refactor: SidePanel Context to Optimize on ChatView Rerender (#8509) 2025-07-17 11:31:19 -04:00
Danny Avila
0b1b0af741 ☑️ refactor: Allow Mid-convo Agent Selection from Agent Panel (#8510) 2025-07-17 11:30:50 -04:00
Ben Verhees
0a169a1ff6 👥 fix: Collaborative Check Flag for Shared Agent Files (#8516) 2025-07-17 10:42:57 -04:00
Danny Avila
4b12ea327a 📦 chore: bump @librechat/agents to v2.4.61 (#8504) 2025-07-16 18:32:31 -04:00
Danny Avila
35d8ef50f4 🪙 fix: Use Fallback Token Transaction if No Collected Usage (#8503) 2025-07-16 17:58:15 -04:00
Danny Avila
1dabe96404 🕒 refactor: Use Legacy Content for Custom Endpoints and Azure Serverless for Improved Compatibility (#8502)
* 🕒 refactor: Use Legacy Content for Custom Endpoints to Improve Compatibility

- Also applies to Azure serverless endpoints from AI Foundry

* chore: move useLegacyContent condition before early return

* fix: Ensure useLegacyContent is set only when options are available
2025-07-16 17:17:15 -04:00
Dustin Healy
7f8c327509 🌊 feat: Add Disable Streaming Toggle (#8177)
* 🌊 feat: Add Disable Streaming Option in Configuration

- Introduced a new setting to disable streaming responses in openAI, Azure, and custom endpoint parameter panels.
- Updated translation files to include labels and descriptions for the disable streaming feature.
- Modified relevant schemas and parameter settings to support the new disable streaming functionality.

* 🔧 fix: disableStreaming state not persisting when returning to a conversation

- Added disableStreaming field to the IPreset interface and conversationPreset.
- Moved toggles and sliders around for nicer left-right UI split in parameters panel.
- Removed old reference to 'grounding' in conversationPreset (now web_search) and added web_search to IPreset.
2025-07-16 10:09:40 -04:00
Danny Avila
52bbac3a37 feat: Add GitHub Actions workflow for publishing @librechat/client to NPM 2025-07-16 09:19:59 -04:00
Danny Avila
62b4f3b795 🛂 fix: Only Perform allowedProviders Validation for Agents (#8487) 2025-07-15 18:43:47 -04:00
Theo N. Truong
01b012a8fa 🏦 refactor: Centralize Caching & Redis Key Prefixing (#8457)
* 🔧 Overhauled caching feature:
- Refactored caching logic.
- Fixed redis prefix, namespace, tls, ttl, and cluster.
- Added REDIS_KEY_PREFIX_VAR

* # refactor: Rename redisCache to standardCache

* # Add Redis pinging mechanism to maintain connection.

* # docs: Add warning about Keyv Redis client prefix support
2025-07-15 18:24:31 -04:00
Danny Avila
418b5e9070 ♻️ fix: Resolve MCP Connection if Ping is Unsupported (#8483) 2025-07-15 18:20:11 -04:00
Danny Avila
a9f01bb86f 📝 refactor: Memory Instructions for Improved Performance (#8463) 2025-07-14 18:37:46 -04:00
Danny Avila
aeeb860fe0 📦 chore: bump @librechat/agents to v2.4.60 (#8458) 2025-07-14 18:29:48 -04:00
github-actions[bot]
e11e716807 🌍 i18n: Update translation.json with latest translations (#8422)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-07-14 17:22:02 -04:00
Danny Avila
e370a87ebe ♻️ fix: Correct Message ID Assignment Logic (#8439)
* fix: Add `isRegenerate` flag to chat payload to avoid saving temporary response IDs

* fix: Remove unused `isResubmission` flag

* ci: Add tests for responseMessageId regeneration logic in BaseClient
2025-07-14 00:57:20 -04:00
Danny Avila
170cc340d8 refactor: Imports to Prevent Circular Type Refs (#8423) 2025-07-12 11:37:07 -04:00
Danny Avila
f1b29ffb45 🔒 feat: View/Delete Shared Agent Files (#8419)
* 🔧 fix: Add localized message for delete operation not allowed

* refactor: improve file deletion operations ux

* feat: agent-based file access control and enhance file retrieval logic

* feat: implement agent-specific file retrieval

* feat: enhance agent file retrieval logic for authors and shared access

* ci: include userId and agentId in mockGetFiles call for OCR file retrieval
2025-07-12 01:52:46 -04:00
Danny Avila
6aa4bb5a4a 👟 fix: Edge Case of Azure Provider Assignment for Title Run (#8420) 2025-07-12 01:52:17 -04:00
Sebastien Bruel
9f44187351 🗂️ fix: Disable express-static-gzip for Uploaded Images (#8307)
* Fix scanning of the uploaded images folder on startup

* Re-write tests to pass linting

* Disable image output gzip scan by default

* Add `ENABLE_IMAGE_OUTPUT_GZIP_SCAN` to `.env.example`
2025-07-11 16:51:53 -04:00
Samuel Path
d2e1ca4c4a 🖼️ fix: Permission Checks for Agent Avatar Uploads (#8412)
Implements permission validation before allowing agent avatar uploads. Only admins, the agent's author, or users of collaborative agents can modify avatars. Also improves error handling by checking for agent existence upfront and simplifies avatar update logic.

Co-authored-by: Sai Nihas <sai.nihas@shopify.com>
2025-07-11 15:37:11 -04:00
Samuel Path
8e869f2274 🧠 feat: Enforce Token Limit for Memory Usage (#8401) 2025-07-11 14:46:19 -04:00
Danny Avila
2e1874e596 🔧 fix: handleError import path to use '@librechat/api' (#8415)
* 🔧 fix: Update handleError import path to use '@librechat/api' in middleware files

* chore: import order

* chore: import order

---------

Co-authored-by: Atef Bellaaj <slalom.bellaaj@external.daimlertruck.com>
2025-07-11 13:29:51 -04:00
Danny Avila
929b433662 🔧 fix: Plugin Method Undefined in Agent Tool Closure (#8413) 2025-07-11 13:16:59 -04:00
Danny Avila
1e4f1f780c 🔑 feat: Grok 4 Pricing and Token Limits (#8395)
* 🔑 feat: Grok 4 Pricing and Token Limits

* 🔑 feat: Update Grok 3 Pricing for Mini and Fast Models
2025-07-11 03:24:13 -04:00
Danny Avila
4733f10e41 📦 chore: Bump @librechat/agents to v2.4.59 (#8392)
* chore: remove @librechat/agents temporarily

* chore: bump @librechat/agents to v2.4.59
2025-07-11 03:18:36 -04:00
Danny Avila
110984b48f 📦 chore: Bump @librechat/agents to v2.4.58 (#8386) 2025-07-10 20:41:38 -04:00
Danny Avila
19320f2296 🔑 feat: Base64 Google Service Keys and Reliable Private Key Formats (#8385) 2025-07-10 20:33:01 -04:00
Danny Avila
8523074e87 🔧 fix: Invalidate Tool Caching after MCP Initialization (#8384)
- Added Constants import in PluginController for better organization.
- Renamed cachedTools to cachedToolsArray for clarity in PluginController.
- Ensured getCachedTools returns an empty object if no tools are found.
- Cleared tools array cache after MCP initialization in initializeMCP for consistency.
2025-07-10 20:32:38 -04:00
Danny Avila
e4531d682d 🔃 refactor: Consolidate JSON Schema Conversion to Schema 2025-07-10 18:52:24 -04:00
Danny Avila
4bbdc4c402 🧩 fix: additionalProperties Handling and Ref Resolution in Zod Schemas (#8381)
* fix: false flagging object as empty object when it has `additionalProperties` field

* 🔧 fix: Implement $ref resolution in JSON Schema handling

* 🔧 fix: Resolve JSON Schema references before conversion to Zod

* chore: move zod logic packages/api
2025-07-10 18:02:34 -04:00
Danny Avila
8ca4cf3d2f 🔧 fix: Update Drag & Drop Logic with new File Option handling (#8354) 2025-07-10 08:38:55 -04:00
Danny Avila
13a9bcdd48 🔧 fix: Omit 'additionalModelRequestFields' from Bedrock Titling (#8353) 2025-07-10 08:38:30 -04:00
Danny Avila
4b32ec42c6 📝 fix: Resolve Markdown Rendering Issues (#8352)
* 🔧 fix: Handle optional arguments in `useParseArgs` and improve tool call condition

* chore: Remove math plugins from `MarkdownLite`

* feat: Add Error Boundary to Markdown Component for Enhanced Error Handling

- Introduced `MarkdownErrorBoundary` to catch and display errors during Markdown rendering.
- Updated the `Markdown` component to utilize the new error boundary, improving user experience by handling rendering issues gracefully.

* Revert "chore: Remove math plugins from `MarkdownLite`"

This reverts commit d393099d52.

* feat: Introduce MarkdownErrorBoundary for improved error handling in Markdown components

* refactor: include most markdown elements in error boundary fallback, aside from problematic plugins
2025-07-10 08:38:14 -04:00
Danny Avila
4918899c8d 🖨️ fix: Use Azure Serverless API Version for Responses API (#8316) 2025-07-08 21:07:52 -04:00
Danny Avila
7e37211458 🗝️ refactor: loadServiceKey to Support Stringified JSON and Env Var Renaming (#8317)
* feat: Enhance loadServiceKey to support stringified JSON input

* chore: Update GOOGLE_SERVICE_KEY_FILE_PATH to GOOGLE_SERVICE_KEY_FILE for consistency
2025-07-08 21:07:33 -04:00
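
A hedged sketch of the flexible key loading described here and in #8385: accept raw stringified JSON, base64-encoded JSON, or a file path. Names and error handling are simplified for illustration:

    import fs from 'node:fs';

    function loadServiceKey(value: string): Record<string, unknown> | null {
      const trimmed = value.trim();
      // 1. Raw stringified JSON.
      if (trimmed.startsWith('{')) {
        return JSON.parse(trimmed);
      }
      // 2. Base64-encoded JSON.
      try {
        const decoded = Buffer.from(trimmed, 'base64').toString('utf8');
        if (decoded.trim().startsWith('{')) {
          return JSON.parse(decoded);
        }
      } catch {
        // not valid base64-encoded JSON; fall through
      }
      // 3. Path to a key file on disk.
      if (fs.existsSync(trimmed)) {
        return JSON.parse(fs.readFileSync(trimmed, 'utf8'));
      }
      return null;
    }
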
Theo N. Truong
e57fc83d40 🔧 fix: Import Path for Custom Configuration Loading (#8319) 2025-07-08 21:07:04 -04:00
Danny Avila
550610dba9 ⚖️ feat: Add Violation Scores (#8304)
- Introduced new violation scores for TTS, STT, Fork, Import, and File Upload actions in the .env.example file.
- Updated logViolation function to accept a score parameter, allowing for dynamic severity levels based on the action type.
- Modified limiters for Fork, Import, Message, STT, TTS, Tool Call, and File Upload to utilize the new violation scores when logging violations.
2025-07-07 17:08:40 -04:00
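
A minimal sketch of the dynamic severity lookup: each action reads its own *_VIOLATION_SCORE variable (as in the .env diff below), defaulting to 0 when unset. The helper name is illustrative:

    // e.g. getViolationScore('FORK') reads FORK_VIOLATION_SCORE.
    function getViolationScore(action: string): number {
      const score = Number(process.env[`${action}_VIOLATION_SCORE`]);
      return Number.isFinite(score) ? score : 0;
    }
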
github-actions[bot]
916cd46221 🌍 i18n: Update translation.json with latest translations (#8288)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-07-07 17:08:15 -04:00
Dustin Healy
12b08183ff 🐛 fix: Memories Key Updates (#8302)
* Updated the PATCH /memories/:key endpoint to allow key changes while ensuring no duplicate keys exist.
* Improved error handling in MemoryCreateDialog and MemoryEditDialog for key validation and duplication scenarios.
* Added a new translation for memory key validation error in translation.json.
2025-07-07 16:38:55 -04:00
Danny Avila
f4d97e1672 📝 docs: Update README 2025-07-07 01:14:07 -04:00
Danny Avila
035fa081c1 🔧 refactor: Prevent Unnecessary Google Service Key Loading (#8287)
* 🔧 refactor: Improve Google Key Handling in `loadAsyncEndpoints`

- Enhanced logic to check if GOOGLE_KEY is provided, including user-provided checks.
- Updated service key loading mechanism to only attempt loading if GOOGLE_KEY is not provided.
- Added error logging for service key loading failures.

* 🔧 refactor: Enhance service key loading logic in `initializeClient`
2025-07-07 01:10:08 -04:00
Danny Avila
aecf8f19a6 🔧 fix: Initialize reasoningKey to 'reasoning_content' (#8286)
* chore: bump @librechat/agents to v2.4.56

* chore: bump @librechat/api version to 1.2.6

* fix: initialize reasoningKey to 'reasoning_content' in createRun function
2025-07-07 01:05:40 -04:00
Dustin Healy
35f548a94d 🔄 refactor: Google grounding field to web_search for Consistency (#8285)
- Updated the Google configuration and related schemas to replace 'grounding' with 'web_search' for consistency.
- Adjusted the logic in the getGoogleConfig function to reflect the new naming convention.
- Ensured all references in parameter settings and conversation schemas are updated accordingly.
2025-07-07 00:41:51 -04:00
Danny Avila
e60c0cf201 🔍 feat: Anthropic Web Search (#8281)
* chore: bump @librechat/agents to ^2.4.54 for anthropic web search support

* WIP: hardcoded web search tool usage

* feat: Implement web search functionality in Anthropic integration

- Updated parameters panel to include web search for anthropic models.
- Updated necessary schemas to accommodate toggle functionality

* chore: Set default web search option to false in anthropicSettings

* refactor: Rename webSearch to web_search for consistency across settings and schemas

* chore: bump @librechat/agents to v2.4.55

---------

Co-authored-by: Dustin Healy <dustinhealy1@gmail.com>
2025-07-06 21:43:09 -04:00
github-actions[bot]
5b392f9cb0 🌍 i18n: Update translation.json with latest translations (#8255)
* 🌍 i18n: Update translation.json with latest translations

* Update translation.json

---------

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Danny Avila <danny@librechat.ai>
2025-07-05 18:04:57 -04:00
Dustin Healy
e0f468da20 🔍 feat: Add SearXNG for Web Search and Enhance ApiKeyDialog (#8242)
* 🔍 feat: Add SearXNG Web Search support and enhance ApiKeyDialog

- Updated WebSearch component to include authentication data for web search functionality so the badge won't show after keys are revoked
- Refactored ApiKeyDialog to streamline provider, scraper, and reranker selection with new InputSection component
- Added support for SearXNG as a search provider and updated translation files accordingly
- Improved form handling in useAuthSearchTool to accommodate new API keys and URLs

* 📜 chore: remove unused i18next key

* 📦 chore: address comments (swap API key and URL fields in SearXNG config, change input fields to 'text' from 'password')

* 📦 chore: make URL fields go first in ApiKeyDialog

* chore: bump @librechat/agents to v2.4.52

* ci: update webSearch configuration to include searxng fields in AppService.spec.js

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2025-07-05 17:58:22 -04:00
Danny Avila
91a2df4759 🔧 refactor: Change Permissions Check from some to every for Stricter Access Validation (#8270)
* 🔧 refactor: Change Permissions Check from `some` to `every` for Stricter Access Validation

* 🧪 ci: Add comprehensive tests for access middleware functions

* fix: custom provider check logic in `getProviderConfig` function
2025-07-05 15:53:08 -04:00
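
The core of the change, sketched: access now requires all listed permissions rather than any one of them. Names are illustrative:

    // Before: required.some(...) granted access on a partial match.
    // After: every required permission must be present.
    function hasAllPermissions(userPermissions: Set<string>, required: string[]): boolean {
      return required.every((permission) => userPermissions.has(permission));
    }
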
Danny Avila
97a99985fa 🛡️ feat: Rate Limiting for Conversation Forking (#8269)
* chore: Improve error logging for fetching conversations, and use new TS packages for utils

* feat: Implement fork limiters for conversation forking requests

* chore: error message for conversation index deletion to clarify syncing behavior

* feat: Enhance error handling for forking with rate limit message
2025-07-05 15:02:32 -04:00
Danny Avila
3554625a06 refactor: Add Robust Timestamp handling for Conversation Imports (#8262) 2025-07-05 12:44:19 -04:00
Danny Avila
a37bf6719c 🧪 refactor: Add Validation for Agent Creation/Updates (#8261)
* refactor: Add validation schemas for agent creation and updates

* fix: Ensure author validation is applied in correct order for agent update handler

* ci: Add comprehensive unit tests for agent creation and update handlers with mass assignment protection

* fix: add missing  web_search tool in system tools configuration
2025-07-05 11:34:28 -04:00
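
A hedged zod sketch of the mass-assignment protection the tests above cover: validate updates against an explicit field whitelist and reject unknown keys, rather than passing req.body straight to the model. The field list is illustrative:

    import { z } from 'zod';

    const agentUpdateSchema = z
      .object({
        name: z.string().min(1).optional(),
        description: z.string().optional(),
        instructions: z.string().optional(),
        tools: z.array(z.string()).optional(),
      })
      .strict(); // unknown keys (e.g. author) fail validation instead of being saved
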
Danny Avila
e513f50c08 ⚒️ refactor: Keep useAvailableToolsQuery Enabled for All Endpoints 2025-07-04 15:43:17 -04:00
Danny Avila
f5511e4a4e 🔁 refactor: Capabilities for Tools/File handling for Direct Endpoints (#8253)
* feat: add useAgentCapabilities hook to manage agent capabilities

* refactor: move  agents and endpoints configuration to AgentPanel context provider

* refactor: implement useGetAgentsConfig hook for consolidated agents and endpoints management

* refactor: enhance ToolsDropdown to utilize agent capabilities and streamline dropdown item rendering

* chore: reorder return values in useAgentCapabilities for improved clarity

* refactor: enhance agent capabilities handling in AttachFileMenu and update file handling logic to allow capabilities to be used for non-agents endpoints
2025-07-04 14:51:26 -04:00
Danny Avila
a288ad1d9c 🪄 feat: Artifacts Badge & Optimize Ephemeral Agent State (#8252)
* 🔧 fix: Update type annotations in useEventHandlers for better type safety

* 🔧 refactor: `useToolToggle` for improved localStorage synchronization and allow string/falsy values for setting to storage

* feat: Implement Artifacts badge in BadgeRow with toggle options and UI components

- Added Artifacts component to manage artifacts state and options.
- Introduced ArtifactsSubMenu for additional settings related to artifacts.
- Integrated artifacts functionality into BadgeRow and ToolsDropdown components.
- Updated localStorage handling for artifacts state persistence.
- Enhanced localization for artifacts-related strings in translation files.
- Refactored Agent model to include artifacts in the ephemeral agent response.

* fix: set ephemeral agent state for conversation on finalization

* chore: remove beta settings dialog tab

* refactor: improve Ephemeral Agent statefulness

* fix: update setValue parameter to use 'value' instead of 'isChecked' in CheckboxButton

* refactor: update color classes for Artifact toggle and order of dropdown components

* chore: remove unused i18n localization
2025-07-04 13:25:04 -04:00
Sebastien Bruel
458580ec87 🥅 refactor: Express App default Error Handling with ErrorController (#8249) 2025-07-04 13:24:57 -04:00
github-actions[bot]
4285d5841c 🌍 i18n: Update translation.json with latest translations (#8235)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-07-04 11:48:54 -04:00
Sebastien Bruel
5ee55cda4f 📦 chore: bump @modelcontextprotocol/sdk to 1.13.3 and cleanup mcp/connection.ts (#8241) 2025-07-04 09:28:57 -04:00
Danny Avila
404d40cbef 📦 chore: override @langchain/openai to v0.5.16 2025-07-03 23:16:42 -04:00
Danny Avila
f4680b016c 📦 chore: bump @librechat/agents to v2.4.51 (#8234) 2025-07-03 22:35:13 -04:00
Ruben Talstra
077224b351 feat: Add support for Armenian, Latvian, and Uyghur languages (#8227) 2025-07-03 11:16:33 -04:00
875 changed files with 34,797 additions and 7,463 deletions

View File

@@ -349,6 +349,11 @@ REGISTRATION_VIOLATION_SCORE=1
CONCURRENT_VIOLATION_SCORE=1
MESSAGE_VIOLATION_SCORE=1
NON_BROWSER_VIOLATION_SCORE=20
TTS_VIOLATION_SCORE=0
STT_VIOLATION_SCORE=0
FORK_VIOLATION_SCORE=0
IMPORT_VIOLATION_SCORE=0
FILE_UPLOAD_VIOLATION_SCORE=0
LOGIN_MAX=7
LOGIN_WINDOW=5
@@ -437,6 +442,8 @@ OPENID_REQUIRED_ROLE_PARAMETER_PATH=
OPENID_USERNAME_CLAIM=
# Set to determine which user info property returned from OpenID Provider to store as the User's name
OPENID_NAME_CLAIM=
# Optional audience parameter for OpenID authorization requests
OPENID_AUDIENCE=
OPENID_BUTTON_LABEL=
OPENID_IMAGE_URL=
@@ -575,6 +582,10 @@ ALLOW_SHARED_LINKS_PUBLIC=true
# If you have another service in front of your LibreChat doing compression, disable express based compression here
# DISABLE_COMPRESSION=true
# If you have gzipped versions of uploaded images in the same folder, this will enable gzip scan and serving of these images
# Note: The images folder will be scanned on startup and a map kept in memory. Be careful with a large number of images.
# ENABLE_IMAGE_OUTPUT_GZIP_SCAN=true
#===================================================#
# UI #
#===================================================#
@@ -592,11 +603,40 @@ HELP_AND_FAQ_URL=https://librechat.ai
# REDIS Options #
#===============#
# REDIS_URI=10.10.10.10:6379
# Enable Redis for caching and session storage
# USE_REDIS=true
# USE_REDIS_CLUSTER=true
# REDIS_CA=/path/to/ca.crt
# Single Redis instance
# REDIS_URI=redis://127.0.0.1:6379
# Redis cluster (multiple nodes)
# REDIS_URI=redis://127.0.0.1:7001,redis://127.0.0.1:7002,redis://127.0.0.1:7003
# Redis with TLS/SSL encryption and CA certificate
# REDIS_URI=rediss://127.0.0.1:6380
# REDIS_CA=/path/to/ca-cert.pem
# Redis authentication (if required)
# REDIS_USERNAME=your_redis_username
# REDIS_PASSWORD=your_redis_password
# Redis key prefix configuration
# Use environment variable name for dynamic prefix (recommended for cloud deployments)
# REDIS_KEY_PREFIX_VAR=K_REVISION
# Or use static prefix directly
# REDIS_KEY_PREFIX=librechat
# Redis connection limits
# REDIS_MAX_LISTENERS=40
# Redis ping interval in seconds (0 = disabled, >0 = enabled)
# When set to a positive integer, Redis clients will ping the server at this interval to keep connections alive
# When unset or 0, no pinging is performed (recommended for most use cases)
# REDIS_PING_INTERVAL=300
# Force specific cache namespaces to use in-memory storage even when Redis is enabled
# Comma-separated list of CacheKeys (e.g., STATIC_CONFIG,ROLES,MESSAGES)
# FORCED_IN_MEMORY_CACHE_NAMESPACES=STATIC_CONFIG,ROLES
#==================================================#
# Others #

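For orientation, a minimal JavaScript sketch of how the two prefix variables are resolved; it mirrors the new api/cache/cacheConfig.js shown further below and is not additional configuration:

const { REDIS_KEY_PREFIX_VAR, REDIS_KEY_PREFIX } = process.env;
// Setting both is rejected at startup (see cacheConfig.js below).
if (REDIS_KEY_PREFIX_VAR && REDIS_KEY_PREFIX) {
  throw new Error('Only either REDIS_KEY_PREFIX_VAR or REDIS_KEY_PREFIX can be set.');
}
// With REDIS_KEY_PREFIX_VAR=K_REVISION and K_REVISION=rev-42 in the environment,
// the effective key prefix resolves to 'rev-42'.
const prefix = process.env[REDIS_KEY_PREFIX_VAR] || REDIS_KEY_PREFIX || '';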
View File

@@ -7,7 +7,7 @@ on:
- release/*
paths:
- 'api/**'
- 'packages/api/**'
- 'packages/**'
jobs:
tests_Backend:
name: Run Backend unit tests

58 .github/workflows/client.yml vendored Normal file
View File

@@ -0,0 +1,58 @@
name: Publish `@librechat/client` to NPM
on:
push:
branches:
- main
paths:
- 'packages/client/package.json'
workflow_dispatch:
inputs:
reason:
description: 'Reason for manual trigger'
required: false
default: 'Manual publish requested'
jobs:
build-and-publish:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Use Node.js
uses: actions/setup-node@v4
with:
node-version: '20.x'
- name: Install client dependencies
run: cd packages/client && npm ci
- name: Build client
run: cd packages/client && npm run build
- name: Set up npm authentication
run: echo "//registry.npmjs.org/:_authToken=${{ secrets.PUBLISH_NPM_TOKEN }}" > ~/.npmrc
- name: Check version change
id: check
working-directory: packages/client
run: |
PACKAGE_VERSION=$(node -p "require('./package.json').version")
PUBLISHED_VERSION=$(npm view @librechat/client version 2>/dev/null || echo "0.0.0")
if [ "$PACKAGE_VERSION" = "$PUBLISHED_VERSION" ]; then
echo "No version change, skipping publish"
echo "skip=true" >> $GITHUB_OUTPUT
else
echo "Version changed, proceeding with publish"
echo "skip=false" >> $GITHUB_OUTPUT
fi
- name: Pack package
if: steps.check.outputs.skip != 'true'
working-directory: packages/client
run: npm pack
- name: Publish
if: steps.check.outputs.skip != 'true'
working-directory: packages/client
run: npm publish *.tgz --access public

View File

@@ -22,7 +22,7 @@ jobs:
- name: Use Node.js
uses: actions/setup-node@v4
with:
node-version: '18.x'
node-version: '20.x'
- name: Install dependencies
run: cd packages/data-schemas && npm ci

View File

@@ -8,7 +8,7 @@ on:
- release/*
paths:
- 'client/**'
- 'packages/**'
- 'packages/data-provider/**'
jobs:
tests_frontend_ubuntu:

View File

@@ -1,95 +0,0 @@
name: Generate Release Changelog PR
on:
push:
tags:
- 'v*.*.*'
workflow_dispatch:
jobs:
generate-release-changelog-pr:
permissions:
contents: write # Needed for pushing commits and creating branches.
pull-requests: write
runs-on: ubuntu-latest
steps:
# 1. Checkout the repository (with full history).
- name: Checkout Repository
uses: actions/checkout@v4
with:
fetch-depth: 0
# 2. Generate the release changelog using our custom configuration.
- name: Generate Release Changelog
id: generate_release
uses: mikepenz/release-changelog-builder-action@v5.1.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
configuration: ".github/configuration-release.json"
owner: ${{ github.repository_owner }}
repo: ${{ github.event.repository.name }}
outputFile: CHANGELOG-release.md
# 3. Update the main CHANGELOG.md:
# - If it doesn't exist, create it with a basic header.
# - Remove the "Unreleased" section (if present).
# - Prepend the new release changelog above previous releases.
# - Remove all temporary files before committing.
- name: Update CHANGELOG.md
run: |
# Determine the release tag, e.g. "v1.2.3"
TAG=${GITHUB_REF##*/}
echo "Using release tag: $TAG"
# Ensure CHANGELOG.md exists; if not, create a basic header.
if [ ! -f CHANGELOG.md ]; then
echo "# Changelog" > CHANGELOG.md
echo "" >> CHANGELOG.md
echo "All notable changes to this project will be documented in this file." >> CHANGELOG.md
echo "" >> CHANGELOG.md
fi
echo "Updating CHANGELOG.md…"
# Remove the "Unreleased" section (from "## [Unreleased]" until the first occurrence of '---') if it exists.
if grep -q "^## \[Unreleased\]" CHANGELOG.md; then
awk '/^## \[Unreleased\]/{flag=1} flag && /^---/{flag=0; next} !flag' CHANGELOG.md > CHANGELOG.cleaned
else
cp CHANGELOG.md CHANGELOG.cleaned
fi
# Split the cleaned file into:
# - header.md: content before the first release header ("## [v...").
# - tail.md: content from the first release header onward.
awk '/^## \[v/{exit} {print}' CHANGELOG.cleaned > header.md
awk 'f{print} /^## \[v/{f=1; print}' CHANGELOG.cleaned > tail.md
# Combine header, the new release changelog, and the tail.
echo "Combining updated changelog parts..."
cat header.md CHANGELOG-release.md > CHANGELOG.md.new
echo "" >> CHANGELOG.md.new
cat tail.md >> CHANGELOG.md.new
mv CHANGELOG.md.new CHANGELOG.md
# Remove temporary files.
rm -f CHANGELOG.cleaned header.md tail.md CHANGELOG-release.md
echo "Final CHANGELOG.md content:"
cat CHANGELOG.md
# 4. Create (or update) the Pull Request with the updated CHANGELOG.md.
- name: Create Pull Request
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.GITHUB_TOKEN }}
sign-commits: true
commit-message: "chore: update CHANGELOG for release ${{ github.ref_name }}"
base: main
branch: "changelog/${{ github.ref_name }}"
reviewers: danny-avila
title: "📜 docs: Changelog for release ${{ github.ref_name }}"
body: |
**Description**:
- This PR updates the CHANGELOG.md by removing the "Unreleased" section and adding new release notes for release ${{ github.ref_name }} above previous releases.

View File

@@ -1,107 +0,0 @@
name: Generate Unreleased Changelog PR
on:
schedule:
- cron: "0 0 * * 1" # Runs every Monday at 00:00 UTC
workflow_dispatch:
jobs:
generate-unreleased-changelog-pr:
permissions:
contents: write # Needed for pushing commits and creating branches.
pull-requests: write
runs-on: ubuntu-latest
steps:
# 1. Checkout the repository on main.
- name: Checkout Repository on Main
uses: actions/checkout@v4
with:
ref: main
fetch-depth: 0
# 4. Get the latest version tag.
- name: Get Latest Tag
id: get_latest_tag
run: |
LATEST_TAG=$(git describe --tags $(git rev-list --tags --max-count=1) || echo "none")
echo "Latest tag: $LATEST_TAG"
echo "tag=$LATEST_TAG" >> $GITHUB_OUTPUT
# 5. Generate the Unreleased changelog.
- name: Generate Unreleased Changelog
id: generate_unreleased
uses: mikepenz/release-changelog-builder-action@v5.1.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
configuration: ".github/configuration-unreleased.json"
owner: ${{ github.repository_owner }}
repo: ${{ github.event.repository.name }}
outputFile: CHANGELOG-unreleased.md
fromTag: ${{ steps.get_latest_tag.outputs.tag }}
toTag: main
# 7. Update CHANGELOG.md with the new Unreleased section.
- name: Update CHANGELOG.md
id: update_changelog
run: |
# Create CHANGELOG.md if it doesn't exist.
if [ ! -f CHANGELOG.md ]; then
echo "# Changelog" > CHANGELOG.md
echo "" >> CHANGELOG.md
echo "All notable changes to this project will be documented in this file." >> CHANGELOG.md
echo "" >> CHANGELOG.md
fi
echo "Updating CHANGELOG.md…"
# Extract content before the "## [Unreleased]" (or first version header if missing).
if grep -q "^## \[Unreleased\]" CHANGELOG.md; then
awk '/^## \[Unreleased\]/{exit} {print}' CHANGELOG.md > CHANGELOG_TMP.md
else
awk '/^## \[v/{exit} {print}' CHANGELOG.md > CHANGELOG_TMP.md
fi
# Append the generated Unreleased changelog.
echo "" >> CHANGELOG_TMP.md
cat CHANGELOG-unreleased.md >> CHANGELOG_TMP.md
echo "" >> CHANGELOG_TMP.md
# Append the remainder of the original changelog (starting from the first version header).
awk 'f{print} /^## \[v/{f=1; print}' CHANGELOG.md >> CHANGELOG_TMP.md
# Replace the old file with the updated file.
mv CHANGELOG_TMP.md CHANGELOG.md
# Remove the temporary generated file.
rm -f CHANGELOG-unreleased.md
echo "Final CHANGELOG.md:"
cat CHANGELOG.md
# 8. Check if CHANGELOG.md has any updates.
- name: Check for CHANGELOG.md changes
id: changelog_changes
run: |
if git diff --quiet CHANGELOG.md; then
echo "has_changes=false" >> $GITHUB_OUTPUT
else
echo "has_changes=true" >> $GITHUB_OUTPUT
fi
# 9. Create (or update) the Pull Request only if there are changes.
- name: Create Pull Request
if: steps.changelog_changes.outputs.has_changes == 'true'
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.GITHUB_TOKEN }}
base: main
branch: "changelog/unreleased-update"
sign-commits: true
commit-message: "action: update Unreleased changelog"
title: "📜 docs: Unreleased Changelog"
body: |
**Description**:
- This PR updates the Unreleased section in CHANGELOG.md.
- It compares the current main branch with the latest version tag (determined as ${{ steps.get_latest_tag.outputs.tag }}),
regenerates the Unreleased changelog, removes any old Unreleased block, and inserts the new content.

View File

@@ -6,6 +6,7 @@ on:
- "client/src/**"
- "api/**"
- "packages/data-provider/src/**"
- "packages/client/**"
jobs:
detect-unused-i18n-keys:
@@ -23,7 +24,7 @@ jobs:
# Define paths
I18N_FILE="client/src/locales/en/translation.json"
SOURCE_DIRS=("client/src" "api" "packages/data-provider/src")
SOURCE_DIRS=("client/src" "api" "packages/data-provider/src" "packages/client")
# Check if translation file exists
if [[ ! -f "$I18N_FILE" ]]; then

View File

@@ -48,7 +48,7 @@ jobs:
# 2. Download translation files from locize.
- name: Download Translations from locize
uses: locize/download@v1
uses: locize/download@v2
with:
project-id: ${{ secrets.LOCIZE_PROJECT_ID }}
path: "client/src/locales"

View File

@@ -7,6 +7,7 @@ on:
- 'package-lock.json'
- 'client/**'
- 'api/**'
- 'packages/client/**'
jobs:
detect-unused-packages:
@@ -28,7 +29,7 @@ jobs:
- name: Validate JSON files
run: |
for FILE in package.json client/package.json api/package.json; do
for FILE in package.json client/package.json api/package.json packages/client/package.json; do
if [[ -f "$FILE" ]]; then
jq empty "$FILE" || (echo "::error title=Invalid JSON::$FILE is invalid" && exit 1)
fi
@@ -63,12 +64,31 @@ jobs:
local folder=$1
local output_file=$2
if [[ -d "$folder" ]]; then
grep -rEho "require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)" "$folder" --include=\*.{js,ts,mjs,cjs} | \
# Extract require() statements
grep -rEho "require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
sed -E "s/require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)/\1/" > "$output_file"
grep -rEho "import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,mjs,cjs} | \
# Extract ES6 imports - various patterns
# import x from 'module'
grep -rEho "import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
sed -E "s/import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
# import 'module' (side-effect imports)
grep -rEho "import ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
sed -E "s/import ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
# export { x } from 'module' or export * from 'module'
grep -rEho "export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,tsx,jsx,mjs,cjs} | \
sed -E "s/export .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
# import type { x } from 'module' (TypeScript)
grep -rEho "import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{ts,tsx} | \
sed -E "s/import type .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
# Remove subpath imports but keep the base package
# e.g., '@tanstack/react-query/devtools' becomes '@tanstack/react-query'
sed -i -E 's|^(@?[a-zA-Z0-9-]+(/[a-zA-Z0-9-]+)?)/.*|\1|' "$output_file"
sort -u "$output_file" -o "$output_file"
else
touch "$output_file"
@@ -78,13 +98,80 @@ jobs:
extract_deps_from_code "." root_used_code.txt
extract_deps_from_code "client" client_used_code.txt
extract_deps_from_code "api" api_used_code.txt
# Extract dependencies used by @librechat/client package
extract_deps_from_code "packages/client" packages_client_used_code.txt
- name: Get @librechat/client dependencies
id: get-librechat-client-deps
run: |
if [[ -f "packages/client/package.json" ]]; then
# Get all dependencies from @librechat/client (dependencies, devDependencies, and peerDependencies)
DEPS=$(jq -r '.dependencies // {} | keys[]' packages/client/package.json 2>/dev/null || echo "")
DEV_DEPS=$(jq -r '.devDependencies // {} | keys[]' packages/client/package.json 2>/dev/null || echo "")
PEER_DEPS=$(jq -r '.peerDependencies // {} | keys[]' packages/client/package.json 2>/dev/null || echo "")
# Combine all dependencies
echo "$DEPS" > librechat_client_deps.txt
echo "$DEV_DEPS" >> librechat_client_deps.txt
echo "$PEER_DEPS" >> librechat_client_deps.txt
# Also include dependencies that are imported in packages/client
cat packages_client_used_code.txt >> librechat_client_deps.txt
# Remove empty lines and sort
grep -v '^$' librechat_client_deps.txt | sort -u > temp_deps.txt
mv temp_deps.txt librechat_client_deps.txt
else
touch librechat_client_deps.txt
fi
- name: Extract Workspace Dependencies
id: extract-workspace-deps
run: |
# Function to get dependencies from a workspace package that are used by another package
get_workspace_package_deps() {
local package_json=$1
local output_file=$2
# Get all workspace dependencies (starting with @librechat/)
if [[ -f "$package_json" ]]; then
local workspace_deps=$(jq -r '.dependencies // {} | to_entries[] | select(.key | startswith("@librechat/")) | .key' "$package_json" 2>/dev/null || echo "")
# For each workspace dependency, get its dependencies
for dep in $workspace_deps; do
# Convert @librechat/api to packages/api
local workspace_path=$(echo "$dep" | sed 's/@librechat\//packages\//')
local workspace_package_json="${workspace_path}/package.json"
if [[ -f "$workspace_package_json" ]]; then
# Extract all dependencies from the workspace package
jq -r '.dependencies // {} | keys[]' "$workspace_package_json" 2>/dev/null >> "$output_file"
# Also extract peerDependencies
jq -r '.peerDependencies // {} | keys[]' "$workspace_package_json" 2>/dev/null >> "$output_file"
fi
done
fi
if [[ -f "$output_file" ]]; then
sort -u "$output_file" -o "$output_file"
else
touch "$output_file"
fi
}
# Get workspace dependencies for each package
get_workspace_package_deps "package.json" root_workspace_deps.txt
get_workspace_package_deps "client/package.json" client_workspace_deps.txt
get_workspace_package_deps "api/package.json" api_workspace_deps.txt
- name: Run depcheck for root package.json
id: check-root
run: |
if [[ -f "package.json" ]]; then
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat root_used_deps.txt root_used_code.txt | sort) || echo "")
# Exclude dependencies used in scripts, code, and workspace packages
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat root_used_deps.txt root_used_code.txt root_workspace_deps.txt | sort) || echo "")
echo "ROOT_UNUSED<<EOF" >> $GITHUB_ENV
echo "$UNUSED" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV
@@ -97,7 +184,8 @@ jobs:
chmod -R 755 client
cd client
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../client_used_deps.txt ../client_used_code.txt | sort) || echo "")
# Exclude dependencies used in scripts, code, and workspace packages
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../client_used_deps.txt ../client_used_code.txt ../client_workspace_deps.txt | sort) || echo "")
# Filter out false positives
UNUSED=$(echo "$UNUSED" | grep -v "^micromark-extension-llm-math$" || echo "")
echo "CLIENT_UNUSED<<EOF" >> $GITHUB_ENV
@@ -113,7 +201,8 @@ jobs:
chmod -R 755 api
cd api
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../api_used_deps.txt ../api_used_code.txt | sort) || echo "")
# Exclude dependencies used in scripts, code, and workspace packages
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../api_used_deps.txt ../api_used_code.txt ../api_workspace_deps.txt | sort) || echo "")
echo "API_UNUSED<<EOF" >> $GITHUB_ENV
echo "$UNUSED" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV

9 .gitignore vendored
View File

@@ -125,3 +125,12 @@ helm/**/.values.yaml
# SAML Idp cert
*.cert
# AI Assistants
/.claude/
/.cursor/
/.copilot/
/.aider/
/.openai/
/.tabnine/
/.codeium

View File

@@ -1,4 +1,4 @@
# v0.7.9-rc1
# v0.8.0-rc1
# Base node image
FROM node:20-alpine AS node

View File

@@ -1,5 +1,5 @@
# Dockerfile.multi
# v0.7.9-rc1
# v0.8.0-rc1
# Base for all builds
FROM node:20-alpine AS base-min
@@ -16,6 +16,7 @@ COPY package*.json ./
COPY packages/data-provider/package*.json ./packages/data-provider/
COPY packages/api/package*.json ./packages/api/
COPY packages/data-schemas/package*.json ./packages/data-schemas/
COPY packages/client/package*.json ./packages/client/
COPY client/package*.json ./client/
COPY api/package*.json ./api/
@@ -45,11 +46,19 @@ COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/d
COPY --from=data-schemas-build /app/packages/data-schemas/dist /app/packages/data-schemas/dist
RUN npm run build
# Build `client` package
FROM base AS client-package-build
WORKDIR /app/packages/client
COPY packages/client ./
RUN npm run build
# Client build
FROM base AS client-build
WORKDIR /app/client
COPY client ./
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
COPY --from=client-package-build /app/packages/client/dist /app/packages/client/dist
COPY --from=client-package-build /app/packages/client/src /app/packages/client/src
ENV NODE_OPTIONS="--max-old-space-size=2048"
RUN npm run build

View File

@@ -52,7 +52,7 @@
- 🖥️ **UI & Experience** inspired by ChatGPT with enhanced design and features
- 🤖 **AI Model Selection**:
- Anthropic (Claude), AWS Bedrock, OpenAI, Azure OpenAI, Google, Vertex AI, OpenAI Assistants API (incl. Azure)
- Anthropic (Claude), AWS Bedrock, OpenAI, Azure OpenAI, Google, Vertex AI, OpenAI Responses API (incl. Azure)
- [Custom Endpoints](https://www.librechat.ai/docs/quick_start/custom_endpoints): Use any OpenAI-compatible API with LibreChat, no proxy required
- Compatible with [Local & Remote AI Providers](https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints):
- Ollama, groq, Cohere, Mistral AI, Apple MLX, koboldcpp, together.ai,
@@ -66,10 +66,9 @@
- 🔦 **Agents & Tools Integration**:
- **[LibreChat Agents](https://www.librechat.ai/docs/features/agents)**:
- No-Code Custom Assistants: Build specialized, AI-driven helpers without coding
- Flexible & Extensible: Attach tools like DALL-E-3, file search, code execution, and more
- Compatible with Custom Endpoints, OpenAI, Azure, Anthropic, AWS Bedrock, and more
- Flexible & Extensible: Use MCP Servers, tools, file search, code execution, and more
- Compatible with Custom Endpoints, OpenAI, Azure, Anthropic, AWS Bedrock, Google, Vertex AI, Responses API, and more
- [Model Context Protocol (MCP) Support](https://modelcontextprotocol.io/clients#librechat) for Tools
- Use LibreChat Agents and OpenAI Assistants with Files, Code Interpreter, Tools, and API Actions
- 🔍 **Web Search**:
- Search the internet and retrieve relevant information to enhance your AI context

View File

@@ -108,12 +108,15 @@ class BaseClient {
/**
* Abstract method to record token usage. Subclasses must implement this method.
* If a correction to the token usage is needed, the method should return an object with the corrected token counts.
* Should only be used if `recordCollectedUsage` was not used instead.
* @param {string} [model]
* @param {number} promptTokens
* @param {number} completionTokens
* @returns {Promise<void>}
*/
async recordTokenUsage({ promptTokens, completionTokens }) {
async recordTokenUsage({ model, promptTokens, completionTokens }) {
logger.debug('[BaseClient] `recordTokenUsage` not implemented.', {
model,
promptTokens,
completionTokens,
});
@@ -197,6 +200,10 @@ class BaseClient {
this.currentMessages[this.currentMessages.length - 1].messageId = head;
}
if (opts.isRegenerate && responseMessageId.endsWith('_')) {
responseMessageId = crypto.randomUUID();
}
this.responseMessageId = responseMessageId;
return {
@@ -737,9 +744,13 @@ class BaseClient {
} else {
responseMessage.tokenCount = this.getTokenCountForResponse(responseMessage);
completionTokens = responseMessage.tokenCount;
await this.recordTokenUsage({
usage,
promptTokens,
completionTokens,
model: responseMessage.model,
});
}
await this.recordTokenUsage({ promptTokens, completionTokens, usage });
}
if (userMessagePromise) {

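A minimal sketch of a subclass implementing the updated hook; the require paths and the `spendTokens` persistence helper are illustrative assumptions, not taken from the diff:

const { spendTokens } = require('~/models/spendTokens'); // assumed helper module
const BaseClient = require('./BaseClient'); // path illustrative

class ExampleClient extends BaseClient {
  /** Records usage for one request; skipped when `recordCollectedUsage` already ran. */
  async recordTokenUsage({ model, promptTokens, completionTokens }) {
    await spendTokens(
      { user: this.user, model, context: 'message' },
      { promptTokens, completionTokens },
    );
  }
}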
View File

@@ -237,41 +237,9 @@ const formatAgentMessages = (payload) => {
return messages;
};
/**
* Formats an array of messages for LangChain, making sure all content fields are strings
* @param {Array<(HumanMessage|AIMessage|SystemMessage|ToolMessage)>} payload - The array of messages to format.
* @returns {Array<(HumanMessage|AIMessage|SystemMessage|ToolMessage)>} - The array of formatted LangChain messages, including ToolMessages for tool calls.
*/
const formatContentStrings = (payload) => {
const messages = [];
for (const message of payload) {
if (typeof message.content === 'string') {
continue;
}
if (!Array.isArray(message.content)) {
continue;
}
// Reduce text types to a single string, ignore all other types
const content = message.content.reduce((acc, curr) => {
if (curr.type === ContentTypes.TEXT) {
return `${acc}${curr[ContentTypes.TEXT]}\n`;
}
return acc;
}, '');
message.content = content.trim();
}
return messages;
};
module.exports = {
formatMessage,
formatFromLangChain,
formatAgentMessages,
formatContentStrings,
formatLangChainMessages,
};

View File

@@ -422,6 +422,46 @@ describe('BaseClient', () => {
expect(response).toEqual(expectedResult);
});
test('should replace responseMessageId with new UUID when isRegenerate is true and messageId ends with underscore', async () => {
const mockCrypto = require('crypto');
const newUUID = 'new-uuid-1234';
jest.spyOn(mockCrypto, 'randomUUID').mockReturnValue(newUUID);
const opts = {
isRegenerate: true,
responseMessageId: 'existing-message-id_',
};
await TestClient.setMessageOptions(opts);
expect(TestClient.responseMessageId).toBe(newUUID);
expect(TestClient.responseMessageId).not.toBe('existing-message-id_');
mockCrypto.randomUUID.mockRestore();
});
test('should not replace responseMessageId when isRegenerate is false', async () => {
const opts = {
isRegenerate: false,
responseMessageId: 'existing-message-id_',
};
await TestClient.setMessageOptions(opts);
expect(TestClient.responseMessageId).toBe('existing-message-id_');
});
test('should not replace responseMessageId when it does not end with underscore', async () => {
const opts = {
isRegenerate: true,
responseMessageId: 'existing-message-id',
};
await TestClient.setMessageOptions(opts);
expect(TestClient.responseMessageId).toBe('existing-message-id');
});
test('sendMessage should work with provided conversationId and parentMessageId', async () => {
const userMessage = 'Second message in the conversation';
const opts = {

View File

@@ -3,8 +3,8 @@ const path = require('path');
const OpenAI = require('openai');
const fetch = require('node-fetch');
const { v4: uuidv4 } = require('uuid');
const { ProxyAgent } = require('undici');
const { Tool } = require('@langchain/core/tools');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { FileContext, ContentTypes } = require('librechat-data-provider');
const { getImageBasename } = require('~/server/services/Files/images');
const extractBaseURL = require('~/utils/extractBaseURL');
@@ -46,7 +46,10 @@ class DALLE3 extends Tool {
}
if (process.env.PROXY) {
config.httpAgent = new HttpsProxyAgent(process.env.PROXY);
const proxyAgent = new ProxyAgent(process.env.PROXY);
config.fetchOptions = {
dispatcher: proxyAgent,
};
}
/** @type {OpenAI} */
@@ -163,7 +166,8 @@ Error Message: ${error.message}`);
if (this.isAgent) {
let fetchOptions = {};
if (process.env.PROXY) {
fetchOptions.agent = new HttpsProxyAgent(process.env.PROXY);
const proxyAgent = new ProxyAgent(process.env.PROXY);
fetchOptions.dispatcher = proxyAgent;
}
const imageResponse = await fetch(theImageUrl, fetchOptions);
const arrayBuffer = await imageResponse.arrayBuffer();

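A condensed sketch of the new proxy wiring; undici ships with modern Node but is also installable as a package, and the client options follow the diff above:

const OpenAI = require('openai');
const { ProxyAgent } = require('undici');

const config = { apiKey: process.env.DALLE_API_KEY };
if (process.env.PROXY) {
  // undici's dispatcher replaces the former https-proxy-agent httpAgent setup.
  config.fetchOptions = { dispatcher: new ProxyAgent(process.env.PROXY) };
}
const openai = new OpenAI(config);
// The agent image-download path above passes the same dispatcher option to fetch.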
View File

@@ -3,10 +3,10 @@ const axios = require('axios');
const { v4 } = require('uuid');
const OpenAI = require('openai');
const FormData = require('form-data');
const { ProxyAgent } = require('undici');
const { tool } = require('@langchain/core/tools');
const { logAxiosError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { ContentTypes, EImageOutputType } = require('librechat-data-provider');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { extractBaseURL } = require('~/utils');
@@ -189,7 +189,10 @@ function createOpenAIImageTools(fields = {}) {
}
const clientConfig = { ...closureConfig };
if (process.env.PROXY) {
clientConfig.httpAgent = new HttpsProxyAgent(process.env.PROXY);
const proxyAgent = new ProxyAgent(process.env.PROXY);
clientConfig.fetchOptions = {
dispatcher: proxyAgent,
};
}
/** @type {OpenAI} */
@@ -335,7 +338,10 @@ Error Message: ${error.message}`);
const clientConfig = { ...closureConfig };
if (process.env.PROXY) {
clientConfig.httpAgent = new HttpsProxyAgent(process.env.PROXY);
const proxyAgent = new ProxyAgent(process.env.PROXY);
clientConfig.fetchOptions = {
dispatcher: proxyAgent,
};
}
const formData = new FormData();
@@ -447,6 +453,19 @@ Error Message: ${error.message}`);
baseURL,
};
if (process.env.PROXY) {
try {
const url = new URL(process.env.PROXY);
axiosConfig.proxy = {
host: url.hostname.replace(/^\[|\]$/g, ''),
port: url.port ? parseInt(url.port, 10) : undefined,
protocol: url.protocol.replace(':', ''),
};
} catch (error) {
logger.error('Error parsing proxy URL:', error);
}
}
if (process.env.IMAGE_GEN_OAI_AZURE_API_VERSION && process.env.IMAGE_GEN_OAI_BASEURL) {
axiosConfig.params = {
'api-version': process.env.IMAGE_GEN_OAI_AZURE_API_VERSION,

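As a worked example of the URL parsing above, assuming PROXY=http://proxy.local:8080:

const url = new URL('http://proxy.local:8080');
const proxy = {
  host: url.hostname.replace(/^\[|\]$/g, ''), // strips IPv6 brackets, e.g. '[::1]' -> '::1'
  port: url.port ? parseInt(url.port, 10) : undefined, // 8080
  protocol: url.protocol.replace(':', ''), // 'http'
};
// proxy => { host: 'proxy.local', port: 8080, protocol: 'http' }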
View File

@@ -0,0 +1,94 @@
const DALLE3 = require('../DALLE3');
const { ProxyAgent } = require('undici');
const processFileURL = jest.fn();
jest.mock('~/server/services/Files/images', () => ({
getImageBasename: jest.fn().mockImplementation((url) => {
const parts = url.split('/');
const lastPart = parts.pop();
const imageExtensionRegex = /\.(jpg|jpeg|png|gif|bmp|tiff|svg)$/i;
if (imageExtensionRegex.test(lastPart)) {
return lastPart;
}
return '';
}),
}));
jest.mock('fs', () => {
return {
existsSync: jest.fn(),
mkdirSync: jest.fn(),
promises: {
writeFile: jest.fn(),
readFile: jest.fn(),
unlink: jest.fn(),
},
};
});
jest.mock('path', () => {
return {
resolve: jest.fn(),
join: jest.fn(),
relative: jest.fn(),
extname: jest.fn().mockImplementation((filename) => {
return filename.slice(filename.lastIndexOf('.'));
}),
};
});
describe('DALLE3 Proxy Configuration', () => {
let originalEnv;
beforeAll(() => {
originalEnv = { ...process.env };
});
beforeEach(() => {
jest.resetModules();
process.env = { ...originalEnv };
});
afterEach(() => {
process.env = originalEnv;
});
it('should configure ProxyAgent in fetchOptions.dispatcher when PROXY env is set', () => {
// Set proxy environment variable
process.env.PROXY = 'http://proxy.example.com:8080';
process.env.DALLE_API_KEY = 'test-api-key';
// Create instance
const dalleWithProxy = new DALLE3({ processFileURL });
// Check that the openai client exists
expect(dalleWithProxy.openai).toBeDefined();
// Check that _options exists and has fetchOptions with a dispatcher
expect(dalleWithProxy.openai._options).toBeDefined();
expect(dalleWithProxy.openai._options.fetchOptions).toBeDefined();
expect(dalleWithProxy.openai._options.fetchOptions.dispatcher).toBeDefined();
expect(dalleWithProxy.openai._options.fetchOptions.dispatcher).toBeInstanceOf(ProxyAgent);
});
it('should not configure ProxyAgent when PROXY env is not set', () => {
// Ensure PROXY is not set
delete process.env.PROXY;
process.env.DALLE_API_KEY = 'test-api-key';
// Create instance
const dalleWithoutProxy = new DALLE3({ processFileURL });
// Check that the openai client exists
expect(dalleWithoutProxy.openai).toBeDefined();
// Check that _options exists but fetchOptions either doesn't exist or doesn't have a dispatcher
expect(dalleWithoutProxy.openai._options).toBeDefined();
// fetchOptions should either not exist or not have a dispatcher
if (dalleWithoutProxy.openai._options.fetchOptions) {
expect(dalleWithoutProxy.openai._options.fetchOptions.dispatcher).toBeUndefined();
}
});
});

View File

@@ -11,17 +11,25 @@ const { getFiles } = require('~/models/File');
* @param {Object} options
* @param {ServerRequest} options.req
* @param {Agent['tool_resources']} options.tool_resources
* @param {string} [options.agentId] - The agent ID for file access control
* @returns {Promise<{
* files: Array<{ file_id: string; filename: string }>,
* toolContext: string
* }>}
*/
const primeFiles = async (options) => {
const { tool_resources } = options;
const { tool_resources, req, agentId } = options;
const file_ids = tool_resources?.[EToolResources.file_search]?.file_ids ?? [];
const agentResourceIds = new Set(file_ids);
const resourceFiles = tool_resources?.[EToolResources.file_search]?.files ?? [];
const dbFiles = ((await getFiles({ file_id: { $in: file_ids } })) ?? []).concat(resourceFiles);
const dbFiles = (
(await getFiles(
{ file_id: { $in: file_ids } },
null,
{ text: 0 },
{ userId: req?.user?.id, agentId },
)) ?? []
).concat(resourceFiles);
let toolContext = `- Note: Semantic search is available through the ${Tools.file_search} tool but no files are currently loaded. Request the user to upload documents to search through.`;

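Callers now thread the request and agent id through so getFiles can enforce per-user, per-agent access; a minimal call sketch inside an async caller, with names mirroring the loadTools diff below:

const { files, toolContext } = await primeFiles({
  req, // supplies req.user.id for the access check
  tool_resources: agent.tool_resources,
  agentId: agent?.id, // scopes file lookup to this agent
});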
View File

@@ -1,14 +1,9 @@
const { mcpToolPattern } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { SerpAPI } = require('@langchain/community/tools/serpapi');
const { Calculator } = require('@langchain/community/tools/calculator');
const { mcpToolPattern, loadWebSearchAuth } = require('@librechat/api');
const { EnvVar, createCodeExecutionTool, createSearchTool } = require('@librechat/agents');
const {
Tools,
EToolResources,
loadWebSearchAuth,
replaceSpecialVars,
} = require('librechat-data-provider');
const { Tools, EToolResources, replaceSpecialVars } = require('librechat-data-provider');
const {
availableTools,
manifestToolMap,
@@ -235,7 +230,7 @@ const loadTools = async ({
/** @type {Record<string, string>} */
const toolContextMap = {};
const appTools = (await getCachedTools({ includeGlobal: true })) ?? {};
const cachedTools = (await getCachedTools({ userId: user, includeGlobal: true })) ?? {};
for (const tool of tools) {
if (tool === Tools.execute_code) {
@@ -245,7 +240,13 @@ const loadTools = async ({
authFields: [EnvVar.CODE_API_KEY],
});
const codeApiKey = authValues[EnvVar.CODE_API_KEY];
const { files, toolContext } = await primeCodeFiles(options, codeApiKey);
const { files, toolContext } = await primeCodeFiles(
{
...options,
agentId: agent?.id,
},
codeApiKey,
);
if (toolContext) {
toolContextMap[tool] = toolContext;
}
@@ -260,7 +261,10 @@ const loadTools = async ({
continue;
} else if (tool === Tools.file_search) {
requestedTools[tool] = async () => {
const { files, toolContext } = await primeSearchFiles(options);
const { files, toolContext } = await primeSearchFiles({
...options,
agentId: agent?.id,
});
if (toolContext) {
toolContextMap[tool] = toolContext;
}
@@ -294,7 +298,7 @@ Current Date & Time: ${replaceSpecialVars({ text: '{{iso_datetime}}' })}
});
};
continue;
} else if (tool && appTools[tool] && mcpToolPattern.test(tool)) {
} else if (tool && cachedTools && mcpToolPattern.test(tool)) {
requestedTools[tool] = async () =>
createMCPTool({
req: options.req,

62 api/cache/cacheConfig.js vendored Normal file
View File

@@ -0,0 +1,62 @@
const fs = require('fs');
const { math, isEnabled } = require('@librechat/api');
const { CacheKeys } = require('librechat-data-provider');
// To ensure that different deployments do not interfere with each other's cache, we use a prefix for the Redis keys.
// This prefix is usually the deployment ID, which is often passed to the container or pod as an env var.
// Set REDIS_KEY_PREFIX_VAR to the env var that contains the deployment ID.
const REDIS_KEY_PREFIX_VAR = process.env.REDIS_KEY_PREFIX_VAR;
const REDIS_KEY_PREFIX = process.env.REDIS_KEY_PREFIX;
if (REDIS_KEY_PREFIX_VAR && REDIS_KEY_PREFIX) {
throw new Error('Only either REDIS_KEY_PREFIX_VAR or REDIS_KEY_PREFIX can be set.');
}
const USE_REDIS = isEnabled(process.env.USE_REDIS);
if (USE_REDIS && !process.env.REDIS_URI) {
throw new Error('USE_REDIS is enabled but REDIS_URI is not set.');
}
// Comma-separated list of cache namespaces that should be forced to use in-memory storage
// even when Redis is enabled. This allows selective performance optimization for specific caches.
const FORCED_IN_MEMORY_CACHE_NAMESPACES = process.env.FORCED_IN_MEMORY_CACHE_NAMESPACES
? process.env.FORCED_IN_MEMORY_CACHE_NAMESPACES.split(',').map((key) => key.trim())
: [];
// Validate against CacheKeys enum
if (FORCED_IN_MEMORY_CACHE_NAMESPACES.length > 0) {
const validKeys = Object.values(CacheKeys);
const invalidKeys = FORCED_IN_MEMORY_CACHE_NAMESPACES.filter((key) => !validKeys.includes(key));
if (invalidKeys.length > 0) {
throw new Error(
`Invalid cache keys in FORCED_IN_MEMORY_CACHE_NAMESPACES: ${invalidKeys.join(', ')}. Valid keys: ${validKeys.join(', ')}`,
);
}
}
const cacheConfig = {
FORCED_IN_MEMORY_CACHE_NAMESPACES,
USE_REDIS,
REDIS_URI: process.env.REDIS_URI,
REDIS_USERNAME: process.env.REDIS_USERNAME,
REDIS_PASSWORD: process.env.REDIS_PASSWORD,
REDIS_CA: process.env.REDIS_CA ? fs.readFileSync(process.env.REDIS_CA, 'utf8') : null,
REDIS_KEY_PREFIX: process.env[REDIS_KEY_PREFIX_VAR] || REDIS_KEY_PREFIX || '',
REDIS_MAX_LISTENERS: math(process.env.REDIS_MAX_LISTENERS, 40),
REDIS_PING_INTERVAL: math(process.env.REDIS_PING_INTERVAL, 0),
/** Max delay between reconnection attempts in ms */
REDIS_RETRY_MAX_DELAY: math(process.env.REDIS_RETRY_MAX_DELAY, 3000),
/** Max number of reconnection attempts (0 = infinite) */
REDIS_RETRY_MAX_ATTEMPTS: math(process.env.REDIS_RETRY_MAX_ATTEMPTS, 10),
/** Connection timeout in ms */
REDIS_CONNECT_TIMEOUT: math(process.env.REDIS_CONNECT_TIMEOUT, 10000),
/** Queue commands when disconnected */
REDIS_ENABLE_OFFLINE_QUEUE: isEnabled(process.env.REDIS_ENABLE_OFFLINE_QUEUE ?? 'true'),
CI: isEnabled(process.env.CI),
DEBUG_MEMORY_CACHE: isEnabled(process.env.DEBUG_MEMORY_CACHE),
BAN_DURATION: math(process.env.BAN_DURATION, 7200000), // 2 hours
};
module.exports = { cacheConfig };

157 api/cache/cacheConfig.spec.js vendored Normal file
View File

@@ -0,0 +1,157 @@
const fs = require('fs');
describe('cacheConfig', () => {
let originalEnv;
let originalReadFileSync;
beforeEach(() => {
originalEnv = { ...process.env };
originalReadFileSync = fs.readFileSync;
// Clear all related env vars first
delete process.env.REDIS_URI;
delete process.env.REDIS_CA;
delete process.env.REDIS_KEY_PREFIX_VAR;
delete process.env.REDIS_KEY_PREFIX;
delete process.env.USE_REDIS;
delete process.env.REDIS_PING_INTERVAL;
delete process.env.FORCED_IN_MEMORY_CACHE_NAMESPACES;
// Clear require cache
jest.resetModules();
});
afterEach(() => {
process.env = originalEnv;
fs.readFileSync = originalReadFileSync;
jest.resetModules();
});
describe('REDIS_KEY_PREFIX validation and resolution', () => {
test('should throw error when both REDIS_KEY_PREFIX_VAR and REDIS_KEY_PREFIX are set', () => {
process.env.REDIS_KEY_PREFIX_VAR = 'DEPLOYMENT_ID';
process.env.REDIS_KEY_PREFIX = 'manual-prefix';
expect(() => {
require('./cacheConfig');
}).toThrow('Only either REDIS_KEY_PREFIX_VAR or REDIS_KEY_PREFIX can be set.');
});
test('should resolve REDIS_KEY_PREFIX from variable reference', () => {
process.env.REDIS_KEY_PREFIX_VAR = 'DEPLOYMENT_ID';
process.env.DEPLOYMENT_ID = 'test-deployment-123';
const { cacheConfig } = require('./cacheConfig');
expect(cacheConfig.REDIS_KEY_PREFIX).toBe('test-deployment-123');
});
test('should use direct REDIS_KEY_PREFIX value', () => {
process.env.REDIS_KEY_PREFIX = 'direct-prefix';
const { cacheConfig } = require('./cacheConfig');
expect(cacheConfig.REDIS_KEY_PREFIX).toBe('direct-prefix');
});
test('should default to empty string when no prefix is configured', () => {
const { cacheConfig } = require('./cacheConfig');
expect(cacheConfig.REDIS_KEY_PREFIX).toBe('');
});
test('should handle empty variable reference', () => {
process.env.REDIS_KEY_PREFIX_VAR = 'EMPTY_VAR';
process.env.EMPTY_VAR = '';
const { cacheConfig } = require('./cacheConfig');
expect(cacheConfig.REDIS_KEY_PREFIX).toBe('');
});
test('should handle undefined variable reference', () => {
process.env.REDIS_KEY_PREFIX_VAR = 'UNDEFINED_VAR';
const { cacheConfig } = require('./cacheConfig');
expect(cacheConfig.REDIS_KEY_PREFIX).toBe('');
});
});
describe('USE_REDIS and REDIS_URI validation', () => {
test('should throw error when USE_REDIS is enabled but REDIS_URI is not set', () => {
process.env.USE_REDIS = 'true';
expect(() => {
require('./cacheConfig');
}).toThrow('USE_REDIS is enabled but REDIS_URI is not set.');
});
test('should not throw error when USE_REDIS is enabled and REDIS_URI is set', () => {
process.env.USE_REDIS = 'true';
process.env.REDIS_URI = 'redis://localhost:6379';
expect(() => {
require('./cacheConfig');
}).not.toThrow();
});
test('should handle empty REDIS_URI when USE_REDIS is enabled', () => {
process.env.USE_REDIS = 'true';
process.env.REDIS_URI = '';
expect(() => {
require('./cacheConfig');
}).toThrow('USE_REDIS is enabled but REDIS_URI is not set.');
});
});
describe('REDIS_CA file reading', () => {
test('should be null when REDIS_CA is not set', () => {
const { cacheConfig } = require('./cacheConfig');
expect(cacheConfig.REDIS_CA).toBeNull();
});
});
describe('REDIS_PING_INTERVAL configuration', () => {
test('should default to 0 when REDIS_PING_INTERVAL is not set', () => {
const { cacheConfig } = require('./cacheConfig');
expect(cacheConfig.REDIS_PING_INTERVAL).toBe(0);
});
test('should use provided REDIS_PING_INTERVAL value', () => {
process.env.REDIS_PING_INTERVAL = '300';
const { cacheConfig } = require('./cacheConfig');
expect(cacheConfig.REDIS_PING_INTERVAL).toBe(300);
});
});
describe('FORCED_IN_MEMORY_CACHE_NAMESPACES validation', () => {
test('should parse comma-separated cache keys correctly', () => {
process.env.FORCED_IN_MEMORY_CACHE_NAMESPACES = ' ROLES, STATIC_CONFIG ,MESSAGES ';
const { cacheConfig } = require('./cacheConfig');
expect(cacheConfig.FORCED_IN_MEMORY_CACHE_NAMESPACES).toEqual([
'ROLES',
'STATIC_CONFIG',
'MESSAGES',
]);
});
test('should throw error for invalid cache keys', () => {
process.env.FORCED_IN_MEMORY_CACHE_NAMESPACES = 'INVALID_KEY,ROLES';
expect(() => {
require('./cacheConfig');
}).toThrow('Invalid cache keys in FORCED_IN_MEMORY_CACHE_NAMESPACES: INVALID_KEY');
});
test('should handle empty string gracefully', () => {
process.env.FORCED_IN_MEMORY_CACHE_NAMESPACES = '';
const { cacheConfig } = require('./cacheConfig');
expect(cacheConfig.FORCED_IN_MEMORY_CACHE_NAMESPACES).toEqual([]);
});
test('should handle undefined env var gracefully', () => {
const { cacheConfig } = require('./cacheConfig');
expect(cacheConfig.FORCED_IN_MEMORY_CACHE_NAMESPACES).toEqual([]);
});
});
});

108 api/cache/cacheFactory.js vendored Normal file
View File

@@ -0,0 +1,108 @@
const KeyvRedis = require('@keyv/redis').default;
const { Keyv } = require('keyv');
const { RedisStore } = require('rate-limit-redis');
const { Time } = require('librechat-data-provider');
const { logger } = require('@librechat/data-schemas');
const { RedisStore: ConnectRedis } = require('connect-redis');
const MemoryStore = require('memorystore')(require('express-session'));
const { keyvRedisClient, ioredisClient, GLOBAL_PREFIX_SEPARATOR } = require('./redisClients');
const { cacheConfig } = require('./cacheConfig');
const { violationFile } = require('./keyvFiles');
/**
* Creates a cache instance using Redis or a fallback store. Suitable for general caching needs.
* @param {string} namespace - The cache namespace.
* @param {number} [ttl] - Time to live for cache entries.
* @param {object} [fallbackStore] - Optional fallback store if Redis is not used.
* @returns {Keyv} Cache instance.
*/
const standardCache = (namespace, ttl = undefined, fallbackStore = undefined) => {
if (
cacheConfig.USE_REDIS &&
!cacheConfig.FORCED_IN_MEMORY_CACHE_NAMESPACES?.includes(namespace)
) {
try {
const keyvRedis = new KeyvRedis(keyvRedisClient);
const cache = new Keyv(keyvRedis, { namespace, ttl });
keyvRedis.namespace = cacheConfig.REDIS_KEY_PREFIX;
keyvRedis.keyPrefixSeparator = GLOBAL_PREFIX_SEPARATOR;
cache.on('error', (err) => {
logger.error(`Cache error in namespace ${namespace}:`, err);
});
return cache;
} catch (err) {
logger.error(`Failed to create Redis cache for namespace ${namespace}:`, err);
throw err;
}
}
if (fallbackStore) return new Keyv({ store: fallbackStore, namespace, ttl });
return new Keyv({ namespace, ttl });
};
/**
* Creates a cache instance for storing violation data.
* Uses a file-based fallback store if Redis is not enabled.
* @param {string} namespace - The cache namespace for violations.
* @param {number} [ttl] - Time to live for cache entries.
* @returns {Keyv} Cache instance for violations.
*/
const violationCache = (namespace, ttl = undefined) => {
return standardCache(`violations:${namespace}`, ttl, violationFile);
};
/**
* Creates a session cache instance using Redis or in-memory store.
* @param {string} namespace - The session namespace.
* @param {number} [ttl] - Time to live for session entries.
* @returns {MemoryStore | ConnectRedis} Session store instance.
*/
const sessionCache = (namespace, ttl = undefined) => {
namespace = namespace.endsWith(':') ? namespace : `${namespace}:`;
if (!cacheConfig.USE_REDIS) return new MemoryStore({ ttl, checkPeriod: Time.ONE_DAY });
const store = new ConnectRedis({ client: ioredisClient, ttl, prefix: namespace });
if (ioredisClient) {
ioredisClient.on('error', (err) => {
logger.error(`Session store Redis error for namespace ${namespace}:`, err);
});
}
return store;
};
/**
* Creates a rate limiter cache using Redis.
* @param {string} prefix - The key prefix for rate limiting.
* @returns {RedisStore|undefined} RedisStore instance or undefined if Redis is not used.
*/
const limiterCache = (prefix) => {
if (!prefix) throw new Error('prefix is required');
if (!cacheConfig.USE_REDIS) return undefined;
prefix = prefix.endsWith(':') ? prefix : `${prefix}:`;
try {
if (!ioredisClient) {
logger.warn(`Redis client not available for rate limiter with prefix ${prefix}`);
return undefined;
}
return new RedisStore({ sendCommand, prefix });
} catch (err) {
logger.error(`Failed to create Redis rate limiter for prefix ${prefix}:`, err);
return undefined;
}
};
const sendCommand = (...args) => {
if (!ioredisClient) {
logger.warn('Redis client not available for command execution');
return Promise.reject(new Error('Redis client not available'));
}
return ioredisClient.call(...args).catch((err) => {
logger.error('Redis command execution failed:', err);
throw err;
});
};
module.exports = { standardCache, sessionCache, violationCache, limiterCache };

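A short usage sketch of the factory, assuming an async context; the 'logins' and 'fork_limiter' namespace names are illustrative:

const { standardCache, violationCache, limiterCache } = require('./cacheFactory');
const { CacheKeys, Time } = require('librechat-data-provider');

(async () => {
  // Redis-backed when USE_REDIS=true, unless the namespace is forced in-memory.
  const rolesCache = standardCache(CacheKeys.ROLES, Time.ONE_DAY);
  await rolesCache.set('ADMIN', { updatedAt: Date.now() });

  // Falls back to the file-based violation store when Redis is disabled.
  const loginViolations = violationCache('logins', Time.ONE_DAY);
  await loginViolations.set('user-123', { count: 1 });

  // undefined without Redis; rate limiters then use their default memory store.
  const forkLimiter = limiterCache('fork_limiter');
  console.log('redis limiter store:', Boolean(forkLimiter));
})();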
432 api/cache/cacheFactory.spec.js vendored Normal file
View File

@@ -0,0 +1,432 @@
const { Time } = require('librechat-data-provider');
// Mock dependencies first
const mockKeyvRedis = {
namespace: '',
keyPrefixSeparator: '',
};
const mockKeyv = jest.fn().mockReturnValue({
mock: 'keyv',
on: jest.fn(),
});
const mockConnectRedis = jest.fn().mockReturnValue({ mock: 'connectRedis' });
const mockMemoryStore = jest.fn().mockReturnValue({ mock: 'memoryStore' });
const mockRedisStore = jest.fn().mockReturnValue({ mock: 'redisStore' });
const mockIoredisClient = {
call: jest.fn(),
on: jest.fn(),
};
const mockKeyvRedisClient = {};
const mockViolationFile = {};
// Mock modules before requiring the main module
jest.mock('@keyv/redis', () => ({
default: jest.fn().mockImplementation(() => mockKeyvRedis),
}));
jest.mock('keyv', () => ({
Keyv: mockKeyv,
}));
jest.mock('./cacheConfig', () => ({
cacheConfig: {
USE_REDIS: false,
REDIS_KEY_PREFIX: 'test',
FORCED_IN_MEMORY_CACHE_NAMESPACES: [],
},
}));
jest.mock('./redisClients', () => ({
keyvRedisClient: mockKeyvRedisClient,
ioredisClient: mockIoredisClient,
GLOBAL_PREFIX_SEPARATOR: '::',
}));
jest.mock('./keyvFiles', () => ({
violationFile: mockViolationFile,
}));
jest.mock('connect-redis', () => ({ RedisStore: mockConnectRedis }));
jest.mock('memorystore', () => jest.fn(() => mockMemoryStore));
jest.mock('rate-limit-redis', () => ({
RedisStore: mockRedisStore,
}));
jest.mock('@librechat/data-schemas', () => ({
logger: {
error: jest.fn(),
warn: jest.fn(),
info: jest.fn(),
},
}));
// Import after mocking
const { standardCache, sessionCache, violationCache, limiterCache } = require('./cacheFactory');
const { cacheConfig } = require('./cacheConfig');
describe('cacheFactory', () => {
beforeEach(() => {
jest.clearAllMocks();
// Reset cache config mock
cacheConfig.USE_REDIS = false;
cacheConfig.REDIS_KEY_PREFIX = 'test';
cacheConfig.FORCED_IN_MEMORY_CACHE_NAMESPACES = [];
});
describe('redisCache', () => {
it('should create Redis cache when USE_REDIS is true', () => {
cacheConfig.USE_REDIS = true;
const namespace = 'test-namespace';
const ttl = 3600;
standardCache(namespace, ttl);
expect(require('@keyv/redis').default).toHaveBeenCalledWith(mockKeyvRedisClient);
expect(mockKeyv).toHaveBeenCalledWith(mockKeyvRedis, { namespace, ttl });
expect(mockKeyvRedis.namespace).toBe(cacheConfig.REDIS_KEY_PREFIX);
expect(mockKeyvRedis.keyPrefixSeparator).toBe('::');
});
it('should create Redis cache with undefined ttl when not provided', () => {
cacheConfig.USE_REDIS = true;
const namespace = 'test-namespace';
standardCache(namespace);
expect(mockKeyv).toHaveBeenCalledWith(mockKeyvRedis, { namespace, ttl: undefined });
});
it('should use fallback store when USE_REDIS is false and fallbackStore is provided', () => {
cacheConfig.USE_REDIS = false;
const namespace = 'test-namespace';
const ttl = 3600;
const fallbackStore = { some: 'store' };
standardCache(namespace, ttl, fallbackStore);
expect(mockKeyv).toHaveBeenCalledWith({ store: fallbackStore, namespace, ttl });
});
it('should create default Keyv instance when USE_REDIS is false and no fallbackStore', () => {
cacheConfig.USE_REDIS = false;
const namespace = 'test-namespace';
const ttl = 3600;
standardCache(namespace, ttl);
expect(mockKeyv).toHaveBeenCalledWith({ namespace, ttl });
});
it('should handle namespace and ttl as undefined', () => {
cacheConfig.USE_REDIS = false;
standardCache();
expect(mockKeyv).toHaveBeenCalledWith({ namespace: undefined, ttl: undefined });
});
it('should use fallback when namespace is in FORCED_IN_MEMORY_CACHE_NAMESPACES', () => {
cacheConfig.USE_REDIS = true;
cacheConfig.FORCED_IN_MEMORY_CACHE_NAMESPACES = ['forced-memory'];
const namespace = 'forced-memory';
const ttl = 3600;
standardCache(namespace, ttl);
expect(require('@keyv/redis').default).not.toHaveBeenCalled();
expect(mockKeyv).toHaveBeenCalledWith({ namespace, ttl });
});
it('should use Redis when namespace is not in FORCED_IN_MEMORY_CACHE_NAMESPACES', () => {
cacheConfig.USE_REDIS = true;
cacheConfig.FORCED_IN_MEMORY_CACHE_NAMESPACES = ['other-namespace'];
const namespace = 'test-namespace';
const ttl = 3600;
standardCache(namespace, ttl);
expect(require('@keyv/redis').default).toHaveBeenCalledWith(mockKeyvRedisClient);
expect(mockKeyv).toHaveBeenCalledWith(mockKeyvRedis, { namespace, ttl });
});
it('should throw error when Redis cache creation fails', () => {
cacheConfig.USE_REDIS = true;
const namespace = 'test-namespace';
const ttl = 3600;
const testError = new Error('Redis connection failed');
const KeyvRedis = require('@keyv/redis').default;
KeyvRedis.mockImplementationOnce(() => {
throw testError;
});
expect(() => standardCache(namespace, ttl)).toThrow('Redis connection failed');
const { logger } = require('@librechat/data-schemas');
expect(logger.error).toHaveBeenCalledWith(
`Failed to create Redis cache for namespace ${namespace}:`,
testError,
);
expect(mockKeyv).not.toHaveBeenCalled();
});
});
describe('violationCache', () => {
it('should create violation cache with prefixed namespace', () => {
const namespace = 'test-violations';
const ttl = 7200;
// We can't easily mock the internal redisCache call since it's in the same module
// But we can test that the function executes without throwing
expect(() => violationCache(namespace, ttl)).not.toThrow();
});
it('should create violation cache with undefined ttl', () => {
const namespace = 'test-violations';
violationCache(namespace);
// The function should call redisCache with violations: prefixed namespace
// Since we can't easily mock the internal redisCache call, we test the behavior
expect(() => violationCache(namespace)).not.toThrow();
});
it('should handle undefined namespace', () => {
expect(() => violationCache(undefined)).not.toThrow();
});
});
describe('sessionCache', () => {
it('should return MemoryStore when USE_REDIS is false', () => {
cacheConfig.USE_REDIS = false;
const namespace = 'sessions';
const ttl = 86400;
const result = sessionCache(namespace, ttl);
expect(mockMemoryStore).toHaveBeenCalledWith({ ttl, checkPeriod: Time.ONE_DAY });
expect(result).toBe(mockMemoryStore());
});
it('should return ConnectRedis when USE_REDIS is true', () => {
cacheConfig.USE_REDIS = true;
const namespace = 'sessions';
const ttl = 86400;
const result = sessionCache(namespace, ttl);
expect(mockConnectRedis).toHaveBeenCalledWith({
client: mockIoredisClient,
ttl,
prefix: `${namespace}:`,
});
expect(result).toBe(mockConnectRedis());
});
it('should add colon to namespace if not present', () => {
cacheConfig.USE_REDIS = true;
const namespace = 'sessions';
sessionCache(namespace);
expect(mockConnectRedis).toHaveBeenCalledWith({
client: mockIoredisClient,
ttl: undefined,
prefix: 'sessions:',
});
});
it('should not add colon to namespace if already present', () => {
cacheConfig.USE_REDIS = true;
const namespace = 'sessions:';
sessionCache(namespace);
expect(mockConnectRedis).toHaveBeenCalledWith({
client: mockIoredisClient,
ttl: undefined,
prefix: 'sessions:',
});
});
it('should handle undefined ttl', () => {
cacheConfig.USE_REDIS = false;
const namespace = 'sessions';
sessionCache(namespace);
expect(mockMemoryStore).toHaveBeenCalledWith({
ttl: undefined,
checkPeriod: Time.ONE_DAY,
});
});
it('should throw error when ConnectRedis constructor fails', () => {
cacheConfig.USE_REDIS = true;
const namespace = 'sessions';
const ttl = 86400;
// Mock ConnectRedis to throw an error during construction
const redisError = new Error('Redis connection failed');
mockConnectRedis.mockImplementationOnce(() => {
throw redisError;
});
// The error should propagate up, not be caught
expect(() => sessionCache(namespace, ttl)).toThrow('Redis connection failed');
// Verify that MemoryStore was NOT used as fallback
expect(mockMemoryStore).not.toHaveBeenCalled();
});
it('should register error handler but let errors propagate to Express', () => {
cacheConfig.USE_REDIS = true;
const namespace = 'sessions';
// Create a mock session store with middleware methods
const mockSessionStore = {
get: jest.fn(),
set: jest.fn(),
destroy: jest.fn(),
};
mockConnectRedis.mockReturnValue(mockSessionStore);
const store = sessionCache(namespace);
// Verify error handler was registered
expect(mockIoredisClient.on).toHaveBeenCalledWith('error', expect.any(Function));
// Get the error handler
const errorHandler = mockIoredisClient.on.mock.calls.find((call) => call[0] === 'error')[1];
// Simulate an error from Redis during a session operation
const redisError = new Error('Socket closed unexpectedly');
// The error handler should log but not swallow the error
const { logger } = require('@librechat/data-schemas');
errorHandler(redisError);
expect(logger.error).toHaveBeenCalledWith(
`Session store Redis error for namespace ${namespace}::`,
redisError,
);
// Now simulate what happens when session middleware tries to use the store
const callback = jest.fn();
mockSessionStore.get.mockImplementation((sid, cb) => {
cb(new Error('Redis connection lost'));
});
// Call the store's get method (as Express session would)
store.get('test-session-id', callback);
// The error should be passed to the callback, not swallowed
expect(callback).toHaveBeenCalledWith(new Error('Redis connection lost'));
});
it('should handle null ioredisClient gracefully', () => {
cacheConfig.USE_REDIS = true;
const namespace = 'sessions';
// Temporarily set ioredisClient to null (simulating connection not established)
const originalClient = require('./redisClients').ioredisClient;
require('./redisClients').ioredisClient = null;
// ConnectRedis might accept null client but would fail on first use
// The important thing is it doesn't throw uncaught exceptions during construction
const store = sessionCache(namespace);
expect(store).toBeDefined();
// Restore original client
require('./redisClients').ioredisClient = originalClient;
});
});
describe('limiterCache', () => {
it('should return undefined when USE_REDIS is false', () => {
cacheConfig.USE_REDIS = false;
const result = limiterCache('prefix');
expect(result).toBeUndefined();
});
it('should return RedisStore when USE_REDIS is true', () => {
cacheConfig.USE_REDIS = true;
const result = limiterCache('rate-limit');
expect(mockRedisStore).toHaveBeenCalledWith({
sendCommand: expect.any(Function),
prefix: `rate-limit:`,
});
expect(result).toBe(mockRedisStore());
});
it('should add colon to prefix if not present', () => {
cacheConfig.USE_REDIS = true;
limiterCache('rate-limit');
expect(mockRedisStore).toHaveBeenCalledWith({
sendCommand: expect.any(Function),
prefix: 'rate-limit:',
});
});
it('should not add colon to prefix if already present', () => {
cacheConfig.USE_REDIS = true;
limiterCache('rate-limit:');
expect(mockRedisStore).toHaveBeenCalledWith({
sendCommand: expect.any(Function),
prefix: 'rate-limit:',
});
});
it('should pass sendCommand function that calls ioredisClient.call', async () => {
cacheConfig.USE_REDIS = true;
mockIoredisClient.call.mockResolvedValue('test-value');
limiterCache('rate-limit');
const sendCommandCall = mockRedisStore.mock.calls[0][0];
const sendCommand = sendCommandCall.sendCommand;
// Test that sendCommand properly delegates to ioredisClient.call
const args = ['GET', 'test-key'];
const result = await sendCommand(...args);
expect(mockIoredisClient.call).toHaveBeenCalledWith(...args);
expect(result).toBe('test-value');
});
it('should handle sendCommand errors properly', async () => {
cacheConfig.USE_REDIS = true;
// Mock the call method to reject with an error
const testError = new Error('Redis error');
mockIoredisClient.call.mockRejectedValue(testError);
limiterCache('rate-limit');
const sendCommandCall = mockRedisStore.mock.calls[0][0];
const sendCommand = sendCommandCall.sendCommand;
// Test that sendCommand properly handles errors
const args = ['GET', 'test-key'];
await expect(sendCommand(...args)).rejects.toThrow('Redis error');
expect(mockIoredisClient.call).toHaveBeenCalledWith(...args);
});
it('should handle undefined prefix', () => {
cacheConfig.USE_REDIS = true;
expect(() => limiterCache()).toThrow('prefix is required');
});
});
});
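Taken together, these tests pin down the factory contract: sessionCache returns a MemoryStore or a connect-redis store depending on USE_REDIS (and lets Redis errors propagate rather than silently falling back to memory), while limiterCache returns a rate-limit store whose sendCommand delegates to ioredisClient.call, or undefined when Redis is off. A minimal sketch of limiterCache consistent with these expectations (hypothetical; the actual implementation lives in cacheFactory.js and may differ):

const RedisStore = require('rate-limit-redis').default; // assumption: rate-limit-redis v4 default export
const { cacheConfig } = require('./cacheConfig');
const { ioredisClient } = require('./redisClients');

function limiterCache(prefix) {
  if (!prefix) throw new Error('prefix is required');
  if (!cacheConfig.USE_REDIS) return undefined;
  // Normalize so stored keys always read "<prefix>:<key>"
  const normalized = prefix.endsWith(':') ? prefix : `${prefix}:`;
  return new RedisStore({
    sendCommand: (...args) => ioredisClient.call(...args),
    prefix: normalized,
  });
}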

View File

@@ -1,113 +1,53 @@
const { cacheConfig } = require('./cacheConfig');
const { Keyv } = require('keyv');
const { isEnabled, math } = require('@librechat/api');
const { CacheKeys, ViolationTypes, Time } = require('librechat-data-provider');
const { logFile, violationFile } = require('./keyvFiles');
const keyvRedis = require('./keyvRedis');
const { logFile } = require('./keyvFiles');
const keyvMongo = require('./keyvMongo');
const { BAN_DURATION, USE_REDIS, DEBUG_MEMORY_CACHE, CI } = process.env ?? {};
const duration = math(BAN_DURATION, 7200000);
const isRedisEnabled = isEnabled(USE_REDIS);
const debugMemoryCache = isEnabled(DEBUG_MEMORY_CACHE);
const createViolationInstance = (namespace) => {
const config = isRedisEnabled ? { store: keyvRedis } : { store: violationFile, namespace };
return new Keyv(config);
};
// Serve cache from memory so no need to clear it on startup/exit
const pending_req = isRedisEnabled
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.PENDING_REQ });
const config = isRedisEnabled
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.CONFIG_STORE });
const roles = isRedisEnabled
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.ROLES });
const mcpTools = isRedisEnabled
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.MCP_TOOLS });
const audioRuns = isRedisEnabled
? new Keyv({ store: keyvRedis, ttl: Time.TEN_MINUTES })
: new Keyv({ namespace: CacheKeys.AUDIO_RUNS, ttl: Time.TEN_MINUTES });
const messages = isRedisEnabled
? new Keyv({ store: keyvRedis, ttl: Time.ONE_MINUTE })
: new Keyv({ namespace: CacheKeys.MESSAGES, ttl: Time.ONE_MINUTE });
const flows = isRedisEnabled
? new Keyv({ store: keyvRedis, ttl: Time.TWO_MINUTES })
: new Keyv({ namespace: CacheKeys.FLOWS, ttl: Time.ONE_MINUTE * 3 });
const tokenConfig = isRedisEnabled
? new Keyv({ store: keyvRedis, ttl: Time.THIRTY_MINUTES })
: new Keyv({ namespace: CacheKeys.TOKEN_CONFIG, ttl: Time.THIRTY_MINUTES });
const genTitle = isRedisEnabled
? new Keyv({ store: keyvRedis, ttl: Time.TWO_MINUTES })
: new Keyv({ namespace: CacheKeys.GEN_TITLE, ttl: Time.TWO_MINUTES });
const s3ExpiryInterval = isRedisEnabled
? new Keyv({ store: keyvRedis, ttl: Time.THIRTY_MINUTES })
: new Keyv({ namespace: CacheKeys.S3_EXPIRY_INTERVAL, ttl: Time.THIRTY_MINUTES });
const modelQueries = isEnabled(process.env.USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.MODEL_QUERIES });
const abortKeys = isRedisEnabled
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.ABORT_KEYS, ttl: Time.TEN_MINUTES });
const openIdExchangedTokensCache = isRedisEnabled
? new Keyv({ store: keyvRedis, ttl: Time.TEN_MINUTES })
: new Keyv({ namespace: CacheKeys.OPENID_EXCHANGED_TOKENS, ttl: Time.TEN_MINUTES });
const { standardCache, sessionCache, violationCache } = require('./cacheFactory');
const namespaces = {
[CacheKeys.ROLES]: roles,
[CacheKeys.MCP_TOOLS]: mcpTools,
[CacheKeys.CONFIG_STORE]: config,
[CacheKeys.PENDING_REQ]: pending_req,
[ViolationTypes.BAN]: new Keyv({ store: keyvMongo, namespace: CacheKeys.BANS, ttl: duration }),
[CacheKeys.ENCODED_DOMAINS]: new Keyv({
[ViolationTypes.GENERAL]: new Keyv({ store: logFile, namespace: 'violations' }),
[ViolationTypes.LOGINS]: violationCache(ViolationTypes.LOGINS),
[ViolationTypes.CONCURRENT]: violationCache(ViolationTypes.CONCURRENT),
[ViolationTypes.NON_BROWSER]: violationCache(ViolationTypes.NON_BROWSER),
[ViolationTypes.MESSAGE_LIMIT]: violationCache(ViolationTypes.MESSAGE_LIMIT),
[ViolationTypes.REGISTRATIONS]: violationCache(ViolationTypes.REGISTRATIONS),
[ViolationTypes.TOKEN_BALANCE]: violationCache(ViolationTypes.TOKEN_BALANCE),
[ViolationTypes.TTS_LIMIT]: violationCache(ViolationTypes.TTS_LIMIT),
[ViolationTypes.STT_LIMIT]: violationCache(ViolationTypes.STT_LIMIT),
[ViolationTypes.CONVO_ACCESS]: violationCache(ViolationTypes.CONVO_ACCESS),
[ViolationTypes.TOOL_CALL_LIMIT]: violationCache(ViolationTypes.TOOL_CALL_LIMIT),
[ViolationTypes.FILE_UPLOAD_LIMIT]: violationCache(ViolationTypes.FILE_UPLOAD_LIMIT),
[ViolationTypes.VERIFY_EMAIL_LIMIT]: violationCache(ViolationTypes.VERIFY_EMAIL_LIMIT),
[ViolationTypes.RESET_PASSWORD_LIMIT]: violationCache(ViolationTypes.RESET_PASSWORD_LIMIT),
[ViolationTypes.ILLEGAL_MODEL_REQUEST]: violationCache(ViolationTypes.ILLEGAL_MODEL_REQUEST),
[ViolationTypes.BAN]: new Keyv({
store: keyvMongo,
namespace: CacheKeys.ENCODED_DOMAINS,
ttl: 0,
namespace: CacheKeys.BANS,
ttl: cacheConfig.BAN_DURATION,
}),
general: new Keyv({ store: logFile, namespace: 'violations' }),
concurrent: createViolationInstance('concurrent'),
non_browser: createViolationInstance('non_browser'),
message_limit: createViolationInstance('message_limit'),
token_balance: createViolationInstance(ViolationTypes.TOKEN_BALANCE),
registrations: createViolationInstance('registrations'),
[ViolationTypes.TTS_LIMIT]: createViolationInstance(ViolationTypes.TTS_LIMIT),
[ViolationTypes.STT_LIMIT]: createViolationInstance(ViolationTypes.STT_LIMIT),
[ViolationTypes.CONVO_ACCESS]: createViolationInstance(ViolationTypes.CONVO_ACCESS),
[ViolationTypes.TOOL_CALL_LIMIT]: createViolationInstance(ViolationTypes.TOOL_CALL_LIMIT),
[ViolationTypes.FILE_UPLOAD_LIMIT]: createViolationInstance(ViolationTypes.FILE_UPLOAD_LIMIT),
[ViolationTypes.VERIFY_EMAIL_LIMIT]: createViolationInstance(ViolationTypes.VERIFY_EMAIL_LIMIT),
[ViolationTypes.RESET_PASSWORD_LIMIT]: createViolationInstance(
ViolationTypes.RESET_PASSWORD_LIMIT,
[CacheKeys.OPENID_SESSION]: sessionCache(CacheKeys.OPENID_SESSION),
[CacheKeys.SAML_SESSION]: sessionCache(CacheKeys.SAML_SESSION),
[CacheKeys.ROLES]: standardCache(CacheKeys.ROLES),
[CacheKeys.MCP_TOOLS]: standardCache(CacheKeys.MCP_TOOLS),
[CacheKeys.CONFIG_STORE]: standardCache(CacheKeys.CONFIG_STORE),
[CacheKeys.STATIC_CONFIG]: standardCache(CacheKeys.STATIC_CONFIG),
[CacheKeys.PENDING_REQ]: standardCache(CacheKeys.PENDING_REQ),
[CacheKeys.ENCODED_DOMAINS]: new Keyv({ store: keyvMongo, namespace: CacheKeys.ENCODED_DOMAINS }),
[CacheKeys.ABORT_KEYS]: standardCache(CacheKeys.ABORT_KEYS, Time.TEN_MINUTES),
[CacheKeys.TOKEN_CONFIG]: standardCache(CacheKeys.TOKEN_CONFIG, Time.THIRTY_MINUTES),
[CacheKeys.GEN_TITLE]: standardCache(CacheKeys.GEN_TITLE, Time.TWO_MINUTES),
[CacheKeys.S3_EXPIRY_INTERVAL]: standardCache(CacheKeys.S3_EXPIRY_INTERVAL, Time.THIRTY_MINUTES),
[CacheKeys.MODEL_QUERIES]: standardCache(CacheKeys.MODEL_QUERIES),
[CacheKeys.AUDIO_RUNS]: standardCache(CacheKeys.AUDIO_RUNS, Time.TEN_MINUTES),
[CacheKeys.MESSAGES]: standardCache(CacheKeys.MESSAGES, Time.ONE_MINUTE),
[CacheKeys.FLOWS]: standardCache(CacheKeys.FLOWS, Time.ONE_MINUTE * 3),
[CacheKeys.OPENID_EXCHANGED_TOKENS]: standardCache(
CacheKeys.OPENID_EXCHANGED_TOKENS,
Time.TEN_MINUTES,
),
[ViolationTypes.ILLEGAL_MODEL_REQUEST]: createViolationInstance(
ViolationTypes.ILLEGAL_MODEL_REQUEST,
),
logins: createViolationInstance('logins'),
[CacheKeys.ABORT_KEYS]: abortKeys,
[CacheKeys.TOKEN_CONFIG]: tokenConfig,
[CacheKeys.GEN_TITLE]: genTitle,
[CacheKeys.S3_EXPIRY_INTERVAL]: s3ExpiryInterval,
[CacheKeys.MODEL_QUERIES]: modelQueries,
[CacheKeys.AUDIO_RUNS]: audioRuns,
[CacheKeys.MESSAGES]: messages,
[CacheKeys.FLOWS]: flows,
[CacheKeys.OPENID_EXCHANGED_TOKENS]: openIdExchangedTokensCache,
};
/**
@@ -116,7 +56,10 @@ const namespaces = {
*/
function getTTLStores() {
return Object.values(namespaces).filter(
(store) => store instanceof Keyv && typeof store.opts?.ttl === 'number' && store.opts.ttl > 0,
(store) =>
store instanceof Keyv &&
parseInt(store.opts?.ttl ?? '0') > 0 &&
!store.opts?.store?.constructor?.name?.includes('Redis'), // Only include non-Redis stores
);
}
@@ -152,18 +95,18 @@ async function clearExpiredFromCache(cache) {
if (data?.expires && data.expires <= expiryTime) {
const deleted = await cache.opts.store.delete(key);
if (!deleted) {
debugMemoryCache &&
cacheConfig.DEBUG_MEMORY_CACHE &&
console.warn(`[Cache] Error deleting entry: ${key} from ${cache.opts.namespace}`);
continue;
}
cleared++;
}
} catch (error) {
debugMemoryCache &&
cacheConfig.DEBUG_MEMORY_CACHE &&
console.log(`[Cache] Error processing entry from ${cache.opts.namespace}:`, error);
const deleted = await cache.opts.store.delete(key);
if (!deleted) {
debugMemoryCache &&
cacheConfig.DEBUG_MEMORY_CACHE &&
console.warn(`[Cache] Error deleting entry: ${key} from ${cache.opts.namespace}`);
continue;
}
@@ -172,7 +115,7 @@ async function clearExpiredFromCache(cache) {
}
if (cleared > 0) {
debugMemoryCache &&
cacheConfig.DEBUG_MEMORY_CACHE &&
console.log(
`[Cache] Cleared ${cleared} entries older than ${ttl}ms from ${cache.opts.namespace}`,
);
@@ -213,7 +156,7 @@ async function clearAllExpiredFromCache() {
}
}
if (!isRedisEnabled && !isEnabled(CI)) {
if (!cacheConfig.USE_REDIS && !cacheConfig.CI) {
/** @type {Set<NodeJS.Timeout>} */
const cleanupIntervals = new Set();
@@ -224,7 +167,7 @@ if (!isRedisEnabled && !isEnabled(CI)) {
cleanupIntervals.add(cleanup);
if (debugMemoryCache) {
if (cacheConfig.DEBUG_MEMORY_CACHE) {
const monitor = setInterval(() => {
const ttlStores = getTTLStores();
const memory = process.memoryUsage();
@@ -245,13 +188,13 @@ if (!isRedisEnabled && !isEnabled(CI)) {
}
const dispose = () => {
debugMemoryCache && console.log('[Cache] Cleaning up and shutting down...');
cacheConfig.DEBUG_MEMORY_CACHE && console.log('[Cache] Cleaning up and shutting down...');
cleanupIntervals.forEach((interval) => clearInterval(interval));
cleanupIntervals.clear();
// One final cleanup before exit
clearAllExpiredFromCache().then(() => {
debugMemoryCache && console.log('[Cache] Final cleanup completed');
cacheConfig.DEBUG_MEMORY_CACHE && console.log('[Cache] Final cleanup completed');
process.exit(0);
});
};
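The net effect of this diff: the per-namespace `isRedisEnabled ? new Keyv({ store: keyvRedis }) : new Keyv({ namespace })` ternaries collapse into standardCache(namespace, ttl) and violationCache(type) calls, and raw process.env reads move behind cacheConfig. A rough sketch of the shape standardCache must have, inferred from the call sites above (hypothetical wiring; the real factory is in cacheFactory.js):

const { Keyv } = require('keyv');
const KeyvRedis = require('@keyv/redis').default;
const { cacheConfig } = require('./cacheConfig');
const { keyvRedisClient } = require('./redisClients');

function standardCache(namespace, ttl) {
  return cacheConfig.USE_REDIS
    ? new Keyv({ store: new KeyvRedis(keyvRedisClient), namespace, ttl }) // shared Redis client
    : new Keyv({ namespace, ttl }); // in-memory store, swept by clearExpiredFromCache above
}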

View File

@@ -1,92 +0,0 @@
const fs = require('fs');
const Redis = require('ioredis');
const { isEnabled } = require('~/server/utils');
const logger = require('~/config/winston');
const { REDIS_URI, USE_REDIS, USE_REDIS_CLUSTER, REDIS_CA, REDIS_MAX_LISTENERS } = process.env;
/** @type {import('ioredis').Redis | import('ioredis').Cluster} */
let ioredisClient;
const redis_max_listeners = Number(REDIS_MAX_LISTENERS) || 40;
function mapURI(uri) {
const regex =
/^(?:(?<scheme>\w+):\/\/)?(?:(?<user>[^:@]+)(?::(?<password>[^@]+))?@)?(?<host>[\w.-]+)(?::(?<port>\d{1,5}))?$/;
const match = uri.match(regex);
if (match) {
const { scheme, user, password, host, port } = match.groups;
return {
scheme: scheme || 'none',
user: user || null,
password: password || null,
host: host || null,
port: port || null,
};
} else {
const parts = uri.split(':');
if (parts.length === 2) {
return {
scheme: 'none',
user: null,
password: null,
host: parts[0],
port: parts[1],
};
}
return {
scheme: 'none',
user: null,
password: null,
host: uri,
port: null,
};
}
}
if (REDIS_URI && isEnabled(USE_REDIS)) {
let redisOptions = null;
if (REDIS_CA) {
const ca = fs.readFileSync(REDIS_CA);
redisOptions = { tls: { ca } };
}
if (isEnabled(USE_REDIS_CLUSTER)) {
const hosts = REDIS_URI.split(',').map((item) => {
var value = mapURI(item);
return {
host: value.host,
port: value.port,
};
});
ioredisClient = new Redis.Cluster(hosts, { redisOptions });
} else {
ioredisClient = new Redis(REDIS_URI, redisOptions);
}
ioredisClient.on('ready', () => {
logger.info('IoRedis connection ready');
});
ioredisClient.on('reconnecting', () => {
logger.info('IoRedis connection reconnecting');
});
ioredisClient.on('end', () => {
logger.info('IoRedis connection ended');
});
ioredisClient.on('close', () => {
logger.info('IoRedis connection closed');
});
ioredisClient.on('error', (err) => logger.error('IoRedis connection error:', err));
ioredisClient.setMaxListeners(redis_max_listeners);
logger.info(
'[Optional] IoRedis initialized for rate limiters. If you have issues, disable Redis or restart the server.',
);
} else {
logger.info('[Optional] IoRedis not initialized for rate limiters.');
}
module.exports = ioredisClient;
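For reference, the mapURI helper removed here (and duplicated in api/cache/keyvRedis.js below) parsed connection strings as in these illustrative calls:

mapURI('redis://user:secret@cache.internal:6379');
// → { scheme: 'redis', user: 'user', password: 'secret', host: 'cache.internal', port: '6379' }
mapURI('cache.internal:6379');
// → { scheme: 'none', user: null, password: null, host: 'cache.internal', port: '6379' }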

api/cache/keyvRedis.js
View File

@@ -1,109 +0,0 @@
const fs = require('fs');
const ioredis = require('ioredis');
const KeyvRedis = require('@keyv/redis').default;
const { isEnabled } = require('~/server/utils');
const logger = require('~/config/winston');
const { REDIS_URI, USE_REDIS, USE_REDIS_CLUSTER, REDIS_CA, REDIS_KEY_PREFIX, REDIS_MAX_LISTENERS } =
process.env;
let keyvRedis;
const redis_prefix = REDIS_KEY_PREFIX || '';
const redis_max_listeners = Number(REDIS_MAX_LISTENERS) || 40;
function mapURI(uri) {
const regex =
/^(?:(?<scheme>\w+):\/\/)?(?:(?<user>[^:@]+)(?::(?<password>[^@]+))?@)?(?<host>[\w.-]+)(?::(?<port>\d{1,5}))?$/;
const match = uri.match(regex);
if (match) {
const { scheme, user, password, host, port } = match.groups;
return {
scheme: scheme || 'none',
user: user || null,
password: password || null,
host: host || null,
port: port || null,
};
} else {
const parts = uri.split(':');
if (parts.length === 2) {
return {
scheme: 'none',
user: null,
password: null,
host: parts[0],
port: parts[1],
};
}
return {
scheme: 'none',
user: null,
password: null,
host: uri,
port: null,
};
}
}
if (REDIS_URI && isEnabled(USE_REDIS)) {
let redisOptions = null;
/** @type {import('@keyv/redis').KeyvRedisOptions} */
let keyvOpts = {
useRedisSets: false,
keyPrefix: redis_prefix,
};
if (REDIS_CA) {
const ca = fs.readFileSync(REDIS_CA);
redisOptions = { tls: { ca } };
}
if (isEnabled(USE_REDIS_CLUSTER)) {
const hosts = REDIS_URI.split(',').map((item) => {
var value = mapURI(item);
return {
host: value.host,
port: value.port,
};
});
const cluster = new ioredis.Cluster(hosts, { redisOptions });
keyvRedis = new KeyvRedis(cluster, keyvOpts);
} else {
keyvRedis = new KeyvRedis(REDIS_URI, keyvOpts);
}
const pingInterval = setInterval(
() => {
logger.debug('KeyvRedis ping');
keyvRedis.client.ping().catch((err) => logger.error('Redis keep-alive ping failed:', err));
},
5 * 60 * 1000,
);
keyvRedis.on('ready', () => {
logger.info('KeyvRedis connection ready');
});
keyvRedis.on('reconnecting', () => {
logger.info('KeyvRedis connection reconnecting');
});
keyvRedis.on('end', () => {
logger.info('KeyvRedis connection ended');
});
keyvRedis.on('close', () => {
clearInterval(pingInterval);
logger.info('KeyvRedis connection closed');
});
keyvRedis.on('error', (err) => logger.error('KeyvRedis connection error:', err));
keyvRedis.setMaxListeners(redis_max_listeners);
logger.info(
'[Optional] Redis initialized. If you have issues, or seeing older values, disable it or flush cache to refresh values.',
);
} else {
logger.info('[Optional] Redis not initialized.');
}
module.exports = keyvRedis;

View File

@@ -1,4 +1,5 @@
const { isEnabled } = require('~/server/utils');
const { ViolationTypes } = require('librechat-data-provider');
const getLogStores = require('./getLogStores');
const banViolation = require('./banViolation');
@@ -9,14 +10,14 @@ const banViolation = require('./banViolation');
* @param {Object} res - Express response object.
* @param {string} type - The type of violation.
* @param {Object} errorMessage - The error message to log.
* @param {number} [score=1] - The severity of the violation. Defaults to 1
* @param {number | string} [score=1] - The severity of the violation. Defaults to 1
*/
const logViolation = async (req, res, type, errorMessage, score = 1) => {
const userId = req.user?.id ?? req.user?._id;
if (!userId) {
return;
}
const logs = getLogStores('general');
const logs = getLogStores(ViolationTypes.GENERAL);
const violationLogs = getLogStores(type);
const key = isEnabled(process.env.USE_REDIS) ? `${type}:${userId}` : userId;

api/cache/redisClients.js
View File

@@ -0,0 +1,204 @@
const IoRedis = require('ioredis');
const { logger } = require('@librechat/data-schemas');
const { createClient, createCluster } = require('@keyv/redis');
const { cacheConfig } = require('./cacheConfig');
const GLOBAL_PREFIX_SEPARATOR = '::';
const urls = cacheConfig.REDIS_URI?.split(',').map((uri) => new URL(uri));
const username = urls?.[0].username || cacheConfig.REDIS_USERNAME;
const password = urls?.[0].password || cacheConfig.REDIS_PASSWORD;
const ca = cacheConfig.REDIS_CA;
/** @type {import('ioredis').Redis | import('ioredis').Cluster | null} */
let ioredisClient = null;
if (cacheConfig.USE_REDIS) {
/** @type {import('ioredis').RedisOptions | import('ioredis').ClusterOptions} */
const redisOptions = {
username: username,
password: password,
tls: ca ? { ca } : undefined,
keyPrefix: `${cacheConfig.REDIS_KEY_PREFIX}${GLOBAL_PREFIX_SEPARATOR}`,
maxListeners: cacheConfig.REDIS_MAX_LISTENERS,
retryStrategy: (times) => {
if (
cacheConfig.REDIS_RETRY_MAX_ATTEMPTS > 0 &&
times > cacheConfig.REDIS_RETRY_MAX_ATTEMPTS
) {
logger.error(
`ioredis giving up after ${cacheConfig.REDIS_RETRY_MAX_ATTEMPTS} reconnection attempts`,
);
return null;
}
const delay = Math.min(times * 50, cacheConfig.REDIS_RETRY_MAX_DELAY);
logger.info(`ioredis reconnecting... attempt ${times}, delay ${delay}ms`);
return delay;
},
reconnectOnError: (err) => {
const targetError = 'READONLY';
if (err.message.includes(targetError)) {
logger.warn('ioredis reconnecting due to READONLY error');
return true;
}
return false;
},
enableOfflineQueue: cacheConfig.REDIS_ENABLE_OFFLINE_QUEUE,
connectTimeout: cacheConfig.REDIS_CONNECT_TIMEOUT,
maxRetriesPerRequest: 3,
};
ioredisClient =
urls.length === 1
? new IoRedis(cacheConfig.REDIS_URI, redisOptions)
: new IoRedis.Cluster(cacheConfig.REDIS_URI, {
redisOptions,
clusterRetryStrategy: (times) => {
if (
cacheConfig.REDIS_RETRY_MAX_ATTEMPTS > 0 &&
times > cacheConfig.REDIS_RETRY_MAX_ATTEMPTS
) {
logger.error(
`ioredis cluster giving up after ${cacheConfig.REDIS_RETRY_MAX_ATTEMPTS} reconnection attempts`,
);
return null;
}
const delay = Math.min(times * 100, cacheConfig.REDIS_RETRY_MAX_DELAY);
logger.info(`ioredis cluster reconnecting... attempt ${times}, delay ${delay}ms`);
return delay;
},
enableOfflineQueue: cacheConfig.REDIS_ENABLE_OFFLINE_QUEUE,
});
ioredisClient.on('error', (err) => {
logger.error('ioredis client error:', err);
});
ioredisClient.on('connect', () => {
logger.info('ioredis client connected');
});
ioredisClient.on('ready', () => {
logger.info('ioredis client ready');
});
ioredisClient.on('reconnecting', (delay) => {
logger.info(`ioredis client reconnecting in ${delay}ms`);
});
ioredisClient.on('close', () => {
logger.warn('ioredis client connection closed');
});
/** Ping Interval to keep the Redis server connection alive (if enabled) */
let pingInterval = null;
const clearPingInterval = () => {
if (pingInterval) {
clearInterval(pingInterval);
pingInterval = null;
}
};
if (cacheConfig.REDIS_PING_INTERVAL > 0) {
pingInterval = setInterval(() => {
if (ioredisClient && ioredisClient.status === 'ready') {
ioredisClient.ping().catch((err) => {
logger.error('ioredis ping failed:', err);
});
}
}, cacheConfig.REDIS_PING_INTERVAL * 1000);
ioredisClient.on('close', clearPingInterval);
ioredisClient.on('end', clearPingInterval);
}
}
/** @type {import('@keyv/redis').RedisClient | import('@keyv/redis').RedisCluster | null} */
let keyvRedisClient = null;
if (cacheConfig.USE_REDIS) {
/**
* ** WARNING ** Keyv Redis client does not support Prefix like ioredis above.
* The prefix feature will be handled by the Keyv-Redis store in cacheFactory.js
* @type {import('@keyv/redis').RedisClientOptions | import('@keyv/redis').RedisClusterOptions}
*/
const redisOptions = {
username,
password,
socket: {
tls: ca != null,
ca,
connectTimeout: cacheConfig.REDIS_CONNECT_TIMEOUT,
reconnectStrategy: (retries) => {
if (
cacheConfig.REDIS_RETRY_MAX_ATTEMPTS > 0 &&
retries > cacheConfig.REDIS_RETRY_MAX_ATTEMPTS
) {
logger.error(
`@keyv/redis client giving up after ${cacheConfig.REDIS_RETRY_MAX_ATTEMPTS} reconnection attempts`,
);
return new Error('Max reconnection attempts reached');
}
const delay = Math.min(retries * 100, cacheConfig.REDIS_RETRY_MAX_DELAY);
logger.info(`@keyv/redis reconnecting... attempt ${retries}, delay ${delay}ms`);
return delay;
},
},
disableOfflineQueue: !cacheConfig.REDIS_ENABLE_OFFLINE_QUEUE,
};
keyvRedisClient =
urls.length === 1
? createClient({ url: cacheConfig.REDIS_URI, ...redisOptions })
: createCluster({
rootNodes: cacheConfig.REDIS_URI.split(',').map((url) => ({ url })),
defaults: redisOptions,
});
keyvRedisClient.setMaxListeners(cacheConfig.REDIS_MAX_LISTENERS);
keyvRedisClient.on('error', (err) => {
logger.error('@keyv/redis client error:', err);
});
keyvRedisClient.on('connect', () => {
logger.info('@keyv/redis client connected');
});
keyvRedisClient.on('ready', () => {
logger.info('@keyv/redis client ready');
});
keyvRedisClient.on('reconnecting', () => {
logger.info('@keyv/redis client reconnecting...');
});
keyvRedisClient.on('disconnect', () => {
logger.warn('@keyv/redis client disconnected');
});
keyvRedisClient.connect().catch((err) => {
logger.error('@keyv/redis initial connection failed:', err);
throw err;
});
/** Ping Interval to keep the Redis server connection alive (if enabled) */
let pingInterval = null;
const clearPingInterval = () => {
if (pingInterval) {
clearInterval(pingInterval);
pingInterval = null;
}
};
if (cacheConfig.REDIS_PING_INTERVAL > 0) {
pingInterval = setInterval(() => {
if (keyvRedisClient && keyvRedisClient.isReady) {
keyvRedisClient.ping().catch((err) => {
logger.error('@keyv/redis ping failed:', err);
});
}
}, cacheConfig.REDIS_PING_INTERVAL * 1000);
keyvRedisClient.on('disconnect', clearPingInterval);
keyvRedisClient.on('end', clearPingInterval);
}
}
module.exports = { ioredisClient, keyvRedisClient, GLOBAL_PREFIX_SEPARATOR };
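The two exports serve different consumers, matching the cacheFactory tests earlier: ioredisClient backs rate limiters and session stores, while keyvRedisClient backs the Keyv caches (key prefixing handled by the Keyv-Redis store, per the warning above). A hedged usage sketch; module paths and the connect-redis export shape are assumptions:

const { RedisStore } = require('connect-redis'); // assumption: connect-redis v8 named export
const { ioredisClient } = require('./redisClients');

// Rate limiters forward raw commands through ioredis:
const sendCommand = (...args) => ioredisClient.call(...args);

// Session stores wrap the same client, namespaced by prefix:
const sessions = new RedisStore({ client: ioredisClient, prefix: 'sessions:' });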

View File

@@ -61,7 +61,7 @@ const getAgent = async (searchParameter) => await Agent.findOne(searchParameter)
const loadEphemeralAgent = async ({ req, agent_id, endpoint, model_parameters: _m }) => {
const { model, ...model_parameters } = _m;
/** @type {Record<string, FunctionTool>} */
const availableTools = await getCachedTools({ includeGlobal: true });
const availableTools = await getCachedTools({ userId: req.user.id, includeGlobal: true });
/** @type {TEphemeralAgent | null} */
const ephemeralAgent = req.body.ephemeralAgent;
const mcpServers = new Set(ephemeralAgent?.mcp);
@@ -90,7 +90,7 @@ const loadEphemeralAgent = async ({ req, agent_id, endpoint, model_parameters: _
}
const instructions = req.body.promptPrefix;
return {
const result = {
id: agent_id,
instructions,
provider: endpoint,
@@ -98,6 +98,11 @@ const loadEphemeralAgent = async ({ req, agent_id, endpoint, model_parameters: _
model,
tools,
};
if (ephemeralAgent?.artifacts != null && ephemeralAgent.artifacts) {
result.artifacts = ephemeralAgent.artifacts;
}
return result;
};
/**

View File

@@ -1,6 +1,6 @@
const { logger } = require('@librechat/data-schemas');
const { createTempChatExpirationDate } = require('@librechat/api');
const getCustomConfig = require('~/server/services/Config/loadCustomConfig');
const { getCustomConfig } = require('~/server/services/Config/getCustomConfig');
const { getMessages, deleteMessages } = require('./Message');
const { Conversation } = require('~/db/models');

View File

@@ -0,0 +1,572 @@
const mongoose = require('mongoose');
const { v4: uuidv4 } = require('uuid');
const { EModelEndpoint } = require('librechat-data-provider');
const { MongoMemoryServer } = require('mongodb-memory-server');
const {
deleteNullOrEmptyConversations,
searchConversation,
getConvosByCursor,
getConvosQueried,
getConvoFiles,
getConvoTitle,
deleteConvos,
saveConvo,
getConvo,
} = require('./Conversation');
jest.mock('~/server/services/Config/getCustomConfig');
jest.mock('./Message');
const { getCustomConfig } = require('~/server/services/Config/getCustomConfig');
const { getMessages, deleteMessages } = require('./Message');
const { Conversation } = require('~/db/models');
describe('Conversation Operations', () => {
let mongoServer;
let mockReq;
let mockConversationData;
beforeAll(async () => {
mongoServer = await MongoMemoryServer.create();
const mongoUri = mongoServer.getUri();
await mongoose.connect(mongoUri);
});
afterAll(async () => {
await mongoose.disconnect();
await mongoServer.stop();
});
beforeEach(async () => {
// Clear database
await Conversation.deleteMany({});
// Reset mocks
jest.clearAllMocks();
// Default mock implementations
getMessages.mockResolvedValue([]);
deleteMessages.mockResolvedValue({ deletedCount: 0 });
mockReq = {
user: { id: 'user123' },
body: {},
};
mockConversationData = {
conversationId: uuidv4(),
title: 'Test Conversation',
endpoint: EModelEndpoint.openAI,
};
});
describe('saveConvo', () => {
it('should save a conversation for an authenticated user', async () => {
const result = await saveConvo(mockReq, mockConversationData);
expect(result.conversationId).toBe(mockConversationData.conversationId);
expect(result.user).toBe('user123');
expect(result.title).toBe('Test Conversation');
expect(result.endpoint).toBe(EModelEndpoint.openAI);
// Verify the conversation was actually saved to the database
const savedConvo = await Conversation.findOne({
conversationId: mockConversationData.conversationId,
user: 'user123',
});
expect(savedConvo).toBeTruthy();
expect(savedConvo.title).toBe('Test Conversation');
});
it('should query messages when saving a conversation', async () => {
// Mock messages as ObjectIds
const mongoose = require('mongoose');
const mockMessages = [new mongoose.Types.ObjectId(), new mongoose.Types.ObjectId()];
getMessages.mockResolvedValue(mockMessages);
await saveConvo(mockReq, mockConversationData);
// Verify that getMessages was called with correct parameters
expect(getMessages).toHaveBeenCalledWith(
{ conversationId: mockConversationData.conversationId },
'_id',
);
});
it('should handle newConversationId when provided', async () => {
const newConversationId = uuidv4();
const result = await saveConvo(mockReq, {
...mockConversationData,
newConversationId,
});
expect(result.conversationId).toBe(newConversationId);
});
it('should handle unsetFields metadata', async () => {
const metadata = {
unsetFields: { someField: 1 },
};
await saveConvo(mockReq, mockConversationData, metadata);
const savedConvo = await Conversation.findOne({
conversationId: mockConversationData.conversationId,
});
expect(savedConvo.someField).toBeUndefined();
});
});
describe('isTemporary conversation handling', () => {
it('should save a conversation with expiredAt when isTemporary is true', async () => {
// Mock custom config with 24 hour retention
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 24,
},
});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveConvo(mockReq, mockConversationData);
const afterSave = new Date();
expect(result.conversationId).toBe(mockConversationData.conversationId);
expect(result.expiredAt).toBeDefined();
expect(result.expiredAt).toBeInstanceOf(Date);
// Verify expiredAt is approximately 24 hours in the future
const expectedExpirationTime = new Date(beforeSave.getTime() + 24 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
new Date(afterSave.getTime() + 24 * 60 * 60 * 1000 + 1000).getTime(),
);
});
it('should save a conversation without expiredAt when isTemporary is false', async () => {
mockReq.body = { isTemporary: false };
const result = await saveConvo(mockReq, mockConversationData);
expect(result.conversationId).toBe(mockConversationData.conversationId);
expect(result.expiredAt).toBeNull();
});
it('should save a conversation without expiredAt when isTemporary is not provided', async () => {
// No isTemporary in body
mockReq.body = {};
const result = await saveConvo(mockReq, mockConversationData);
expect(result.conversationId).toBe(mockConversationData.conversationId);
expect(result.expiredAt).toBeNull();
});
it('should use custom retention period from config', async () => {
// Mock custom config with 48 hour retention
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 48,
},
});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveConvo(mockReq, mockConversationData);
expect(result.expiredAt).toBeDefined();
// Verify expiredAt is approximately 48 hours in the future
const expectedExpirationTime = new Date(beforeSave.getTime() + 48 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
expectedExpirationTime.getTime() + 1000,
);
});
it('should handle minimum retention period (1 hour)', async () => {
// Mock custom config with less than minimum retention
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 0.5, // Half hour - should be clamped to 1 hour
},
});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveConvo(mockReq, mockConversationData);
expect(result.expiredAt).toBeDefined();
// Verify expiredAt is approximately 1 hour in the future (minimum)
const expectedExpirationTime = new Date(beforeSave.getTime() + 1 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
expectedExpirationTime.getTime() + 1000,
);
});
it('should handle maximum retention period (8760 hours)', async () => {
// Mock custom config with more than maximum retention
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 10000, // Should be clamped to 8760 hours
},
});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveConvo(mockReq, mockConversationData);
expect(result.expiredAt).toBeDefined();
// Verify expiredAt is approximately 8760 hours (1 year) in the future
const expectedExpirationTime = new Date(beforeSave.getTime() + 8760 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
expectedExpirationTime.getTime() + 1000,
);
});
it('should handle getCustomConfig errors gracefully', async () => {
// Mock getCustomConfig to throw an error
getCustomConfig.mockRejectedValue(new Error('Config service unavailable'));
mockReq.body = { isTemporary: true };
const result = await saveConvo(mockReq, mockConversationData);
// Should still save the conversation but with expiredAt as null
expect(result.conversationId).toBe(mockConversationData.conversationId);
expect(result.expiredAt).toBeNull();
});
it('should use default retention when config is not provided', async () => {
// Mock getCustomConfig to return empty config
getCustomConfig.mockResolvedValue({});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveConvo(mockReq, mockConversationData);
expect(result.expiredAt).toBeDefined();
// Default retention is 30 days (720 hours)
const expectedExpirationTime = new Date(beforeSave.getTime() + 30 * 24 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
expectedExpirationTime.getTime() + 1000,
);
});
it('should update expiredAt when saving existing temporary conversation', async () => {
// First save a temporary conversation
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 24,
},
});
mockReq.body = { isTemporary: true };
const firstSave = await saveConvo(mockReq, mockConversationData);
const originalExpiredAt = firstSave.expiredAt;
// Wait a bit to ensure time difference
await new Promise((resolve) => setTimeout(resolve, 100));
// Save again with same conversationId but different title
const updatedData = { ...mockConversationData, title: 'Updated Title' };
const secondSave = await saveConvo(mockReq, updatedData);
// Should update title and create new expiredAt
expect(secondSave.title).toBe('Updated Title');
expect(secondSave.expiredAt).toBeDefined();
expect(new Date(secondSave.expiredAt).getTime()).toBeGreaterThan(
new Date(originalExpiredAt).getTime(),
);
});
it('should not set expiredAt when updating non-temporary conversation', async () => {
// First save a non-temporary conversation
mockReq.body = { isTemporary: false };
const firstSave = await saveConvo(mockReq, mockConversationData);
expect(firstSave.expiredAt).toBeNull();
// Update without isTemporary flag
mockReq.body = {};
const updatedData = { ...mockConversationData, title: 'Updated Title' };
const secondSave = await saveConvo(mockReq, updatedData);
expect(secondSave.title).toBe('Updated Title');
expect(secondSave.expiredAt).toBeNull();
});
it('should filter out expired conversations in getConvosByCursor', async () => {
// Create some test conversations
const nonExpiredConvo = await Conversation.create({
conversationId: uuidv4(),
user: 'user123',
title: 'Non-expired',
endpoint: EModelEndpoint.openAI,
expiredAt: null,
updatedAt: new Date(),
});
await Conversation.create({
conversationId: uuidv4(),
user: 'user123',
title: 'Future expired',
endpoint: EModelEndpoint.openAI,
expiredAt: new Date(Date.now() + 24 * 60 * 60 * 1000), // 24 hours from now
updatedAt: new Date(),
});
// Mock Meili search
Conversation.meiliSearch = jest.fn().mockResolvedValue({ hits: [] });
const result = await getConvosByCursor('user123');
// Should only return conversations with null or non-existent expiredAt
expect(result.conversations).toHaveLength(1);
expect(result.conversations[0].conversationId).toBe(nonExpiredConvo.conversationId);
});
it('should filter out expired conversations in getConvosQueried', async () => {
// Create test conversations
const nonExpiredConvo = await Conversation.create({
conversationId: uuidv4(),
user: 'user123',
title: 'Non-expired',
endpoint: EModelEndpoint.openAI,
expiredAt: null,
});
const expiredConvo = await Conversation.create({
conversationId: uuidv4(),
user: 'user123',
title: 'Expired',
endpoint: EModelEndpoint.openAI,
expiredAt: new Date(Date.now() + 24 * 60 * 60 * 1000),
});
const convoIds = [
{ conversationId: nonExpiredConvo.conversationId },
{ conversationId: expiredConvo.conversationId },
];
const result = await getConvosQueried('user123', convoIds);
// Should only return the non-expired conversation
expect(result.conversations).toHaveLength(1);
expect(result.conversations[0].conversationId).toBe(nonExpiredConvo.conversationId);
expect(result.convoMap[nonExpiredConvo.conversationId]).toBeDefined();
expect(result.convoMap[expiredConvo.conversationId]).toBeUndefined();
});
});
describe('searchConversation', () => {
it('should find a conversation by conversationId', async () => {
await Conversation.create({
conversationId: mockConversationData.conversationId,
user: 'user123',
title: 'Test',
endpoint: EModelEndpoint.openAI,
});
const result = await searchConversation(mockConversationData.conversationId);
expect(result).toBeTruthy();
expect(result.conversationId).toBe(mockConversationData.conversationId);
expect(result.user).toBe('user123');
expect(result.title).toBeUndefined(); // Only returns conversationId and user
});
it('should return null if conversation not found', async () => {
const result = await searchConversation('non-existent-id');
expect(result).toBeNull();
});
});
describe('getConvo', () => {
it('should retrieve a conversation for a user', async () => {
await Conversation.create({
conversationId: mockConversationData.conversationId,
user: 'user123',
title: 'Test Conversation',
endpoint: EModelEndpoint.openAI,
});
const result = await getConvo('user123', mockConversationData.conversationId);
expect(result.conversationId).toBe(mockConversationData.conversationId);
expect(result.user).toBe('user123');
expect(result.title).toBe('Test Conversation');
});
it('should return null if conversation not found', async () => {
const result = await getConvo('user123', 'non-existent-id');
expect(result).toBeNull();
});
});
describe('getConvoTitle', () => {
it('should return the conversation title', async () => {
await Conversation.create({
conversationId: mockConversationData.conversationId,
user: 'user123',
title: 'Test Title',
endpoint: EModelEndpoint.openAI,
});
const result = await getConvoTitle('user123', mockConversationData.conversationId);
expect(result).toBe('Test Title');
});
it('should return null if conversation has no title', async () => {
await Conversation.create({
conversationId: mockConversationData.conversationId,
user: 'user123',
title: null,
endpoint: EModelEndpoint.openAI,
});
const result = await getConvoTitle('user123', mockConversationData.conversationId);
expect(result).toBeNull();
});
it('should return "New Chat" if conversation not found', async () => {
const result = await getConvoTitle('user123', 'non-existent-id');
expect(result).toBe('New Chat');
});
});
describe('getConvoFiles', () => {
it('should return conversation files', async () => {
const files = ['file1', 'file2'];
await Conversation.create({
conversationId: mockConversationData.conversationId,
user: 'user123',
endpoint: EModelEndpoint.openAI,
files,
});
const result = await getConvoFiles(mockConversationData.conversationId);
expect(result).toEqual(files);
});
it('should return empty array if no files', async () => {
await Conversation.create({
conversationId: mockConversationData.conversationId,
user: 'user123',
endpoint: EModelEndpoint.openAI,
});
const result = await getConvoFiles(mockConversationData.conversationId);
expect(result).toEqual([]);
});
it('should return empty array if conversation not found', async () => {
const result = await getConvoFiles('non-existent-id');
expect(result).toEqual([]);
});
});
describe('deleteConvos', () => {
it('should delete conversations and associated messages', async () => {
await Conversation.create({
conversationId: mockConversationData.conversationId,
user: 'user123',
title: 'To Delete',
endpoint: EModelEndpoint.openAI,
});
deleteMessages.mockResolvedValue({ deletedCount: 5 });
const result = await deleteConvos('user123', {
conversationId: mockConversationData.conversationId,
});
expect(result.deletedCount).toBe(1);
expect(result.messages.deletedCount).toBe(5);
expect(deleteMessages).toHaveBeenCalledWith({
conversationId: { $in: [mockConversationData.conversationId] },
});
// Verify conversation was deleted
const deletedConvo = await Conversation.findOne({
conversationId: mockConversationData.conversationId,
});
expect(deletedConvo).toBeNull();
});
it('should throw error if no conversations found', async () => {
await expect(deleteConvos('user123', { conversationId: 'non-existent' })).rejects.toThrow(
'Conversation not found or already deleted.',
);
});
});
describe('deleteNullOrEmptyConversations', () => {
it('should delete conversations with null, empty, or missing conversationIds', async () => {
// Since conversationId is required by the schema, we can't create documents with null/missing IDs
// This test should verify the function works when such documents exist (e.g., from data corruption)
// For this test, let's create a valid conversation and verify the function doesn't delete it
await Conversation.create({
conversationId: mockConversationData.conversationId,
user: 'user4',
endpoint: EModelEndpoint.openAI,
});
deleteMessages.mockResolvedValue({ deletedCount: 0 });
const result = await deleteNullOrEmptyConversations();
expect(result.conversations.deletedCount).toBe(0); // No invalid conversations to delete
expect(result.messages.deletedCount).toBe(0);
// Verify valid conversation remains
const remainingConvos = await Conversation.find({});
expect(remainingConvos).toHaveLength(1);
expect(remainingConvos[0].conversationId).toBe(mockConversationData.conversationId);
});
});
describe('Error Handling', () => {
it('should handle database errors in saveConvo', async () => {
// Force a database error by disconnecting
await mongoose.disconnect();
const result = await saveConvo(mockReq, mockConversationData);
expect(result).toEqual({ message: 'Error saving conversation' });
// Reconnect for other tests
await mongoose.connect(mongoServer.getUri());
});
});
});
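These retention tests encode a simple clamp inside createTempChatExpirationDate (imported from @librechat/api): default 720 hours (30 days), floor 1 hour, ceiling 8760 hours (1 year). A sketch of the implied behavior, not the actual implementation:

function tempChatExpirationDate(config) {
  const hours = config?.interface?.temporaryChatRetention ?? 720; // default: 30 days
  const clamped = Math.min(Math.max(hours, 1), 8760); // clamp to [1 hour, 1 year]
  return new Date(Date.now() + clamped * 60 * 60 * 1000);
}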

View File

@@ -1,5 +1,7 @@
const { logger } = require('@librechat/data-schemas');
const { EToolResources, FileContext } = require('librechat-data-provider');
const { EToolResources, FileContext, Constants } = require('librechat-data-provider');
const { getProjectByName } = require('./Project');
const { getAgent } = require('./Agent');
const { File } = require('~/db/models');
/**
@@ -12,17 +14,124 @@ const findFileById = async (file_id, options = {}) => {
return await File.findOne({ file_id, ...options }).lean();
};
/**
* Checks if a user has access to multiple files through a shared agent (batch operation)
* @param {string} userId - The user ID to check access for
* @param {string[]} fileIds - Array of file IDs to check
* @param {string} agentId - The agent ID that might grant access
* @returns {Promise<Map<string, boolean>>} Map of fileId to access status
*/
const hasAccessToFilesViaAgent = async (userId, fileIds, agentId, checkCollaborative = true) => {
const accessMap = new Map();
// Initialize all files as no access
fileIds.forEach((fileId) => accessMap.set(fileId, false));
try {
const agent = await getAgent({ id: agentId });
if (!agent) {
return accessMap;
}
// Check if user is the author - if so, grant access to all files
if (agent.author.toString() === userId) {
fileIds.forEach((fileId) => accessMap.set(fileId, true));
return accessMap;
}
// Check if agent is shared with the user via projects
if (!agent.projectIds || agent.projectIds.length === 0) {
return accessMap;
}
// Check if agent is in global project
const globalProject = await getProjectByName(Constants.GLOBAL_PROJECT_NAME, '_id');
if (
!globalProject ||
!agent.projectIds.some((pid) => pid.toString() === globalProject._id.toString())
) {
return accessMap;
}
// Agent is globally shared - check if it's collaborative
if (checkCollaborative && !agent.isCollaborative) {
return accessMap;
}
// Check which files are actually attached
const attachedFileIds = new Set();
if (agent.tool_resources) {
for (const [_resourceType, resource] of Object.entries(agent.tool_resources)) {
if (resource?.file_ids && Array.isArray(resource.file_ids)) {
resource.file_ids.forEach((fileId) => attachedFileIds.add(fileId));
}
}
}
// Grant access only to files that are attached to this agent
fileIds.forEach((fileId) => {
if (attachedFileIds.has(fileId)) {
accessMap.set(fileId, true);
}
});
return accessMap;
} catch (error) {
logger.error('[hasAccessToFilesViaAgent] Error checking file access:', error);
return accessMap;
}
};
/**
* Retrieves files matching a given filter, sorted by the most recently updated.
* @param {Object} filter - The filter criteria to apply.
* @param {Object} [_sortOptions] - Optional sort parameters.
* @param {Object|String} [selectFields={ text: 0 }] - Fields to include/exclude in the query results.
* Default excludes the 'text' field.
* @param {Object} [options] - Additional options
* @param {string} [options.userId] - User ID for access control
* @param {string} [options.agentId] - Agent ID that might grant access to files
* @returns {Promise<Array<MongoFile>>} A promise that resolves to an array of file documents.
*/
const getFiles = async (filter, _sortOptions, selectFields = { text: 0 }) => {
const getFiles = async (filter, _sortOptions, selectFields = { text: 0 }, options = {}) => {
const sortOptions = { updatedAt: -1, ..._sortOptions };
return await File.find(filter).select(selectFields).sort(sortOptions).lean();
const files = await File.find(filter).select(selectFields).sort(sortOptions).lean();
// If userId and agentId are provided, filter files based on access
if (options.userId && options.agentId) {
// Collect file IDs that need access check
const filesToCheck = [];
const ownedFiles = [];
for (const file of files) {
if (file.user && file.user.toString() === options.userId) {
ownedFiles.push(file);
} else {
filesToCheck.push(file);
}
}
if (filesToCheck.length === 0) {
return ownedFiles;
}
// Batch check access for all non-owned files
const fileIds = filesToCheck.map((f) => f.file_id);
const accessMap = await hasAccessToFilesViaAgent(
options.userId,
fileIds,
options.agentId,
false,
);
// Filter files based on access
const accessibleFiles = filesToCheck.filter((file) => accessMap.get(file.file_id));
return [...ownedFiles, ...accessibleFiles];
}
return files;
};
/**
@@ -176,4 +285,5 @@ module.exports = {
deleteFiles,
deleteFileByFilter,
batchUpdateFiles,
hasAccessToFilesViaAgent,
};

api/models/File.spec.js
View File

@@ -0,0 +1,264 @@
const mongoose = require('mongoose');
const { v4: uuidv4 } = require('uuid');
const { fileSchema } = require('@librechat/data-schemas');
const { agentSchema } = require('@librechat/data-schemas');
const { projectSchema } = require('@librechat/data-schemas');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { GLOBAL_PROJECT_NAME } = require('librechat-data-provider').Constants;
const { getFiles, createFile } = require('./File');
const { getProjectByName } = require('./Project');
const { createAgent } = require('./Agent');
let File;
let Agent;
let Project;
describe('File Access Control', () => {
let mongoServer;
beforeAll(async () => {
mongoServer = await MongoMemoryServer.create();
const mongoUri = mongoServer.getUri();
File = mongoose.models.File || mongoose.model('File', fileSchema);
Agent = mongoose.models.Agent || mongoose.model('Agent', agentSchema);
Project = mongoose.models.Project || mongoose.model('Project', projectSchema);
await mongoose.connect(mongoUri);
});
afterAll(async () => {
await mongoose.disconnect();
await mongoServer.stop();
});
beforeEach(async () => {
await File.deleteMany({});
await Agent.deleteMany({});
await Project.deleteMany({});
});
describe('hasAccessToFilesViaAgent', () => {
it('should efficiently check access for multiple files at once', async () => {
const userId = new mongoose.Types.ObjectId().toString();
const authorId = new mongoose.Types.ObjectId().toString();
const agentId = uuidv4();
const fileIds = [uuidv4(), uuidv4(), uuidv4(), uuidv4()];
// Create files
for (const fileId of fileIds) {
await createFile({
user: authorId,
file_id: fileId,
filename: `file-${fileId}.txt`,
filepath: `/uploads/${fileId}`,
});
}
// Create agent with only first two files attached
await createAgent({
id: agentId,
name: 'Test Agent',
author: authorId,
model: 'gpt-4',
provider: 'openai',
isCollaborative: true,
tool_resources: {
file_search: {
file_ids: [fileIds[0], fileIds[1]],
},
},
});
// Get or create global project
const globalProject = await getProjectByName(GLOBAL_PROJECT_NAME, '_id');
// Share agent globally
await Agent.updateOne({ id: agentId }, { $push: { projectIds: globalProject._id } });
// Check access for all files
const { hasAccessToFilesViaAgent } = require('./File');
const accessMap = await hasAccessToFilesViaAgent(userId, fileIds, agentId);
// Should have access only to the first two files
expect(accessMap.get(fileIds[0])).toBe(true);
expect(accessMap.get(fileIds[1])).toBe(true);
expect(accessMap.get(fileIds[2])).toBe(false);
expect(accessMap.get(fileIds[3])).toBe(false);
});
it('should grant access to all files when user is the agent author', async () => {
const authorId = new mongoose.Types.ObjectId().toString();
const agentId = uuidv4();
const fileIds = [uuidv4(), uuidv4(), uuidv4()];
// Create agent
await createAgent({
id: agentId,
name: 'Test Agent',
author: authorId,
model: 'gpt-4',
provider: 'openai',
tool_resources: {
file_search: {
file_ids: [fileIds[0]], // Only one file attached
},
},
});
// Check access as the author
const { hasAccessToFilesViaAgent } = require('./File');
const accessMap = await hasAccessToFilesViaAgent(authorId, fileIds, agentId);
// Author should have access to all files
expect(accessMap.get(fileIds[0])).toBe(true);
expect(accessMap.get(fileIds[1])).toBe(true);
expect(accessMap.get(fileIds[2])).toBe(true);
});
it('should handle non-existent agent gracefully', async () => {
const userId = new mongoose.Types.ObjectId().toString();
const fileIds = [uuidv4(), uuidv4()];
const { hasAccessToFilesViaAgent } = require('./File');
const accessMap = await hasAccessToFilesViaAgent(userId, fileIds, 'non-existent-agent');
// Should have no access to any files
expect(accessMap.get(fileIds[0])).toBe(false);
expect(accessMap.get(fileIds[1])).toBe(false);
});
it('should deny access when agent is not collaborative', async () => {
const userId = new mongoose.Types.ObjectId().toString();
const authorId = new mongoose.Types.ObjectId().toString();
const agentId = uuidv4();
const fileIds = [uuidv4(), uuidv4()];
// Create agent with files but isCollaborative: false
await createAgent({
id: agentId,
name: 'Non-Collaborative Agent',
author: authorId,
model: 'gpt-4',
provider: 'openai',
isCollaborative: false,
tool_resources: {
file_search: {
file_ids: fileIds,
},
},
});
// Get or create global project
const globalProject = await getProjectByName(GLOBAL_PROJECT_NAME, '_id');
// Share agent globally
await Agent.updateOne({ id: agentId }, { $push: { projectIds: globalProject._id } });
// Check access for files
const { hasAccessToFilesViaAgent } = require('./File');
const accessMap = await hasAccessToFilesViaAgent(userId, fileIds, agentId);
// Should have no access to any files when isCollaborative is false
expect(accessMap.get(fileIds[0])).toBe(false);
expect(accessMap.get(fileIds[1])).toBe(false);
});
});
describe('getFiles with agent access control', () => {
test('should return files owned by user and files accessible through agent', async () => {
const authorId = new mongoose.Types.ObjectId();
const userId = new mongoose.Types.ObjectId();
const agentId = `agent_${uuidv4()}`;
const ownedFileId = `file_${uuidv4()}`;
const sharedFileId = `file_${uuidv4()}`;
const inaccessibleFileId = `file_${uuidv4()}`;
// Create/get global project using getProjectByName which will upsert
const globalProject = await getProjectByName(GLOBAL_PROJECT_NAME);
// Create agent with shared file
await createAgent({
id: agentId,
name: 'Shared Agent',
provider: 'test',
model: 'test-model',
author: authorId,
projectIds: [globalProject._id],
isCollaborative: true,
tool_resources: {
file_search: {
file_ids: [sharedFileId],
},
},
});
// Create files
await createFile({
file_id: ownedFileId,
user: userId,
filename: 'owned.txt',
filepath: '/uploads/owned.txt',
type: 'text/plain',
bytes: 100,
});
await createFile({
file_id: sharedFileId,
user: authorId,
filename: 'shared.txt',
filepath: '/uploads/shared.txt',
type: 'text/plain',
bytes: 200,
embedded: true,
});
await createFile({
file_id: inaccessibleFileId,
user: authorId,
filename: 'inaccessible.txt',
filepath: '/uploads/inaccessible.txt',
type: 'text/plain',
bytes: 300,
});
// Get files with access control
const files = await getFiles(
{ file_id: { $in: [ownedFileId, sharedFileId, inaccessibleFileId] } },
null,
{ text: 0 },
{ userId: userId.toString(), agentId },
);
expect(files).toHaveLength(2);
expect(files.map((f) => f.file_id)).toContain(ownedFileId);
expect(files.map((f) => f.file_id)).toContain(sharedFileId);
expect(files.map((f) => f.file_id)).not.toContain(inaccessibleFileId);
});
test('should return all files when no userId/agentId provided', async () => {
const userId = new mongoose.Types.ObjectId();
const fileId1 = `file_${uuidv4()}`;
const fileId2 = `file_${uuidv4()}`;
await createFile({
file_id: fileId1,
user: userId,
filename: 'file1.txt',
filepath: '/uploads/file1.txt',
type: 'text/plain',
bytes: 100,
});
await createFile({
file_id: fileId2,
user: new mongoose.Types.ObjectId(),
filename: 'file2.txt',
filepath: '/uploads/file2.txt',
type: 'text/plain',
bytes: 200,
});
const files = await getFiles({ file_id: { $in: [fileId1, fileId2] } });
expect(files).toHaveLength(2);
});
});
});

View File

@@ -1,7 +1,7 @@
const { z } = require('zod');
const { logger } = require('@librechat/data-schemas');
const { createTempChatExpirationDate } = require('@librechat/api');
const getCustomConfig = require('~/server/services/Config/loadCustomConfig');
const { getCustomConfig } = require('~/server/services/Config/getCustomConfig');
const { Message } = require('~/db/models');
const idSchema = z.string().uuid();

View File

@@ -1,17 +1,21 @@
const mongoose = require('mongoose');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { v4: uuidv4 } = require('uuid');
const { messageSchema } = require('@librechat/data-schemas');
const { MongoMemoryServer } = require('mongodb-memory-server');
const {
saveMessage,
getMessages,
updateMessage,
deleteMessages,
bulkSaveMessages,
updateMessageText,
deleteMessagesSince,
} = require('./Message');
jest.mock('~/server/services/Config/getCustomConfig');
const { getCustomConfig } = require('~/server/services/Config/getCustomConfig');
/**
* @type {import('mongoose').Model<import('@librechat/data-schemas').IMessage>}
*/
@@ -117,21 +121,21 @@ describe('Message Operations', () => {
const conversationId = uuidv4();
// Create multiple messages in the same conversation
const message1 = await saveMessage(mockReq, {
await saveMessage(mockReq, {
messageId: 'msg1',
conversationId,
text: 'First message',
user: 'user123',
});
const message2 = await saveMessage(mockReq, {
await saveMessage(mockReq, {
messageId: 'msg2',
conversationId,
text: 'Second message',
user: 'user123',
});
const message3 = await saveMessage(mockReq, {
await saveMessage(mockReq, {
messageId: 'msg3',
conversationId,
text: 'Third message',
@@ -314,4 +318,265 @@ describe('Message Operations', () => {
expect(messages[0].text).toBe('Victim message');
});
});
describe('isTemporary message handling', () => {
beforeEach(() => {
// Reset mocks before each test
jest.clearAllMocks();
});
it('should save a message with expiredAt when isTemporary is true', async () => {
// Mock custom config with 24 hour retention
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 24,
},
});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveMessage(mockReq, mockMessageData);
const afterSave = new Date();
expect(result.messageId).toBe('msg123');
expect(result.expiredAt).toBeDefined();
expect(result.expiredAt).toBeInstanceOf(Date);
// Verify expiredAt is approximately 24 hours in the future
const expectedExpirationTime = new Date(beforeSave.getTime() + 24 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
new Date(afterSave.getTime() + 24 * 60 * 60 * 1000 + 1000).getTime(),
);
});
it('should save a message without expiredAt when isTemporary is false', async () => {
mockReq.body = { isTemporary: false };
const result = await saveMessage(mockReq, mockMessageData);
expect(result.messageId).toBe('msg123');
expect(result.expiredAt).toBeNull();
});
it('should save a message without expiredAt when isTemporary is not provided', async () => {
// No isTemporary in body
mockReq.body = {};
const result = await saveMessage(mockReq, mockMessageData);
expect(result.messageId).toBe('msg123');
expect(result.expiredAt).toBeNull();
});
it('should use custom retention period from config', async () => {
// Mock custom config with 48 hour retention
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 48,
},
});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveMessage(mockReq, mockMessageData);
expect(result.expiredAt).toBeDefined();
// Verify expiredAt is approximately 48 hours in the future
const expectedExpirationTime = new Date(beforeSave.getTime() + 48 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
expectedExpirationTime.getTime() + 1000,
);
});
it('should handle minimum retention period (1 hour)', async () => {
// Mock custom config with less than minimum retention
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 0.5, // Half hour - should be clamped to 1 hour
},
});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveMessage(mockReq, mockMessageData);
expect(result.expiredAt).toBeDefined();
// Verify expiredAt is approximately 1 hour in the future (minimum)
const expectedExpirationTime = new Date(beforeSave.getTime() + 1 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
expectedExpirationTime.getTime() + 1000,
);
});
it('should handle maximum retention period (8760 hours)', async () => {
// Mock custom config with more than maximum retention
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 10000, // Should be clamped to 8760 hours
},
});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveMessage(mockReq, mockMessageData);
expect(result.expiredAt).toBeDefined();
// Verify expiredAt is approximately 8760 hours (1 year) in the future
const expectedExpirationTime = new Date(beforeSave.getTime() + 8760 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
expectedExpirationTime.getTime() + 1000,
);
});
it('should handle getCustomConfig errors gracefully', async () => {
// Mock getCustomConfig to throw an error
getCustomConfig.mockRejectedValue(new Error('Config service unavailable'));
mockReq.body = { isTemporary: true };
const result = await saveMessage(mockReq, mockMessageData);
// Should still save the message but with expiredAt as null
expect(result.messageId).toBe('msg123');
expect(result.expiredAt).toBeNull();
});
it('should use default retention when config is not provided', async () => {
// Mock getCustomConfig to return empty config
getCustomConfig.mockResolvedValue({});
mockReq.body = { isTemporary: true };
const beforeSave = new Date();
const result = await saveMessage(mockReq, mockMessageData);
expect(result.expiredAt).toBeDefined();
// Default retention is 30 days (720 hours)
const expectedExpirationTime = new Date(beforeSave.getTime() + 30 * 24 * 60 * 60 * 1000);
const actualExpirationTime = new Date(result.expiredAt);
expect(actualExpirationTime.getTime()).toBeGreaterThanOrEqual(
expectedExpirationTime.getTime() - 1000,
);
expect(actualExpirationTime.getTime()).toBeLessThanOrEqual(
expectedExpirationTime.getTime() + 1000,
);
});
it('should not update expiredAt on message update', async () => {
// First save a temporary message
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 24,
},
});
mockReq.body = { isTemporary: true };
const savedMessage = await saveMessage(mockReq, mockMessageData);
const originalExpiredAt = savedMessage.expiredAt;
// Now update the message without isTemporary flag
mockReq.body = {};
const updatedMessage = await updateMessage(mockReq, {
messageId: 'msg123',
text: 'Updated text',
});
// expiredAt should not be in the returned updated message object
expect(updatedMessage.expiredAt).toBeUndefined();
// Verify in database that expiredAt wasn't changed
const dbMessage = await Message.findOne({ messageId: 'msg123', user: 'user123' });
expect(dbMessage.expiredAt).toEqual(originalExpiredAt);
});
it('should refresh expiredAt when saving an existing temporary message', async () => {
// First save a temporary message
getCustomConfig.mockResolvedValue({
interface: {
temporaryChatRetention: 24,
},
});
mockReq.body = { isTemporary: true };
const firstSave = await saveMessage(mockReq, mockMessageData);
const originalExpiredAt = firstSave.expiredAt;
// Wait a bit to ensure time difference
await new Promise((resolve) => setTimeout(resolve, 100));
// Save again with same messageId but different text
const updatedData = { ...mockMessageData, text: 'Updated text' };
const secondSave = await saveMessage(mockReq, updatedData);
// Should update text but create new expiredAt
expect(secondSave.text).toBe('Updated text');
expect(secondSave.expiredAt).toBeDefined();
expect(new Date(secondSave.expiredAt).getTime()).toBeGreaterThan(
new Date(originalExpiredAt).getTime(),
);
});
it('should handle bulk operations with temporary messages', async () => {
// This test verifies bulkSaveMessages doesn't interfere with expiredAt
const messages = [
{
messageId: 'bulk1',
conversationId: uuidv4(),
text: 'Bulk message 1',
user: 'user123',
expiredAt: new Date(Date.now() + 24 * 60 * 60 * 1000),
},
{
messageId: 'bulk2',
conversationId: uuidv4(),
text: 'Bulk message 2',
user: 'user123',
expiredAt: null,
},
];
await bulkSaveMessages(messages);
const savedMessages = await Message.find({
messageId: { $in: ['bulk1', 'bulk2'] },
}).lean();
expect(savedMessages).toHaveLength(2);
const bulk1 = savedMessages.find((m) => m.messageId === 'bulk1');
const bulk2 = savedMessages.find((m) => m.messageId === 'bulk2');
expect(bulk1.expiredAt).toBeDefined();
expect(bulk2.expiredAt).toBeNull();
});
});
});
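
Taken together, these assertions pin down the retention policy: a 720-hour (30-day) default, clamped to between 1 and 8760 hours. A minimal sketch of the clamping the tests imply (the real createTempChatExpirationDate lives in @librechat/api and may be implemented differently):

// Hypothetical re-implementation of the behavior the tests assert.
const HOUR_MS = 60 * 60 * 1000;
function sketchExpirationDate(config, now = new Date()) {
  const hours = config?.interface?.temporaryChatRetention ?? 720; // default: 30 days
  const clamped = Math.min(Math.max(hours, 1), 8760); // floor 1 hour, ceiling 1 year
  return new Date(now.getTime() + clamped * HOUR_MS);
}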

View File

@@ -135,10 +135,11 @@ const tokenValues = Object.assign(
'grok-2-1212': { prompt: 2.0, completion: 10.0 },
'grok-2-latest': { prompt: 2.0, completion: 10.0 },
'grok-2': { prompt: 2.0, completion: 10.0 },
-'grok-3-mini-fast': { prompt: 0.4, completion: 4 },
+'grok-3-mini-fast': { prompt: 0.6, completion: 4 },
'grok-3-mini': { prompt: 0.3, completion: 0.5 },
'grok-3-fast': { prompt: 5.0, completion: 25.0 },
'grok-3': { prompt: 3.0, completion: 15.0 },
+'grok-4': { prompt: 3.0, completion: 15.0 },
'grok-beta': { prompt: 5.0, completion: 15.0 },
'mistral-large': { prompt: 2.0, completion: 6.0 },
'pixtral-large': { prompt: 2.0, completion: 6.0 },

View File

@@ -636,6 +636,15 @@ describe('Grok Model Tests - Pricing', () => {
);
});
test('should return correct prompt and completion rates for Grok 4 model', () => {
expect(getMultiplier({ model: 'grok-4-0709', tokenType: 'prompt' })).toBe(
tokenValues['grok-4'].prompt,
);
expect(getMultiplier({ model: 'grok-4-0709', tokenType: 'completion' })).toBe(
tokenValues['grok-4'].completion,
);
});
test('should return correct prompt and completion rates for Grok 3 models with prefixes', () => {
expect(getMultiplier({ model: 'xai/grok-3', tokenType: 'prompt' })).toBe(
tokenValues['grok-3'].prompt,
@@ -662,6 +671,15 @@ describe('Grok Model Tests - Pricing', () => {
tokenValues['grok-3-mini-fast'].completion,
);
});
test('should return correct prompt and completion rates for Grok 4 model with prefixes', () => {
expect(getMultiplier({ model: 'xai/grok-4-0709', tokenType: 'prompt' })).toBe(
tokenValues['grok-4'].prompt,
);
expect(getMultiplier({ model: 'xai/grok-4-0709', tokenType: 'completion' })).toBe(
tokenValues['grok-4'].completion,
);
});
});
});
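
These cases also depend on prefixed and date-suffixed names such as 'xai/grok-4-0709' resolving to the base 'grok-4' entry. A sketch of the substring lookup that behavior implies (the actual resolver behind getMultiplier may differ):

// Hypothetical key resolution: the longest tokenValues key contained in the
// model name wins, so 'xai/grok-4-0709' matches 'grok-4' and not a shorter key.
function sketchFindKey(model, tokenValues) {
  return Object.keys(tokenValues)
    .sort((a, b) => b.length - a.length)
    .find((key) => model.includes(key));
}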

View File

@@ -1,6 +1,6 @@
{
"name": "@librechat/backend",
"version": "v0.7.9-rc1",
"version": "v0.8.0-rc1",
"description": "",
"scripts": {
"start": "echo 'please run this from the root directory'",
@@ -44,19 +44,21 @@
"@googleapis/youtube": "^20.0.0",
"@keyv/redis": "^4.3.3",
"@langchain/community": "^0.3.47",
"@langchain/core": "^0.3.60",
"@langchain/core": "^0.3.62",
"@langchain/google-genai": "^0.2.13",
"@langchain/google-vertexai": "^0.2.13",
"@langchain/openai": "^0.5.18",
"@langchain/textsplitters": "^0.1.0",
"@librechat/agents": "^2.4.50",
"@librechat/agents": "^2.4.69",
"@librechat/api": "*",
"@librechat/data-schemas": "*",
"@node-saml/passport-saml": "^5.0.0",
"@modelcontextprotocol/sdk": "^1.17.1",
"@node-saml/passport-saml": "^5.1.0",
"@waylaidwanderer/fetch-event-source": "^3.0.1",
"axios": "^1.8.2",
"bcryptjs": "^2.4.3",
"compression": "^1.7.4",
"connect-redis": "^7.1.0",
"compression": "^1.8.1",
"connect-redis": "^8.1.0",
"cookie": "^0.7.2",
"cookie-parser": "^1.4.7",
"cors": "^2.8.5",
@@ -66,10 +68,11 @@
"express": "^4.21.2",
"express-mongo-sanitize": "^2.2.0",
"express-rate-limit": "^7.4.1",
"express-session": "^1.18.1",
"express-session": "^1.18.2",
"express-static-gzip": "^2.2.0",
"file-type": "^18.7.0",
"firebase": "^11.0.2",
"form-data": "^4.0.4",
"googleapis": "^126.0.1",
"handlebars": "^4.7.7",
"https-proxy-agent": "^7.0.6",
@@ -87,12 +90,12 @@
"mime": "^3.0.0",
"module-alias": "^2.2.3",
"mongoose": "^8.12.1",
"multer": "^2.0.1",
"multer": "^2.0.2",
"nanoid": "^3.3.7",
"node-fetch": "^2.7.0",
"nodemailer": "^6.9.15",
"ollama": "^0.5.0",
"openai": "^4.96.2",
"openai": "^5.10.1",
"openai-chat-tokens": "^0.2.8",
"openid-client": "^6.5.0",
"passport": "^0.6.0",
@@ -117,7 +120,7 @@
},
"devDependencies": {
"jest": "^29.7.0",
"mongodb-memory-server": "^10.1.3",
"mongodb-memory-server": "^10.1.4",
"nodemon": "^3.0.3",
"supertest": "^7.1.0"
}

View File

@@ -24,17 +24,23 @@ const handleValidationError = (err, res) => {
}
};
// eslint-disable-next-line no-unused-vars
-module.exports = (err, req, res, next) => {
+module.exports = (err, _req, res, _next) => {
try {
if (err.name === 'ValidationError') {
-return (err = handleValidationError(err, res));
+return handleValidationError(err, res);
}
if (err.code && err.code == 11000) {
-return (err = handleDuplicateKeyError(err, res));
+return handleDuplicateKeyError(err, res);
}
-} catch (err) {
+// Special handling for errors like SyntaxError
+if (err.statusCode && err.body) {
+return res.status(err.statusCode).send(err.body);
+}
logger.error('ErrorController => error', err);
-res.status(500).send('An unknown error occurred.');
+return res.status(500).send('An unknown error occurred.');
+} catch (err) {
+logger.error('ErrorController => processing error', err);
+return res.status(500).send('Processing error in ErrorController.');
}
};
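
Express identifies error-handling middleware solely by its four-parameter arity, so renaming the unused parameters to _req and _next keeps the handler recognized while satisfying the linter. For illustration, the controller would be mounted after all routes; the wiring below is an assumption, not code from this diff:

const express = require('express');
const errorController = require('~/server/controllers/ErrorController'); // path assumed

const app = express();
// ...routes and other middleware...
app.use(errorController); // registered last so route errors reach it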

View File

@@ -0,0 +1,241 @@
const errorController = require('./ErrorController');
const { logger } = require('~/config');
// Mock the logger
jest.mock('~/config', () => ({
logger: {
error: jest.fn(),
},
}));
describe('ErrorController', () => {
let mockReq, mockRes, mockNext;
beforeEach(() => {
mockReq = {};
mockRes = {
status: jest.fn().mockReturnThis(),
send: jest.fn(),
};
mockNext = jest.fn();
logger.error.mockClear();
});
describe('ValidationError handling', () => {
it('should handle ValidationError with single error', () => {
const validationError = {
name: 'ValidationError',
errors: {
email: { message: 'Email is required', path: 'email' },
},
};
errorController(validationError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(400);
expect(mockRes.send).toHaveBeenCalledWith({
messages: '["Email is required"]',
fields: '["email"]',
});
expect(logger.error).toHaveBeenCalledWith('Validation error:', validationError.errors);
});
it('should handle ValidationError with multiple errors', () => {
const validationError = {
name: 'ValidationError',
errors: {
email: { message: 'Email is required', path: 'email' },
password: { message: 'Password is required', path: 'password' },
},
};
errorController(validationError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(400);
expect(mockRes.send).toHaveBeenCalledWith({
messages: '"Email is required Password is required"',
fields: '["email","password"]',
});
expect(logger.error).toHaveBeenCalledWith('Validation error:', validationError.errors);
});
it('should handle ValidationError with empty errors object', () => {
const validationError = {
name: 'ValidationError',
errors: {},
};
errorController(validationError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(400);
expect(mockRes.send).toHaveBeenCalledWith({
messages: '[]',
fields: '[]',
});
});
});
describe('Duplicate key error handling', () => {
it('should handle duplicate key error (code 11000)', () => {
const duplicateKeyError = {
code: 11000,
keyValue: { email: 'test@example.com' },
};
errorController(duplicateKeyError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(409);
expect(mockRes.send).toHaveBeenCalledWith({
messages: 'An document with that ["email"] already exists.',
fields: '["email"]',
});
expect(logger.error).toHaveBeenCalledWith('Duplicate key error:', duplicateKeyError.keyValue);
});
it('should handle duplicate key error with multiple fields', () => {
const duplicateKeyError = {
code: 11000,
keyValue: { email: 'test@example.com', username: 'testuser' },
};
errorController(duplicateKeyError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(409);
expect(mockRes.send).toHaveBeenCalledWith({
messages: 'An document with that ["email","username"] already exists.',
fields: '["email","username"]',
});
expect(logger.error).toHaveBeenCalledWith('Duplicate key error:', duplicateKeyError.keyValue);
});
it('should handle error with code 11000 as string', () => {
const duplicateKeyError = {
code: '11000',
keyValue: { email: 'test@example.com' },
};
errorController(duplicateKeyError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(409);
expect(mockRes.send).toHaveBeenCalledWith({
messages: 'An document with that ["email"] already exists.',
fields: '["email"]',
});
});
});
describe('SyntaxError handling', () => {
it('should handle errors with statusCode and body', () => {
const syntaxError = {
statusCode: 400,
body: 'Invalid JSON syntax',
};
errorController(syntaxError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(400);
expect(mockRes.send).toHaveBeenCalledWith('Invalid JSON syntax');
});
it('should handle errors with different statusCode and body', () => {
const customError = {
statusCode: 422,
body: { error: 'Unprocessable entity' },
};
errorController(customError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(422);
expect(mockRes.send).toHaveBeenCalledWith({ error: 'Unprocessable entity' });
});
it('should handle error with statusCode but no body', () => {
const partialError = {
statusCode: 400,
};
errorController(partialError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(500);
expect(mockRes.send).toHaveBeenCalledWith('An unknown error occurred.');
});
it('should handle error with body but no statusCode', () => {
const partialError = {
body: 'Some error message',
};
errorController(partialError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(500);
expect(mockRes.send).toHaveBeenCalledWith('An unknown error occurred.');
});
});
describe('Unknown error handling', () => {
it('should handle unknown errors', () => {
const unknownError = new Error('Some unknown error');
errorController(unknownError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(500);
expect(mockRes.send).toHaveBeenCalledWith('An unknown error occurred.');
expect(logger.error).toHaveBeenCalledWith('ErrorController => error', unknownError);
});
it('should handle errors with code other than 11000', () => {
const mongoError = {
code: 11100,
message: 'Some MongoDB error',
};
errorController(mongoError, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(500);
expect(mockRes.send).toHaveBeenCalledWith('An unknown error occurred.');
expect(logger.error).toHaveBeenCalledWith('ErrorController => error', mongoError);
});
it('should handle null/undefined errors', () => {
errorController(null, mockReq, mockRes, mockNext);
expect(mockRes.status).toHaveBeenCalledWith(500);
expect(mockRes.send).toHaveBeenCalledWith('Processing error in ErrorController.');
expect(logger.error).toHaveBeenCalledWith(
'ErrorController => processing error',
expect.any(Error),
);
});
});
describe('Catch block handling', () => {
beforeEach(() => {
// Restore logger mock to normal behavior for these tests
logger.error.mockRestore();
logger.error = jest.fn();
});
it('should handle errors when logger.error throws', () => {
// Create fresh mocks for this test
const freshMockRes = {
status: jest.fn().mockReturnThis(),
send: jest.fn(),
};
// Mock logger to throw on the first call, succeed on the second
logger.error
.mockImplementationOnce(() => {
throw new Error('Logger error');
})
.mockImplementation(() => {});
const testError = new Error('Test error');
errorController(testError, mockReq, freshMockRes, mockNext);
expect(freshMockRes.status).toHaveBeenCalledWith(500);
expect(freshMockRes.send).toHaveBeenCalledWith('Processing error in ErrorController.');
expect(logger.error).toHaveBeenCalledTimes(2);
});
});
});

View File

@@ -1,11 +1,10 @@
const { logger } = require('@librechat/data-schemas');
-const { CacheKeys, AuthType } = require('librechat-data-provider');
+const { CacheKeys, AuthType, Constants } = require('librechat-data-provider');
const { getCustomConfig, getCachedTools } = require('~/server/services/Config');
const { getToolkitKey } = require('~/server/services/ToolService');
const { getMCPManager, getFlowStateManager } = require('~/config');
const { availableTools } = require('~/app/clients/tools');
const { getLogStores } = require('~/cache');
-const { Constants } = require('librechat-data-provider');
/**
* Filters out duplicate plugins from the list of plugins.
@@ -139,15 +138,21 @@ function createGetServerTools() {
*/
const getAvailableTools = async (req, res) => {
try {
+const userId = req.user?.id;
+const customConfig = await getCustomConfig();
const cache = getLogStores(CacheKeys.CONFIG_STORE);
-const cachedTools = await cache.get(CacheKeys.TOOLS);
-if (cachedTools) {
-res.status(200).json(cachedTools);
+const cachedToolsArray = await cache.get(CacheKeys.TOOLS);
+const cachedUserTools = await getCachedTools({ userId });
+const userPlugins = convertMCPToolsToPlugins(cachedUserTools, customConfig);
+if (cachedToolsArray && userPlugins) {
+const dedupedTools = filterUniquePlugins([...userPlugins, ...cachedToolsArray]);
+res.status(200).json(dedupedTools);
return;
}
// If not in cache, build from manifest
let pluginManifest = availableTools;
-const customConfig = await getCustomConfig();
if (customConfig?.mcpServers != null) {
const mcpManager = getMCPManager();
const flowsCache = getLogStores(CacheKeys.FLOWS);
@@ -173,7 +178,7 @@ const getAvailableTools = async (req, res) => {
}
});
-const toolDefinitions = await getCachedTools({ includeGlobal: true });
+const toolDefinitions = (await getCachedTools({ includeGlobal: true })) || {};
const toolsOutput = [];
for (const plugin of authenticatedPlugins) {
@@ -218,16 +223,70 @@ const getAvailableTools = async (req, res) => {
toolsOutput.push(toolToAdd);
}
const finalTools = filterUniquePlugins(toolsOutput);
-await cache.set(CacheKeys.TOOLS, finalTools);
-res.status(200).json(finalTools);
+const dedupedTools = filterUniquePlugins([...userPlugins, ...finalTools]);
+res.status(200).json(dedupedTools);
} catch (error) {
logger.error('[getAvailableTools]', error);
res.status(500).json({ message: error.message });
}
};
/**
* Converts MCP function format tools to plugin format
* @param {Object} functionTools - Object with function format tools
* @param {Object} customConfig - Custom configuration for MCP servers
* @returns {Array} Array of plugin objects
*/
function convertMCPToolsToPlugins(functionTools, customConfig) {
const plugins = [];
for (const [toolKey, toolData] of Object.entries(functionTools)) {
if (!toolData.function || !toolKey.includes(Constants.mcp_delimiter)) {
continue;
}
const functionData = toolData.function;
const parts = toolKey.split(Constants.mcp_delimiter);
const serverName = parts[parts.length - 1];
const serverConfig = customConfig?.mcpServers?.[serverName];
const plugin = {
name: parts[0], // Use the tool name without server suffix
pluginKey: toolKey,
description: functionData.description || '',
authenticated: true,
icon: serverConfig?.iconPath,
};
// Build authConfig for MCP tools
if (!serverConfig?.customUserVars) {
plugin.authConfig = [];
plugins.push(plugin);
continue;
}
const customVarKeys = Object.keys(serverConfig.customUserVars);
if (customVarKeys.length === 0) {
plugin.authConfig = [];
} else {
plugin.authConfig = Object.entries(serverConfig.customUserVars).map(([key, value]) => ({
authField: key,
label: value.title || key,
description: value.description || '',
}));
}
plugins.push(plugin);
}
return plugins;
}
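// Illustrative mapping (example values invented; the concrete mcp_delimiter string
// comes from librechat-data-provider). A cached entry keyed
// 'web-search' + Constants.mcp_delimiter + 'my-server', whose function.name is
// 'web-search', becomes:
// { name: 'web-search', pluginKey: toolKey, description: '...', authenticated: true,
//   icon: customConfig.mcpServers['my-server']?.iconPath,
//   authConfig: [] } // or one entry per configured customUserVars key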
module.exports = {
getAvailableTools,
getAvailablePluginsController,

View File

@@ -0,0 +1,89 @@
const { Constants } = require('librechat-data-provider');
const { getCustomConfig, getCachedTools } = require('~/server/services/Config');
const { getLogStores } = require('~/cache');
// Mock the dependencies
jest.mock('@librechat/data-schemas', () => ({
logger: {
debug: jest.fn(),
error: jest.fn(),
},
}));
jest.mock('~/server/services/Config', () => ({
getCustomConfig: jest.fn(),
getCachedTools: jest.fn(),
}));
jest.mock('~/server/services/ToolService', () => ({
getToolkitKey: jest.fn(),
}));
jest.mock('~/config', () => ({
getMCPManager: jest.fn(() => ({
loadManifestTools: jest.fn().mockResolvedValue([]),
})),
getFlowStateManager: jest.fn(),
}));
jest.mock('~/app/clients/tools', () => ({
availableTools: [],
}));
jest.mock('~/cache', () => ({
getLogStores: jest.fn(),
}));
// Import the actual module with the function we want to test
const { getAvailableTools } = require('./PluginController');
describe('PluginController', () => {
describe('plugin.icon behavior', () => {
let mockReq, mockRes, mockCache;
const callGetAvailableToolsWithMCPServer = async (mcpServers) => {
mockCache.get.mockResolvedValue(null);
getCustomConfig.mockResolvedValue({ mcpServers });
const functionTools = {
[`test-tool${Constants.mcp_delimiter}test-server`]: {
function: { name: 'test-tool', description: 'A test tool' },
},
};
getCachedTools.mockResolvedValueOnce(functionTools);
getCachedTools.mockResolvedValueOnce({
[`test-tool${Constants.mcp_delimiter}test-server`]: true,
});
await getAvailableTools(mockReq, mockRes);
const responseData = mockRes.json.mock.calls[0][0];
return responseData.find((tool) => tool.name === 'test-tool');
};
beforeEach(() => {
jest.clearAllMocks();
mockReq = { user: { id: 'test-user-id' } };
mockRes = { status: jest.fn().mockReturnThis(), json: jest.fn() };
mockCache = { get: jest.fn(), set: jest.fn() };
getLogStores.mockReturnValue(mockCache);
});
it('should set plugin.icon when iconPath is defined', async () => {
const mcpServers = {
'test-server': {
iconPath: '/path/to/icon.png',
},
};
const testTool = await callGetAvailableToolsWithMCPServer(mcpServers);
expect(testTool.icon).toBe('/path/to/icon.png');
});
it('should set plugin.icon to undefined when iconPath is not defined', async () => {
const mcpServers = {
'test-server': {},
};
const testTool = await callGetAvailableToolsWithMCPServer(mcpServers);
expect(testTool.icon).toBeUndefined();
});
});
});

View File

@@ -1,11 +1,5 @@
-const {
-Tools,
-Constants,
-FileSources,
-webSearchKeys,
-extractWebSearchEnvVars,
-} = require('librechat-data-provider');
const { logger } = require('@librechat/data-schemas');
+const { webSearchKeys, extractWebSearchEnvVars } = require('@librechat/api');
const {
getFiles,
updateUser,
@@ -20,6 +14,7 @@ const { updateUserPluginAuth, deleteUserPluginAuth } = require('~/server/service
const { updateUserPluginsService, deleteUserKey } = require('~/server/services/UserService');
const { verifyEmail, resendVerificationEmail } = require('~/server/services/AuthService');
const { needsRefresh, getNewS3URL } = require('~/server/services/Files/S3/crud');
+const { Tools, Constants, FileSources } = require('librechat-data-provider');
const { processDeleteRequest } = require('~/server/services/Files/process');
const { Transaction, Balance, User } = require('~/db/models');
const { deleteToolCalls } = require('~/models/ToolCall');
@@ -180,14 +175,16 @@ const updateUserPluginsController = async (req, res) => {
try {
const mcpManager = getMCPManager(user.id);
if (mcpManager) {
// Extract server name from pluginKey (format: "mcp_<serverName>")
const serverName = pluginKey.replace(Constants.mcp_prefix, '');
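// For illustration (prefix value assumed): with Constants.mcp_prefix === 'mcp_',
// a pluginKey of 'mcp_github' yields serverName 'github', so only that server's
// connection is torn down instead of every MCP connection the user holds.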
logger.info(
-`[updateUserPluginsController] Disconnecting MCP connections for user ${user.id} after plugin auth update for ${pluginKey}.`,
+`[updateUserPluginsController] Disconnecting MCP server ${serverName} for user ${user.id} after plugin auth update for ${pluginKey}.`,
);
-await mcpManager.disconnectUserConnections(user.id);
+await mcpManager.disconnectUserConnection(user.id, serverName);
}
} catch (disconnectError) {
logger.error(
-`[updateUserPluginsController] Error disconnecting MCP connections for user ${user.id} after plugin auth update:`,
+`[updateUserPluginsController] Error disconnecting MCP connection for user ${user.id} after plugin auth update:`,
disconnectError,
);
// Do not fail the request for this, but log it.

View File

@@ -1,20 +1,23 @@
require('events').EventEmitter.defaultMaxListeners = 100;
const { logger } = require('@librechat/data-schemas');
-const { DynamicStructuredTool } = require('@langchain/core/tools');
-const { getBufferString, HumanMessage } = require('@langchain/core/messages');
const {
sendEvent,
createRun,
Tokenizer,
checkAccess,
memoryInstructions,
formatContentStrings,
createMemoryProcessor,
} = require('@librechat/api');
const {
Callback,
Providers,
GraphEvents,
TitleMethod,
formatMessage,
formatAgentMessages,
-formatContentStrings,
getTokenCountForMessage,
createMetadataAggregator,
} = require('@librechat/agents');
@@ -24,20 +27,22 @@ const {
VisionModes,
ContentTypes,
EModelEndpoint,
KnownEndpoints,
PermissionTypes,
isAgentsEndpoint,
AgentCapabilities,
bedrockInputSchema,
removeNullishValues,
} = require('librechat-data-provider');
+const { DynamicStructuredTool } = require('@langchain/core/tools');
+const { getBufferString, HumanMessage } = require('@langchain/core/messages');
-const { createGetMCPAuthMap, checkCapability } = require('~/server/services/Config');
+const {
+findPluginAuthsByKeys,
+getFormattedMemories,
+deleteMemory,
+setMemory,
+} = require('~/models');
+const { getMCPAuthMap, checkCapability, hasCustomUserVars } = require('~/server/services/Config');
const { addCacheControl, createContextHandlers } = require('~/app/clients/prompts');
const { initializeAgent } = require('~/server/services/Endpoints/agents/agent');
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
-const { getFormattedMemories, deleteMemory, setMemory } = require('~/models');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { getProviderConfig } = require('~/server/services/Endpoints');
const BaseClient = require('~/app/clients/BaseClient');
@@ -54,6 +59,7 @@ const omitTitleOptions = new Set([
'thinkingBudget',
'includeThoughts',
'maxOutputTokens',
+'additionalModelRequestFields',
]);
/**
@@ -65,13 +71,15 @@ const payloadParser = ({ req, agent, endpoint }) => {
if (isAgentsEndpoint(endpoint)) {
return { model: undefined };
} else if (endpoint === EModelEndpoint.bedrock) {
-return bedrockInputSchema.parse(agent.model_parameters);
+const parsedValues = bedrockInputSchema.parse(agent.model_parameters);
+if (parsedValues.thinking == null) {
+parsedValues.thinking = false;
+}
+return parsedValues;
}
return req.body.endpointOption.model_parameters;
};
const legacyContentEndpoints = new Set([KnownEndpoints.groq, KnownEndpoints.deepseek]);
const noSystemModelRegex = [/\b(o1-preview|o1-mini|amazon\.titan-text)\b/gi];
function createTokenCounter(encoding) {
@@ -452,6 +460,12 @@ class AgentClient extends BaseClient {
res: this.options.res,
agent: prelimAgent,
allowedProviders,
+endpointOption: {
+endpoint:
+prelimAgent.id !== Constants.EPHEMERAL_AGENT_ID
+? EModelEndpoint.agents
+: memoryConfig.agent?.provider,
+},
});
if (!agent) {
@@ -498,6 +512,39 @@ class AgentClient extends BaseClient {
return withoutKeys;
}
/**
* Filters out image URLs from message content
* @param {BaseMessage} message - The message to filter
* @returns {BaseMessage} - A new message with image URLs removed
*/
filterImageUrls(message) {
if (!message.content || typeof message.content === 'string') {
return message;
}
if (Array.isArray(message.content)) {
const filteredContent = message.content.filter(
(part) => part.type !== ContentTypes.IMAGE_URL,
);
if (filteredContent.length === 1 && filteredContent[0].type === ContentTypes.TEXT) {
const MessageClass = message.constructor;
return new MessageClass({
content: filteredContent[0].text,
additional_kwargs: message.additional_kwargs,
});
}
const MessageClass = message.constructor;
return new MessageClass({
content: filteredContent,
additional_kwargs: message.additional_kwargs,
});
}
return message;
}
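// Condensed illustration (sample content invented): given a HumanMessage whose
// content is [{ type: 'text', text: 'What is in this image?' },
// { type: 'image_url', image_url: { url: 'data:image/png;base64,...' } }],
// filterImageUrls returns new HumanMessage({ content: 'What is in this image?' }),
// since a single remaining text part is collapsed to a plain string.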
/**
* @param {BaseMessage[]} messages
* @returns {Promise<void | (TAttachment | null)[]>}
@@ -526,7 +573,8 @@ class AgentClient extends BaseClient {
}
}
-const bufferString = getBufferString(messagesToProcess);
+const filteredMessages = messagesToProcess.map((msg) => this.filterImageUrls(msg));
+const bufferString = getBufferString(filteredMessages);
const bufferMessage = new HumanMessage(`# Current Chat:\n\n${bufferString}`);
return await this.processMemory([bufferMessage]);
} catch (error) {
@@ -700,17 +748,12 @@ class AgentClient extends BaseClient {
version: 'v2',
};
-const getUserMCPAuthMap = await createGetMCPAuthMap();
const toolSet = new Set((this.options.agent.tools ?? []).map((tool) => tool && tool.name));
let { messages: initialMessages, indexTokenCountMap } = formatAgentMessages(
payload,
this.indexTokenCountMap,
toolSet,
);
-if (legacyContentEndpoints.has(this.options.agent.endpoint?.toLowerCase())) {
-initialMessages = formatContentStrings(initialMessages);
-}
/**
*
@@ -726,6 +769,9 @@ class AgentClient extends BaseClient {
if (i > 0) {
this.model = agent.model_parameters.model;
}
+if (i > 0 && config.signal == null) {
+config.signal = abortController.signal;
+}
if (agent.recursion_limit && typeof agent.recursion_limit === 'number') {
config.recursionLimit = agent.recursion_limit;
}
@@ -774,6 +820,9 @@ class AgentClient extends BaseClient {
}
let messages = _messages;
+if (agent.useLegacyContent === true) {
+messages = formatContentStrings(messages);
+}
if (
agent.model_parameters?.clientOptions?.defaultHeaders?.['anthropic-beta']?.includes(
'prompt-caching',
@@ -822,10 +871,11 @@ class AgentClient extends BaseClient {
}
try {
-if (getUserMCPAuthMap) {
-config.configurable.userMCPAuthMap = await getUserMCPAuthMap({
+if (await hasCustomUserVars()) {
+config.configurable.userMCPAuthMap = await getMCPAuthMap({
tools: agent.tools,
userId: this.options.req.user.id,
+findPluginAuthsByKeys,
});
}
} catch (err) {
@@ -1001,25 +1051,40 @@ class AgentClient extends BaseClient {
}
const { handleLLMEnd, collected: collectedMetadata } = createMetadataAggregator();
const { req, res, agent } = this.options;
-const endpoint = agent.endpoint;
+let endpoint = agent.endpoint;
/** @type {import('@librechat/agents').ClientOptions} */
let clientOptions = {
maxTokens: 75,
-model: agent.model_parameters.model,
+model: agent.model || agent.model_parameters.model,
};
-const { getOptions, overrideProvider, customEndpointConfig } =
-await getProviderConfig(endpoint);
+let titleProviderConfig = await getProviderConfig(endpoint);
/** @type {TEndpoint | undefined} */
-const endpointConfig = req.app.locals[endpoint] ?? customEndpointConfig;
+const endpointConfig =
+req.app.locals.all ?? req.app.locals[endpoint] ?? titleProviderConfig.customEndpointConfig;
if (!endpointConfig) {
logger.warn(
'[api/server/controllers/agents/client.js #titleConvo] Error getting endpoint config',
);
}
+if (endpointConfig?.titleEndpoint && endpointConfig.titleEndpoint !== endpoint) {
+try {
+titleProviderConfig = await getProviderConfig(endpointConfig.titleEndpoint);
+endpoint = endpointConfig.titleEndpoint;
+} catch (error) {
+logger.warn(
+`[api/server/controllers/agents/client.js #titleConvo] Error getting title endpoint config for ${endpointConfig.titleEndpoint}, falling back to default`,
+error,
+);
+// Fall back to original provider config
+endpoint = agent.endpoint;
+titleProviderConfig = await getProviderConfig(endpoint);
+}
+}
if (
endpointConfig &&
endpointConfig.titleModel &&
@@ -1028,7 +1093,7 @@ class AgentClient extends BaseClient {
clientOptions.model = endpointConfig.titleModel;
}
-const options = await getOptions({
+const options = await titleProviderConfig.getOptions({
req,
res,
optionsOnly: true,
@@ -1037,12 +1102,18 @@ class AgentClient extends BaseClient {
endpointOption: { model_parameters: clientOptions },
});
-let provider = options.provider ?? overrideProvider ?? agent.provider;
+let provider = options.provider ?? titleProviderConfig.overrideProvider ?? agent.provider;
if (
endpoint === EModelEndpoint.azureOpenAI &&
options.llmConfig?.azureOpenAIApiInstanceName == null
) {
provider = Providers.OPENAI;
+} else if (
+endpoint === EModelEndpoint.azureOpenAI &&
+options.llmConfig?.azureOpenAIApiInstanceName != null &&
+provider !== Providers.AZURE
+) {
+provider = Providers.AZURE;
}
/** @type {import('@librechat/agents').ClientOptions} */
@@ -1064,16 +1135,23 @@ class AgentClient extends BaseClient {
),
);
-if (provider === Providers.GOOGLE) {
+if (
+provider === Providers.GOOGLE &&
+(endpointConfig?.titleMethod === TitleMethod.FUNCTIONS ||
+endpointConfig?.titleMethod === TitleMethod.STRUCTURED)
+) {
clientOptions.json = true;
}
try {
const titleResult = await this.run.generateTitle({
provider,
-clientOptions,
inputText: text,
contentParts: this.contentParts,
+clientOptions,
+titleMethod: endpointConfig?.titleMethod,
+titlePrompt: endpointConfig?.titlePrompt,
+titlePromptTemplate: endpointConfig?.titlePromptTemplate,
chainOptions: {
signal: abortController.signal,
callbacks: [
@@ -1121,8 +1199,52 @@ class AgentClient extends BaseClient {
}
}
-/** Silent method, as `recordCollectedUsage` is used instead */
-async recordTokenUsage() {}
/**
* @param {object} params
* @param {number} params.promptTokens
* @param {number} params.completionTokens
* @param {OpenAIUsageMetadata} [params.usage]
* @param {string} [params.model]
* @param {string} [params.context='message']
* @returns {Promise<void>}
*/
async recordTokenUsage({ model, promptTokens, completionTokens, usage, context = 'message' }) {
try {
await spendTokens(
{
model,
context,
conversationId: this.conversationId,
user: this.user ?? this.options.req.user?.id,
endpointTokenConfig: this.options.endpointTokenConfig,
},
{ promptTokens, completionTokens },
);
if (
usage &&
typeof usage === 'object' &&
'reasoning_tokens' in usage &&
typeof usage.reasoning_tokens === 'number'
) {
await spendTokens(
{
model,
context: 'reasoning',
conversationId: this.conversationId,
user: this.user ?? this.options.req.user?.id,
endpointTokenConfig: this.options.endpointTokenConfig,
},
{ completionTokens: usage.reasoning_tokens },
);
}
} catch (error) {
logger.error(
'[api/server/controllers/agents/client.js #recordTokenUsage] Error recording token usage',
error,
);
}
}
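// Note on the method above: when the provider reports reasoning tokens, usage is
// booked twice by design; the second spendTokens call records usage.reasoning_tokens
// under the separate 'reasoning' context so reasoning output is metered apart from
// ordinary completion tokens.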
getEncoding() {
return 'o200k_base';

View File

@@ -0,0 +1,957 @@
const { Providers } = require('@librechat/agents');
const { Constants, EModelEndpoint } = require('librechat-data-provider');
const AgentClient = require('./client');
jest.mock('@librechat/agents', () => ({
...jest.requireActual('@librechat/agents'),
createMetadataAggregator: () => ({
handleLLMEnd: jest.fn(),
collected: [],
}),
}));
describe('AgentClient - titleConvo', () => {
let client;
let mockRun;
let mockReq;
let mockRes;
let mockAgent;
let mockOptions;
beforeEach(() => {
// Reset all mocks
jest.clearAllMocks();
// Mock run object
mockRun = {
generateTitle: jest.fn().mockResolvedValue({
title: 'Generated Title',
}),
};
// Mock agent - with both endpoint and provider
mockAgent = {
id: 'agent-123',
endpoint: EModelEndpoint.openAI, // Use a valid provider as endpoint for getProviderConfig
provider: EModelEndpoint.openAI, // Add provider property
model_parameters: {
model: 'gpt-4',
},
};
// Mock request and response
mockReq = {
app: {
locals: {
[EModelEndpoint.openAI]: {
// Match the agent endpoint
titleModel: 'gpt-3.5-turbo',
titlePrompt: 'Custom title prompt',
titleMethod: 'structured',
titlePromptTemplate: 'Template: {{content}}',
},
},
},
user: {
id: 'user-123',
},
body: {
model: 'gpt-4',
endpoint: EModelEndpoint.openAI,
key: null,
},
};
mockRes = {};
// Mock options
mockOptions = {
req: mockReq,
res: mockRes,
agent: mockAgent,
endpointTokenConfig: {},
};
// Create client instance
client = new AgentClient(mockOptions);
client.run = mockRun;
client.responseMessageId = 'response-123';
client.conversationId = 'convo-123';
client.contentParts = [{ type: 'text', text: 'Test content' }];
client.recordCollectedUsage = jest.fn().mockResolvedValue(); // Mock as async function that resolves
});
describe('titleConvo method', () => {
it('should throw error if run is not initialized', async () => {
client.run = null;
await expect(
client.titleConvo({ text: 'Test', abortController: new AbortController() }),
).rejects.toThrow('Run not initialized');
});
it('should use titlePrompt from endpoint config', async () => {
const text = 'Test conversation text';
const abortController = new AbortController();
await client.titleConvo({ text, abortController });
expect(mockRun.generateTitle).toHaveBeenCalledWith(
expect.objectContaining({
titlePrompt: 'Custom title prompt',
}),
);
});
it('should use titlePromptTemplate from endpoint config', async () => {
const text = 'Test conversation text';
const abortController = new AbortController();
await client.titleConvo({ text, abortController });
expect(mockRun.generateTitle).toHaveBeenCalledWith(
expect.objectContaining({
titlePromptTemplate: 'Template: {{content}}',
}),
);
});
it('should use titleMethod from endpoint config', async () => {
const text = 'Test conversation text';
const abortController = new AbortController();
await client.titleConvo({ text, abortController });
expect(mockRun.generateTitle).toHaveBeenCalledWith(
expect.objectContaining({
provider: Providers.OPENAI,
titleMethod: 'structured',
}),
);
});
it('should use titleModel from endpoint config when provided', async () => {
const text = 'Test conversation text';
const abortController = new AbortController();
await client.titleConvo({ text, abortController });
// Check that generateTitle was called with correct clientOptions
const generateTitleCall = mockRun.generateTitle.mock.calls[0][0];
expect(generateTitleCall.clientOptions.model).toBe('gpt-3.5-turbo');
});
it('should handle missing endpoint config gracefully', async () => {
// Remove endpoint config
mockReq.app.locals[EModelEndpoint.openAI] = undefined;
const text = 'Test conversation text';
const abortController = new AbortController();
await client.titleConvo({ text, abortController });
expect(mockRun.generateTitle).toHaveBeenCalledWith(
expect.objectContaining({
titlePrompt: undefined,
titlePromptTemplate: undefined,
titleMethod: undefined,
}),
);
});
it('should use agent model when titleModel is not provided', async () => {
// Remove titleModel from config
delete mockReq.app.locals[EModelEndpoint.openAI].titleModel;
const text = 'Test conversation text';
const abortController = new AbortController();
await client.titleConvo({ text, abortController });
const generateTitleCall = mockRun.generateTitle.mock.calls[0][0];
expect(generateTitleCall.clientOptions.model).toBe('gpt-4'); // Should use agent's model
});
it('should not use titleModel when it equals CURRENT_MODEL constant', async () => {
mockReq.app.locals[EModelEndpoint.openAI].titleModel = Constants.CURRENT_MODEL;
const text = 'Test conversation text';
const abortController = new AbortController();
await client.titleConvo({ text, abortController });
const generateTitleCall = mockRun.generateTitle.mock.calls[0][0];
expect(generateTitleCall.clientOptions.model).toBe('gpt-4'); // Should use agent's model
});
it('should pass all required parameters to generateTitle', async () => {
const text = 'Test conversation text';
const abortController = new AbortController();
await client.titleConvo({ text, abortController });
expect(mockRun.generateTitle).toHaveBeenCalledWith({
provider: expect.any(String),
inputText: text,
contentParts: client.contentParts,
clientOptions: expect.objectContaining({
model: 'gpt-3.5-turbo',
}),
titlePrompt: 'Custom title prompt',
titlePromptTemplate: 'Template: {{content}}',
titleMethod: 'structured',
chainOptions: expect.objectContaining({
signal: abortController.signal,
}),
});
});
it('should record collected usage after title generation', async () => {
const text = 'Test conversation text';
const abortController = new AbortController();
await client.titleConvo({ text, abortController });
expect(client.recordCollectedUsage).toHaveBeenCalledWith({
model: 'gpt-3.5-turbo',
context: 'title',
collectedUsage: expect.any(Array),
});
});
it('should return the generated title', async () => {
const text = 'Test conversation text';
const abortController = new AbortController();
const result = await client.titleConvo({ text, abortController });
expect(result).toBe('Generated Title');
});
it('should handle errors gracefully and return undefined', async () => {
mockRun.generateTitle.mockRejectedValue(new Error('Title generation failed'));
const text = 'Test conversation text';
const abortController = new AbortController();
const result = await client.titleConvo({ text, abortController });
expect(result).toBeUndefined();
});
it('should pass titleEndpoint configuration to generateTitle', async () => {
// Mock the API key just for this test
const originalApiKey = process.env.ANTHROPIC_API_KEY;
process.env.ANTHROPIC_API_KEY = 'test-api-key';
// Add titleEndpoint to the config
mockReq.app.locals[EModelEndpoint.openAI].titleEndpoint = EModelEndpoint.anthropic;
mockReq.app.locals[EModelEndpoint.openAI].titleMethod = 'structured';
mockReq.app.locals[EModelEndpoint.openAI].titlePrompt = 'Custom title prompt';
mockReq.app.locals[EModelEndpoint.openAI].titlePromptTemplate = 'Custom template';
const text = 'Test conversation text';
const abortController = new AbortController();
await client.titleConvo({ text, abortController });
// Verify generateTitle was called with the custom configuration
expect(mockRun.generateTitle).toHaveBeenCalledWith(
expect.objectContaining({
titleMethod: 'structured',
provider: Providers.ANTHROPIC,
titlePrompt: 'Custom title prompt',
titlePromptTemplate: 'Custom template',
}),
);
// Restore the original API key
if (originalApiKey) {
process.env.ANTHROPIC_API_KEY = originalApiKey;
} else {
delete process.env.ANTHROPIC_API_KEY;
}
});
it('should use all config when endpoint config is missing', async () => {
// Remove endpoint-specific config
delete mockReq.app.locals[EModelEndpoint.openAI].titleModel;
delete mockReq.app.locals[EModelEndpoint.openAI].titlePrompt;
delete mockReq.app.locals[EModelEndpoint.openAI].titleMethod;
delete mockReq.app.locals[EModelEndpoint.openAI].titlePromptTemplate;
// Set 'all' config
mockReq.app.locals.all = {
titleModel: 'gpt-4o-mini',
titlePrompt: 'All config title prompt',
titleMethod: 'completion',
titlePromptTemplate: 'All config template: {{content}}',
};
const text = 'Test conversation text';
const abortController = new AbortController();
await client.titleConvo({ text, abortController });
// Verify generateTitle was called with 'all' config values
expect(mockRun.generateTitle).toHaveBeenCalledWith(
expect.objectContaining({
titleMethod: 'completion',
titlePrompt: 'All config title prompt',
titlePromptTemplate: 'All config template: {{content}}',
}),
);
// Check that the model was set from 'all' config
const generateTitleCall = mockRun.generateTitle.mock.calls[0][0];
expect(generateTitleCall.clientOptions.model).toBe('gpt-4o-mini');
});
it('should prioritize all config over endpoint config for title settings', async () => {
// Set both endpoint and 'all' config
mockReq.app.locals[EModelEndpoint.openAI].titleModel = 'gpt-3.5-turbo';
mockReq.app.locals[EModelEndpoint.openAI].titlePrompt = 'Endpoint title prompt';
mockReq.app.locals[EModelEndpoint.openAI].titleMethod = 'structured';
// Remove titlePromptTemplate from endpoint config to test fallback
delete mockReq.app.locals[EModelEndpoint.openAI].titlePromptTemplate;
mockReq.app.locals.all = {
titleModel: 'gpt-4o-mini',
titlePrompt: 'All config title prompt',
titleMethod: 'completion',
titlePromptTemplate: 'All config template',
};
const text = 'Test conversation text';
const abortController = new AbortController();
await client.titleConvo({ text, abortController });
// Verify 'all' config takes precedence over endpoint config
expect(mockRun.generateTitle).toHaveBeenCalledWith(
expect.objectContaining({
titleMethod: 'completion',
titlePrompt: 'All config title prompt',
titlePromptTemplate: 'All config template',
}),
);
// Check that the model was set from 'all' config
const generateTitleCall = mockRun.generateTitle.mock.calls[0][0];
expect(generateTitleCall.clientOptions.model).toBe('gpt-4o-mini');
});
it('should use all config with titleEndpoint and verify provider switch', async () => {
// Mock the API key for the titleEndpoint provider
const originalApiKey = process.env.ANTHROPIC_API_KEY;
process.env.ANTHROPIC_API_KEY = 'test-anthropic-key';
// Remove endpoint-specific config to test 'all' config
delete mockReq.app.locals[EModelEndpoint.openAI];
// Set comprehensive 'all' config with all new title options
mockReq.app.locals.all = {
titleConvo: true,
titleModel: 'claude-3-haiku-20240307',
titleMethod: 'completion', // Testing the new default method
titlePrompt: 'Generate a concise, descriptive title for this conversation',
titlePromptTemplate: 'Conversation summary: {{content}}',
titleEndpoint: EModelEndpoint.anthropic, // Should switch provider to Anthropic
};
const text = 'Test conversation about AI and machine learning';
const abortController = new AbortController();
await client.titleConvo({ text, abortController });
// Verify all config values were used
expect(mockRun.generateTitle).toHaveBeenCalledWith(
expect.objectContaining({
provider: Providers.ANTHROPIC, // Critical: Verify provider switched to Anthropic
titleMethod: 'completion',
titlePrompt: 'Generate a concise, descriptive title for this conversation',
titlePromptTemplate: 'Conversation summary: {{content}}',
inputText: text,
contentParts: client.contentParts,
}),
);
// Verify the model was set from 'all' config
const generateTitleCall = mockRun.generateTitle.mock.calls[0][0];
expect(generateTitleCall.clientOptions.model).toBe('claude-3-haiku-20240307');
// Verify other client options are set correctly
expect(generateTitleCall.clientOptions).toMatchObject({
model: 'claude-3-haiku-20240307',
// Note: Anthropic's getOptions may set its own maxTokens value
});
// Restore the original API key
if (originalApiKey) {
process.env.ANTHROPIC_API_KEY = originalApiKey;
} else {
delete process.env.ANTHROPIC_API_KEY;
}
});
it('should test all titleMethod options from all config', async () => {
// Test each titleMethod: 'completion', 'functions', 'structured'
const titleMethods = ['completion', 'functions', 'structured'];
for (const method of titleMethods) {
// Clear previous calls
mockRun.generateTitle.mockClear();
// Remove endpoint config
delete mockReq.app.locals[EModelEndpoint.openAI];
// Set 'all' config with specific titleMethod
mockReq.app.locals.all = {
titleModel: 'gpt-4o-mini',
titleMethod: method,
titlePrompt: `Testing ${method} method`,
titlePromptTemplate: `Template for ${method}: {{content}}`,
};
const text = `Test conversation for ${method} method`;
const abortController = new AbortController();
await client.titleConvo({ text, abortController });
// Verify the correct titleMethod was used
expect(mockRun.generateTitle).toHaveBeenCalledWith(
expect.objectContaining({
titleMethod: method,
titlePrompt: `Testing ${method} method`,
titlePromptTemplate: `Template for ${method}: {{content}}`,
}),
);
}
});
describe('Azure-specific title generation', () => {
let originalEnv;
beforeEach(() => {
// Reset mocks
jest.clearAllMocks();
// Save original environment variables
originalEnv = { ...process.env };
// Mock Azure API keys
process.env.AZURE_OPENAI_API_KEY = 'test-azure-key';
process.env.AZURE_API_KEY = 'test-azure-key';
process.env.EASTUS_API_KEY = 'test-eastus-key';
process.env.EASTUS2_API_KEY = 'test-eastus2-key';
});
afterEach(() => {
// Restore environment variables
process.env = originalEnv;
});
it('should use OPENAI provider for Azure serverless endpoints', async () => {
// Set up Azure endpoint with serverless config
mockAgent.endpoint = EModelEndpoint.azureOpenAI;
mockAgent.provider = EModelEndpoint.azureOpenAI;
mockReq.app.locals[EModelEndpoint.azureOpenAI] = {
titleConvo: true,
titleModel: 'grok-3',
titleMethod: 'completion',
titlePrompt: 'Azure serverless title prompt',
streamRate: 35,
modelGroupMap: {
'grok-3': {
group: 'Azure AI Foundry',
deploymentName: 'grok-3',
},
},
groupMap: {
'Azure AI Foundry': {
apiKey: '${AZURE_API_KEY}',
baseURL: 'https://test.services.ai.azure.com/models',
version: '2024-05-01-preview',
serverless: true,
models: {
'grok-3': {
deploymentName: 'grok-3',
},
},
},
},
};
mockReq.body.endpoint = EModelEndpoint.azureOpenAI;
mockReq.body.model = 'grok-3';
const text = 'Test Azure serverless conversation';
const abortController = new AbortController();
await client.titleConvo({ text, abortController });
// Verify provider was switched to OPENAI for serverless
expect(mockRun.generateTitle).toHaveBeenCalledWith(
expect.objectContaining({
provider: Providers.OPENAI, // Should be OPENAI for serverless
titleMethod: 'completion',
titlePrompt: 'Azure serverless title prompt',
}),
);
});
it('should use AZURE provider for Azure endpoints with instanceName', async () => {
// Set up Azure endpoint
mockAgent.endpoint = EModelEndpoint.azureOpenAI;
mockAgent.provider = EModelEndpoint.azureOpenAI;
mockReq.app.locals[EModelEndpoint.azureOpenAI] = {
titleConvo: true,
titleModel: 'gpt-4o',
titleMethod: 'structured',
titlePrompt: 'Azure instance title prompt',
streamRate: 35,
modelGroupMap: {
'gpt-4o': {
group: 'eastus',
deploymentName: 'gpt-4o',
},
},
groupMap: {
eastus: {
apiKey: '${EASTUS_API_KEY}',
instanceName: 'region-instance',
version: '2024-02-15-preview',
models: {
'gpt-4o': {
deploymentName: 'gpt-4o',
},
},
},
},
};
mockReq.body.endpoint = EModelEndpoint.azureOpenAI;
mockReq.body.model = 'gpt-4o';
const text = 'Test Azure instance conversation';
const abortController = new AbortController();
await client.titleConvo({ text, abortController });
// Verify provider remains AZURE with instanceName
expect(mockRun.generateTitle).toHaveBeenCalledWith(
expect.objectContaining({
provider: Providers.AZURE,
titleMethod: 'structured',
titlePrompt: 'Azure instance title prompt',
}),
);
});
it('should handle Azure titleModel with CURRENT_MODEL constant', async () => {
// Set up Azure endpoint
mockAgent.endpoint = EModelEndpoint.azureOpenAI;
mockAgent.provider = EModelEndpoint.azureOpenAI;
mockAgent.model_parameters.model = 'gpt-4o-latest';
mockReq.app.locals[EModelEndpoint.azureOpenAI] = {
titleConvo: true,
titleModel: Constants.CURRENT_MODEL,
titleMethod: 'functions',
streamRate: 35,
modelGroupMap: {
'gpt-4o-latest': {
group: 'region-eastus',
deploymentName: 'gpt-4o-mini',
version: '2024-02-15-preview',
},
},
groupMap: {
'region-eastus': {
apiKey: '${EASTUS2_API_KEY}',
instanceName: 'test-instance',
version: '2024-12-01-preview',
models: {
'gpt-4o-latest': {
deploymentName: 'gpt-4o-mini',
version: '2024-02-15-preview',
},
},
},
},
};
mockReq.body.endpoint = EModelEndpoint.azureOpenAI;
mockReq.body.model = 'gpt-4o-latest';
const text = 'Test Azure current model';
const abortController = new AbortController();
await client.titleConvo({ text, abortController });
// Verify it uses the correct model when titleModel is CURRENT_MODEL
const generateTitleCall = mockRun.generateTitle.mock.calls[0][0];
// When CURRENT_MODEL is used with Azure, the model gets mapped to the deployment name
// In this case, 'gpt-4o-latest' is mapped to 'gpt-4o-mini' deployment
expect(generateTitleCall.clientOptions.model).toBe('gpt-4o-mini');
// Also verify that CURRENT_MODEL constant was not passed as the model
expect(generateTitleCall.clientOptions.model).not.toBe(Constants.CURRENT_MODEL);
});
it('should handle Azure with multiple model groups', async () => {
// Set up Azure endpoint
mockAgent.endpoint = EModelEndpoint.azureOpenAI;
mockAgent.provider = EModelEndpoint.azureOpenAI;
mockReq.app.locals[EModelEndpoint.azureOpenAI] = {
titleConvo: true,
titleModel: 'o1-mini',
titleMethod: 'completion',
streamRate: 35,
modelGroupMap: {
'gpt-4o': {
group: 'eastus',
deploymentName: 'gpt-4o',
},
'o1-mini': {
group: 'region-eastus',
deploymentName: 'o1-mini',
},
'codex-mini': {
group: 'codex-mini',
deploymentName: 'codex-mini',
},
},
groupMap: {
eastus: {
apiKey: '${EASTUS_API_KEY}',
instanceName: 'region-eastus',
version: '2024-02-15-preview',
models: {
'gpt-4o': {
deploymentName: 'gpt-4o',
},
},
},
'region-eastus': {
apiKey: '${EASTUS2_API_KEY}',
instanceName: 'region-eastus2',
version: '2024-12-01-preview',
models: {
'o1-mini': {
deploymentName: 'o1-mini',
},
},
},
'codex-mini': {
apiKey: '${AZURE_API_KEY}',
baseURL: 'https://example.cognitiveservices.azure.com/openai/',
version: '2025-04-01-preview',
serverless: true,
models: {
'codex-mini': {
deploymentName: 'codex-mini',
},
},
},
},
};
mockReq.body.endpoint = EModelEndpoint.azureOpenAI;
mockReq.body.model = 'o1-mini';
const text = 'Test Azure multi-group conversation';
const abortController = new AbortController();
await client.titleConvo({ text, abortController });
// Verify correct model and provider are used
expect(mockRun.generateTitle).toHaveBeenCalledWith(
expect.objectContaining({
provider: Providers.AZURE,
titleMethod: 'completion',
}),
);
const generateTitleCall = mockRun.generateTitle.mock.calls[0][0];
expect(generateTitleCall.clientOptions.model).toBe('o1-mini');
expect(generateTitleCall.clientOptions.maxTokens).toBeUndefined(); // o1 models shouldn't have maxTokens
});
it('should use all config as fallback for Azure endpoints', async () => {
// Set up Azure endpoint with minimal config
mockAgent.endpoint = EModelEndpoint.azureOpenAI;
mockAgent.provider = EModelEndpoint.azureOpenAI;
mockReq.body.endpoint = EModelEndpoint.azureOpenAI;
mockReq.body.model = 'gpt-4';
// Remove Azure-specific config
delete mockReq.app.locals[EModelEndpoint.azureOpenAI];
// Set 'all' config as fallback with a serverless Azure config
mockReq.app.locals.all = {
titleConvo: true,
titleModel: 'gpt-4',
titleMethod: 'structured',
titlePrompt: 'Fallback title prompt from all config',
titlePromptTemplate: 'Template: {{content}}',
modelGroupMap: {
'gpt-4': {
group: 'default-group',
deploymentName: 'gpt-4',
},
},
groupMap: {
'default-group': {
apiKey: '${AZURE_API_KEY}',
baseURL: 'https://default.openai.azure.com/',
version: '2024-02-15-preview',
serverless: true,
models: {
'gpt-4': {
deploymentName: 'gpt-4',
},
},
},
},
};
const text = 'Test Azure with all config fallback';
const abortController = new AbortController();
await client.titleConvo({ text, abortController });
// Verify all config is used
expect(mockRun.generateTitle).toHaveBeenCalledWith(
expect.objectContaining({
provider: Providers.OPENAI, // Should be OPENAI when no instanceName
titleMethod: 'structured',
titlePrompt: 'Fallback title prompt from all config',
titlePromptTemplate: 'Template: {{content}}',
}),
);
});
});
});
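// The three Azure titleConvo cases above all exercise the same resolution
// step: the requested model (or the CURRENT_MODEL placeholder) is looked up
// in modelGroupMap, and the matching groupMap entry supplies the deployment
// name and credentials that generateTitle ultimately receives. A minimal
// sketch of that lookup, with simplified config shapes assumed purely for
// illustration (the real resolver handles more fields):
function resolveAzureTitleModel(azureConfig, requestedModel, titleModel) {
  // CURRENT_MODEL (or an unset titleModel) falls back to the request's model
  const model =
    !titleModel || titleModel === Constants.CURRENT_MODEL ? requestedModel : titleModel;
  const groupName = azureConfig.modelGroupMap[model]?.group;
  const group = azureConfig.groupMap[groupName];
  if (!group) {
    throw new Error(`No Azure group configured for model "${model}"`);
  }
  // The deployment name is what the Azure API receives as the "model",
  // e.g. 'gpt-4o-latest' resolves to 'gpt-4o-mini' in the CURRENT_MODEL test.
  const deploymentName =
    group.models?.[model]?.deploymentName ?? azureConfig.modelGroupMap[model].deploymentName;
  return { model: deploymentName, apiKey: group.apiKey, version: group.version };
}
// The 'all'-config fallback test additionally pins that a serverless group
// without an instanceName is treated as an OpenAI-compatible endpoint
// (provider OPENAI rather than AZURE).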
describe('runMemory method', () => {
let client;
let mockReq;
let mockRes;
let mockAgent;
let mockOptions;
let mockProcessMemory;
beforeEach(() => {
jest.clearAllMocks();
mockAgent = {
id: 'agent-123',
endpoint: EModelEndpoint.openAI,
provider: EModelEndpoint.openAI,
model_parameters: {
model: 'gpt-4',
},
};
mockReq = {
app: {
locals: {
memory: {
messageWindowSize: 3,
},
},
},
user: {
id: 'user-123',
personalization: {
memories: true,
},
},
};
mockRes = {};
mockOptions = {
req: mockReq,
res: mockRes,
agent: mockAgent,
};
mockProcessMemory = jest.fn().mockResolvedValue([]);
client = new AgentClient(mockOptions);
client.processMemory = mockProcessMemory;
client.conversationId = 'convo-123';
client.responseMessageId = 'response-123';
});
it('should filter out image URLs from message content', async () => {
const { HumanMessage, AIMessage } = require('@langchain/core/messages');
const messages = [
new HumanMessage({
content: [
{
type: 'text',
text: 'What is in this image?',
},
{
type: 'image_url',
image_url: {
url: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==',
detail: 'auto',
},
},
],
}),
new AIMessage('I can see a small red pixel in the image.'),
new HumanMessage({
content: [
{
type: 'text',
text: 'What about this one?',
},
{
type: 'image_url',
image_url: {
url: 'data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAYABgAAD/',
detail: 'high',
},
},
],
}),
];
await client.runMemory(messages);
expect(mockProcessMemory).toHaveBeenCalledTimes(1);
const processedMessage = mockProcessMemory.mock.calls[0][0][0];
// Verify the buffer message was created
expect(processedMessage.constructor.name).toBe('HumanMessage');
expect(processedMessage.content).toContain('# Current Chat:');
// Verify that image URLs are not in the buffer string
expect(processedMessage.content).not.toContain('image_url');
expect(processedMessage.content).not.toContain('data:image');
expect(processedMessage.content).not.toContain('base64');
// Verify text content is preserved
expect(processedMessage.content).toContain('What is in this image?');
expect(processedMessage.content).toContain('I can see a small red pixel in the image.');
expect(processedMessage.content).toContain('What about this one?');
});
it('should handle messages with only text content', async () => {
const { HumanMessage, AIMessage } = require('@langchain/core/messages');
const messages = [
new HumanMessage('Hello, how are you?'),
new AIMessage('I am doing well, thank you!'),
new HumanMessage('That is great to hear.'),
];
await client.runMemory(messages);
expect(mockProcessMemory).toHaveBeenCalledTimes(1);
const processedMessage = mockProcessMemory.mock.calls[0][0][0];
expect(processedMessage.content).toContain('Hello, how are you?');
expect(processedMessage.content).toContain('I am doing well, thank you!');
expect(processedMessage.content).toContain('That is great to hear.');
});
it('should handle mixed content types correctly', async () => {
const { HumanMessage } = require('@langchain/core/messages');
const { ContentTypes } = require('librechat-data-provider');
const messages = [
new HumanMessage({
content: [
{
type: 'text',
text: 'Here is some text',
},
{
type: ContentTypes.IMAGE_URL,
image_url: {
url: 'https://example.com/image.png',
},
},
{
type: 'text',
text: ' and more text',
},
],
}),
];
await client.runMemory(messages);
expect(mockProcessMemory).toHaveBeenCalledTimes(1);
const processedMessage = mockProcessMemory.mock.calls[0][0][0];
// Should contain text parts but not image URLs
expect(processedMessage.content).toContain('Here is some text');
expect(processedMessage.content).toContain('and more text');
expect(processedMessage.content).not.toContain('example.com/image.png');
expect(processedMessage.content).not.toContain('IMAGE_URL');
});
it('should preserve original messages without mutation', async () => {
const { HumanMessage } = require('@langchain/core/messages');
const originalContent = [
{
type: 'text',
text: 'Original text',
},
{
type: 'image_url',
image_url: {
url: 'data:image/png;base64,ABC123',
},
},
];
const messages = [
new HumanMessage({
content: [...originalContent],
}),
];
await client.runMemory(messages);
// Verify original message wasn't mutated
expect(messages[0].content).toHaveLength(2);
expect(messages[0].content[1].type).toBe('image_url');
expect(messages[0].content[1].image_url.url).toBe('data:image/png;base64,ABC123');
});
it('should handle message window size correctly', async () => {
const { HumanMessage, AIMessage } = require('@langchain/core/messages');
const messages = [
new HumanMessage('Message 1'),
new AIMessage('Response 1'),
new HumanMessage('Message 2'),
new AIMessage('Response 2'),
new HumanMessage('Message 3'),
new AIMessage('Response 3'),
];
// Window size is set to 3 in mockReq
await client.runMemory(messages);
expect(mockProcessMemory).toHaveBeenCalledTimes(1);
const processedMessage = mockProcessMemory.mock.calls[0][0][0];
// Should only include last 3 messages due to window size
expect(processedMessage.content).toContain('Message 3');
expect(processedMessage.content).toContain('Response 3');
expect(processedMessage.content).not.toContain('Message 1');
expect(processedMessage.content).not.toContain('Response 1');
});
it('should return early if processMemory is not set', async () => {
const { HumanMessage } = require('@langchain/core/messages');
client.processMemory = null;
const result = await client.runMemory([new HumanMessage('Test')]);
expect(result).toBeUndefined();
expect(mockProcessMemory).not.toHaveBeenCalled();
});
});
});
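What these runMemory tests pin down is a buffering step that flattens the windowed messages into a single HumanMessage prefixed with '# Current Chat:', keeping text parts and dropping image_url parts without mutating the originals. A rough sketch of that filtering, where the helper name and exact buffer layout are assumptions rather than the actual implementation:

const { HumanMessage } = require('@langchain/core/messages');

// Rough sketch; helper name and buffer format are illustrative only.
function buildMemoryBuffer(messages, messageWindowSize) {
  const windowed = messages.slice(-messageWindowSize);
  const lines = windowed.map((message) => {
    const role = message.constructor.name === 'HumanMessage' ? 'Human' : 'AI';
    if (typeof message.content === 'string') {
      return `${role}: ${message.content}`;
    }
    // Keep text parts; drop image_url parts so base64 payloads never reach memory
    const text = message.content
      .filter((part) => part.type === 'text')
      .map((part) => part.text)
      .join(' ');
    return `${role}: ${text}`;
  });
  return new HumanMessage(`# Current Chat:\n${lines.join('\n')}`);
}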

View File

@@ -105,8 +105,6 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
return res.end();
}
await cache.delete(cacheKey);
- // const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
- // logger.debug(`[${originPath}] Cancelled run:`, cancelledRun);
} catch (error) {
logger.error(`[${originPath}] Error cancelling run`, error);
}
@@ -115,7 +113,6 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
let run;
try {
- // run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
await recordUsage({
...run.usage,
model: run.model,
@@ -128,18 +125,9 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
let finalEvent;
try {
- // const errorContentPart = {
- // text: {
- // value:
- // error?.message ?? 'There was an error processing your request. Please try again later.',
- // },
- // type: ContentTypes.ERROR,
- // };
finalEvent = {
final: true,
conversation: await getConvo(req.user.id, conversationId),
- // runMessages,
};
} catch (error) {
logger.error(`[${originPath}] Error finalizing error process`, error);

View File

@@ -12,6 +12,7 @@ const { saveMessage } = require('~/models');
const AgentController = async (req, res, next, initializeClient, addTitle) => {
let {
text,
+ isRegenerate,
endpointOption,
conversationId,
isContinued = false,
@@ -167,6 +168,7 @@ const AgentController = async (req, res, next, initializeClient, addTitle) => {
onStart,
getReqData,
isContinued,
+ isRegenerate,
editedContent,
conversationId,
parentMessageId,

View File

@@ -1,6 +1,8 @@
const { z } = require('zod');
const fs = require('fs').promises;
const { nanoid } = require('nanoid');
+ const { logger } = require('@librechat/data-schemas');
+ const { agentCreateSchema, agentUpdateSchema } = require('@librechat/api');
const {
Tools,
Constants,
@@ -8,6 +10,7 @@ const {
SystemRoles,
EToolResources,
actionDelimiter,
+ removeNullishValues,
} = require('librechat-data-provider');
const {
getAgent,
@@ -30,6 +33,7 @@ const { deleteFileByFilter } = require('~/models/File');
const systemTools = {
[Tools.execute_code]: true,
[Tools.file_search]: true,
+ [Tools.web_search]: true,
};
/**
@@ -42,9 +46,13 @@ const systemTools = {
*/
const createAgentHandler = async (req, res) => {
try {
- const { tools = [], provider, name, description, instructions, model, ...agentData } = req.body;
+ const validatedData = agentCreateSchema.parse(req.body);
+ const { tools = [], ...agentData } = removeNullishValues(validatedData);
const { id: userId } = req.user;
+ agentData.id = `agent_${nanoid()}`;
+ agentData.author = userId;
+ agentData.tools = [];
const availableTools = await getCachedTools({ includeGlobal: true });
@@ -58,19 +66,13 @@ const createAgentHandler = async (req, res) => {
}
}
- Object.assign(agentData, {
- author: userId,
- name,
- description,
- instructions,
- provider,
- model,
- });
- agentData.id = `agent_${nanoid()}`;
const agent = await createAgent(agentData);
res.status(201).json(agent);
} catch (error) {
+ if (error instanceof z.ZodError) {
+ logger.error('[/Agents] Validation error', error.errors);
+ return res.status(400).json({ error: 'Invalid request data', details: error.errors });
+ }
logger.error('[/Agents] Error creating agent', error);
res.status(500).json({ error: error.message });
}
@@ -154,14 +156,16 @@ const getAgentHandler = async (req, res) => {
const updateAgentHandler = async (req, res) => {
try {
const id = req.params.id;
- const { projectIds, removeProjectIds, ...updateData } = req.body;
+ const validatedData = agentUpdateSchema.parse(req.body);
+ const { projectIds, removeProjectIds, ...updateData } = removeNullishValues(validatedData);
const isAdmin = req.user.role === SystemRoles.ADMIN;
const existingAgent = await getAgent({ id });
- const isAuthor = existingAgent.author.toString() === req.user.id;
if (!existingAgent) {
return res.status(404).json({ error: 'Agent not found' });
}
+ const isAuthor = existingAgent.author.toString() === req.user.id;
const hasEditPermission = existingAgent.isCollaborative || isAdmin || isAuthor;
if (!hasEditPermission) {
@@ -200,6 +204,11 @@ const updateAgentHandler = async (req, res) => {
return res.json(updatedAgent);
} catch (error) {
+ if (error instanceof z.ZodError) {
+ logger.error('[/Agents/:id] Validation error', error.errors);
+ return res.status(400).json({ error: 'Invalid request data', details: error.errors });
+ }
logger.error('[/Agents/:id] Error updating Agent', error);
if (error.statusCode === 409) {
@@ -382,6 +391,22 @@ const uploadAgentAvatarHandler = async (req, res) => {
return res.status(400).json({ message: 'Agent ID is required' });
}
+ const isAdmin = req.user.role === SystemRoles.ADMIN;
+ const existingAgent = await getAgent({ id: agent_id });
+ if (!existingAgent) {
+ return res.status(404).json({ error: 'Agent not found' });
+ }
+ const isAuthor = existingAgent.author.toString() === req.user.id;
+ const hasEditPermission = existingAgent.isCollaborative || isAdmin || isAuthor;
+ if (!hasEditPermission) {
+ return res.status(403).json({
+ error: 'You do not have permission to modify this non-collaborative agent',
+ });
+ }
const buffer = await fs.readFile(req.file.path);
const fileStrategy = req.app.locals.fileStrategy;
@@ -404,14 +429,7 @@ const uploadAgentAvatarHandler = async (req, res) => {
source: fileStrategy,
};
- let _avatar;
- try {
- const agent = await getAgent({ id: agent_id });
- _avatar = agent.avatar;
- } catch (error) {
- logger.error('[/:agent_id/avatar] Error fetching agent', error);
- _avatar = {};
- }
+ let _avatar = existingAgent.avatar;
if (_avatar && _avatar.source) {
const { deleteFile } = getStrategyFunctions(_avatar.source);
@@ -433,7 +451,7 @@ const uploadAgentAvatarHandler = async (req, res) => {
};
promises.push(
- await updateAgent({ id: agent_id, author: req.user.id }, data, {
+ await updateAgent({ id: agent_id }, data, {
updatingUserId: req.user.id,
}),
);
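The net effect of swapping the hand-rolled destructuring for agentCreateSchema.parse / agentUpdateSchema.parse is that unknown keys are dropped before they can reach the database, and server-controlled fields (author, id, versions, timestamps) are assigned explicitly rather than taken from the request. zod object schemas strip unrecognized keys by default, so a minimal sketch of the idea looks like this (field list abbreviated; the real schemas live in @librechat/api):

const { z } = require('zod');

// Abbreviated sketch; the real agentCreateSchema covers more fields.
const agentCreateSchema = z.object({
  name: z.string().optional(),
  description: z.string().nullable().optional(),
  instructions: z.string().nullable().optional(),
  provider: z.string(),
  model: z.string(),
  tools: z.array(z.string()).optional(),
  model_parameters: z.record(z.unknown()).optional(),
});

// Unknown keys such as `author` or `versions` are silently stripped:
const parsed = agentCreateSchema.parse({
  provider: 'openai',
  model: 'gpt-4',
  author: 'attacker-id', // dropped by zod's default strip behavior
});
// parsed => { provider: 'openai', model: 'gpt-4' }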

View File

@@ -0,0 +1,659 @@
const mongoose = require('mongoose');
const { v4: uuidv4 } = require('uuid');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { agentSchema } = require('@librechat/data-schemas');
// Only mock the dependencies that are not database-related
jest.mock('~/server/services/Config', () => ({
getCachedTools: jest.fn().mockResolvedValue({
web_search: true,
execute_code: true,
file_search: true,
}),
}));
jest.mock('~/models/Project', () => ({
getProjectByName: jest.fn().mockResolvedValue(null),
}));
jest.mock('~/server/services/Files/strategies', () => ({
getStrategyFunctions: jest.fn(),
}));
jest.mock('~/server/services/Files/images/avatar', () => ({
resizeAvatar: jest.fn(),
}));
jest.mock('~/server/services/Files/S3/crud', () => ({
refreshS3Url: jest.fn(),
}));
jest.mock('~/server/services/Files/process', () => ({
filterFile: jest.fn(),
}));
jest.mock('~/models/Action', () => ({
updateAction: jest.fn(),
getActions: jest.fn().mockResolvedValue([]),
}));
jest.mock('~/models/File', () => ({
deleteFileByFilter: jest.fn(),
}));
const { createAgent: createAgentHandler, updateAgent: updateAgentHandler } = require('./v1');
/**
* @type {import('mongoose').Model<import('@librechat/data-schemas').IAgent>}
*/
let Agent;
describe('Agent Controllers - Mass Assignment Protection', () => {
let mongoServer;
let mockReq;
let mockRes;
beforeAll(async () => {
mongoServer = await MongoMemoryServer.create();
const mongoUri = mongoServer.getUri();
await mongoose.connect(mongoUri);
Agent = mongoose.models.Agent || mongoose.model('Agent', agentSchema);
}, 20000);
afterAll(async () => {
await mongoose.disconnect();
await mongoServer.stop();
});
beforeEach(async () => {
await Agent.deleteMany({});
// Reset all mocks
jest.clearAllMocks();
// Setup mock request and response objects
mockReq = {
user: {
id: new mongoose.Types.ObjectId().toString(),
role: 'USER',
},
body: {},
params: {},
app: {
locals: {
fileStrategy: 'local',
},
},
};
mockRes = {
status: jest.fn().mockReturnThis(),
json: jest.fn().mockReturnThis(),
};
});
describe('createAgentHandler', () => {
test('should create agent with allowed fields only', async () => {
const validData = {
name: 'Test Agent',
description: 'A test agent',
instructions: 'Be helpful',
provider: 'openai',
model: 'gpt-4',
tools: ['web_search'],
model_parameters: { temperature: 0.7 },
tool_resources: {
file_search: { file_ids: ['file1', 'file2'] },
},
};
mockReq.body = validData;
await createAgentHandler(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(201);
expect(mockRes.json).toHaveBeenCalled();
const createdAgent = mockRes.json.mock.calls[0][0];
expect(createdAgent.name).toBe('Test Agent');
expect(createdAgent.description).toBe('A test agent');
expect(createdAgent.provider).toBe('openai');
expect(createdAgent.model).toBe('gpt-4');
expect(createdAgent.author.toString()).toBe(mockReq.user.id);
expect(createdAgent.tools).toContain('web_search');
// Verify in database
const agentInDb = await Agent.findOne({ id: createdAgent.id });
expect(agentInDb).toBeDefined();
expect(agentInDb.name).toBe('Test Agent');
expect(agentInDb.author.toString()).toBe(mockReq.user.id);
});
test('should reject creation with unauthorized fields (mass assignment protection)', async () => {
const maliciousData = {
// Required fields
provider: 'openai',
model: 'gpt-4',
name: 'Malicious Agent',
// Unauthorized fields that should be stripped
author: new mongoose.Types.ObjectId().toString(), // Should not be able to set author
authorName: 'Hacker', // Should be stripped
isCollaborative: true, // Should be stripped on creation
versions: [], // Should be stripped
_id: new mongoose.Types.ObjectId(), // Should be stripped
id: 'custom_agent_id', // Should be overridden
createdAt: new Date('2020-01-01'), // Should be stripped
updatedAt: new Date('2020-01-01'), // Should be stripped
};
mockReq.body = maliciousData;
await createAgentHandler(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(201);
const createdAgent = mockRes.json.mock.calls[0][0];
// Verify unauthorized fields were not set
expect(createdAgent.author.toString()).toBe(mockReq.user.id); // Should be the request user, not the malicious value
expect(createdAgent.authorName).toBeUndefined();
expect(createdAgent.isCollaborative).toBeFalsy();
expect(createdAgent.versions).toHaveLength(1); // Should have exactly 1 version from creation
expect(createdAgent.id).not.toBe('custom_agent_id'); // Should have generated ID
expect(createdAgent.id).toMatch(/^agent_/); // Should have proper prefix
// Verify timestamps are recent (not the malicious dates)
const createdTime = new Date(createdAgent.createdAt).getTime();
const now = Date.now();
expect(now - createdTime).toBeLessThan(5000); // Created within last 5 seconds
// Verify in database
const agentInDb = await Agent.findOne({ id: createdAgent.id });
expect(agentInDb.author.toString()).toBe(mockReq.user.id);
expect(agentInDb.authorName).toBeUndefined();
});
test('should validate required fields', async () => {
const invalidData = {
name: 'Missing Required Fields',
// Missing provider and model
};
mockReq.body = invalidData;
await createAgentHandler(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(400);
expect(mockRes.json).toHaveBeenCalledWith(
expect.objectContaining({
error: 'Invalid request data',
details: expect.any(Array),
}),
);
// Verify nothing was created in database
const count = await Agent.countDocuments();
expect(count).toBe(0);
});
test('should handle tool_resources validation', async () => {
const dataWithInvalidToolResources = {
provider: 'openai',
model: 'gpt-4',
name: 'Agent with Tool Resources',
tool_resources: {
// Valid resources
file_search: {
file_ids: ['file1', 'file2'],
vector_store_ids: ['vs1'],
},
execute_code: {
file_ids: ['file3'],
},
// Invalid resource (should be stripped by schema)
invalid_resource: {
file_ids: ['file4'],
},
},
};
mockReq.body = dataWithInvalidToolResources;
await createAgentHandler(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(201);
const createdAgent = mockRes.json.mock.calls[0][0];
expect(createdAgent.tool_resources).toBeDefined();
expect(createdAgent.tool_resources.file_search).toBeDefined();
expect(createdAgent.tool_resources.execute_code).toBeDefined();
expect(createdAgent.tool_resources.invalid_resource).toBeUndefined(); // Should be stripped
// Verify in database
const agentInDb = await Agent.findOne({ id: createdAgent.id });
expect(agentInDb.tool_resources.invalid_resource).toBeUndefined();
});
test('should handle avatar validation', async () => {
const dataWithAvatar = {
provider: 'openai',
model: 'gpt-4',
name: 'Agent with Avatar',
avatar: {
filepath: 'https://example.com/avatar.png',
source: 's3',
},
};
mockReq.body = dataWithAvatar;
await createAgentHandler(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(201);
const createdAgent = mockRes.json.mock.calls[0][0];
expect(createdAgent.avatar).toEqual({
filepath: 'https://example.com/avatar.png',
source: 's3',
});
});
test('should handle invalid avatar format', async () => {
const dataWithInvalidAvatar = {
provider: 'openai',
model: 'gpt-4',
name: 'Agent with Invalid Avatar',
avatar: 'just-a-string', // Invalid format
};
mockReq.body = dataWithInvalidAvatar;
await createAgentHandler(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(400);
expect(mockRes.json).toHaveBeenCalledWith(
expect.objectContaining({
error: 'Invalid request data',
}),
);
});
});
describe('updateAgentHandler', () => {
let existingAgentId;
let existingAgentAuthorId;
beforeEach(async () => {
// Create an existing agent for update tests
existingAgentAuthorId = new mongoose.Types.ObjectId();
const agent = await Agent.create({
id: `agent_${uuidv4()}`,
name: 'Original Agent',
provider: 'openai',
model: 'gpt-3.5-turbo',
author: existingAgentAuthorId,
description: 'Original description',
isCollaborative: false,
versions: [
{
name: 'Original Agent',
provider: 'openai',
model: 'gpt-3.5-turbo',
description: 'Original description',
createdAt: new Date(),
updatedAt: new Date(),
},
],
});
existingAgentId = agent.id;
});
test('should update agent with allowed fields only', async () => {
mockReq.user.id = existingAgentAuthorId.toString(); // Set as author
mockReq.params.id = existingAgentId;
mockReq.body = {
name: 'Updated Agent',
description: 'Updated description',
model: 'gpt-4',
isCollaborative: true, // This IS allowed in updates
};
await updateAgentHandler(mockReq, mockRes);
expect(mockRes.status).not.toHaveBeenCalledWith(400);
expect(mockRes.status).not.toHaveBeenCalledWith(403);
expect(mockRes.json).toHaveBeenCalled();
const updatedAgent = mockRes.json.mock.calls[0][0];
expect(updatedAgent.name).toBe('Updated Agent');
expect(updatedAgent.description).toBe('Updated description');
expect(updatedAgent.model).toBe('gpt-4');
expect(updatedAgent.isCollaborative).toBe(true);
expect(updatedAgent.author).toBe(existingAgentAuthorId.toString());
// Verify in database
const agentInDb = await Agent.findOne({ id: existingAgentId });
expect(agentInDb.name).toBe('Updated Agent');
expect(agentInDb.isCollaborative).toBe(true);
});
test('should reject update with unauthorized fields (mass assignment protection)', async () => {
mockReq.user.id = existingAgentAuthorId.toString();
mockReq.params.id = existingAgentId;
mockReq.body = {
name: 'Updated Name',
// Unauthorized fields that should be stripped
author: new mongoose.Types.ObjectId().toString(), // Should not be able to change author
authorName: 'Hacker', // Should be stripped
id: 'different_agent_id', // Should be stripped
_id: new mongoose.Types.ObjectId(), // Should be stripped
versions: [], // Should be stripped
createdAt: new Date('2020-01-01'), // Should be stripped
updatedAt: new Date('2020-01-01'), // Should be stripped
};
await updateAgentHandler(mockReq, mockRes);
expect(mockRes.json).toHaveBeenCalled();
const updatedAgent = mockRes.json.mock.calls[0][0];
// Verify unauthorized fields were not changed
expect(updatedAgent.author).toBe(existingAgentAuthorId.toString()); // Should not have changed
expect(updatedAgent.authorName).toBeUndefined();
expect(updatedAgent.id).toBe(existingAgentId); // Should not have changed
expect(updatedAgent.name).toBe('Updated Name'); // Only this should have changed
// Verify in database
const agentInDb = await Agent.findOne({ id: existingAgentId });
expect(agentInDb.author.toString()).toBe(existingAgentAuthorId.toString());
expect(agentInDb.id).toBe(existingAgentId);
});
test('should reject update from non-author when not collaborative', async () => {
const differentUserId = new mongoose.Types.ObjectId().toString();
mockReq.user.id = differentUserId; // Different user
mockReq.params.id = existingAgentId;
mockReq.body = {
name: 'Unauthorized Update',
};
await updateAgentHandler(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(403);
expect(mockRes.json).toHaveBeenCalledWith({
error: 'You do not have permission to modify this non-collaborative agent',
});
// Verify agent was not modified in database
const agentInDb = await Agent.findOne({ id: existingAgentId });
expect(agentInDb.name).toBe('Original Agent');
});
test('should allow update from non-author when collaborative', async () => {
// First make the agent collaborative
await Agent.updateOne({ id: existingAgentId }, { isCollaborative: true });
const differentUserId = new mongoose.Types.ObjectId().toString();
mockReq.user.id = differentUserId; // Different user
mockReq.params.id = existingAgentId;
mockReq.body = {
name: 'Collaborative Update',
};
await updateAgentHandler(mockReq, mockRes);
expect(mockRes.status).not.toHaveBeenCalledWith(403);
expect(mockRes.json).toHaveBeenCalled();
const updatedAgent = mockRes.json.mock.calls[0][0];
expect(updatedAgent.name).toBe('Collaborative Update');
// Author field should be removed for non-author
expect(updatedAgent.author).toBeUndefined();
// Verify in database
const agentInDb = await Agent.findOne({ id: existingAgentId });
expect(agentInDb.name).toBe('Collaborative Update');
});
test('should allow admin to update any agent', async () => {
const adminUserId = new mongoose.Types.ObjectId().toString();
mockReq.user.id = adminUserId;
mockReq.user.role = 'ADMIN'; // Set as admin
mockReq.params.id = existingAgentId;
mockReq.body = {
name: 'Admin Update',
};
await updateAgentHandler(mockReq, mockRes);
expect(mockRes.status).not.toHaveBeenCalledWith(403);
expect(mockRes.json).toHaveBeenCalled();
const updatedAgent = mockRes.json.mock.calls[0][0];
expect(updatedAgent.name).toBe('Admin Update');
});
test('should handle projectIds updates', async () => {
mockReq.user.id = existingAgentAuthorId.toString();
mockReq.params.id = existingAgentId;
const projectId1 = new mongoose.Types.ObjectId().toString();
const projectId2 = new mongoose.Types.ObjectId().toString();
mockReq.body = {
projectIds: [projectId1, projectId2],
};
await updateAgentHandler(mockReq, mockRes);
expect(mockRes.json).toHaveBeenCalled();
const updatedAgent = mockRes.json.mock.calls[0][0];
expect(updatedAgent).toBeDefined();
// Note: updateAgentProjects requires more setup, so we just verify the handler doesn't crash
});
test('should validate tool_resources in updates', async () => {
mockReq.user.id = existingAgentAuthorId.toString();
mockReq.params.id = existingAgentId;
mockReq.body = {
tool_resources: {
ocr: {
file_ids: ['ocr1', 'ocr2'],
},
execute_code: {
file_ids: ['img1'],
},
// Invalid tool resource
invalid_tool: {
file_ids: ['invalid'],
},
},
};
await updateAgentHandler(mockReq, mockRes);
expect(mockRes.json).toHaveBeenCalled();
const updatedAgent = mockRes.json.mock.calls[0][0];
expect(updatedAgent.tool_resources).toBeDefined();
expect(updatedAgent.tool_resources.ocr).toBeDefined();
expect(updatedAgent.tool_resources.execute_code).toBeDefined();
expect(updatedAgent.tool_resources.invalid_tool).toBeUndefined();
});
test('should return 404 for non-existent agent', async () => {
mockReq.user.id = existingAgentAuthorId.toString();
mockReq.params.id = `agent_${uuidv4()}`; // Non-existent ID
mockReq.body = {
name: 'Update Non-existent',
};
await updateAgentHandler(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(404);
expect(mockRes.json).toHaveBeenCalledWith({ error: 'Agent not found' });
});
test('should handle validation errors properly', async () => {
mockReq.user.id = existingAgentAuthorId.toString();
mockReq.params.id = existingAgentId;
mockReq.body = {
model_parameters: 'invalid-not-an-object', // Should be an object
};
await updateAgentHandler(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(400);
expect(mockRes.json).toHaveBeenCalledWith(
expect.objectContaining({
error: 'Invalid request data',
details: expect.any(Array),
}),
);
});
});
describe('Mass Assignment Attack Scenarios', () => {
test('should prevent setting system fields during creation', async () => {
const systemFields = {
provider: 'openai',
model: 'gpt-4',
name: 'System Fields Test',
// System fields that should never be settable by users
__v: 99,
_id: new mongoose.Types.ObjectId(),
versions: [
{
name: 'Fake Version',
provider: 'fake',
model: 'fake-model',
},
],
};
mockReq.body = systemFields;
await createAgentHandler(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(201);
const createdAgent = mockRes.json.mock.calls[0][0];
// Verify system fields were not affected
expect(createdAgent.__v).not.toBe(99);
expect(createdAgent.versions).toHaveLength(1); // Should only have the auto-created version
expect(createdAgent.versions[0].name).toBe('System Fields Test'); // From actual creation
expect(createdAgent.versions[0].provider).toBe('openai'); // From actual creation
// Verify in database
const agentInDb = await Agent.findOne({ id: createdAgent.id });
expect(agentInDb.__v).not.toBe(99);
});
test('should prevent privilege escalation through isCollaborative', async () => {
// Create a non-collaborative agent
const authorId = new mongoose.Types.ObjectId();
const agent = await Agent.create({
id: `agent_${uuidv4()}`,
name: 'Private Agent',
provider: 'openai',
model: 'gpt-4',
author: authorId,
isCollaborative: false,
versions: [
{
name: 'Private Agent',
provider: 'openai',
model: 'gpt-4',
createdAt: new Date(),
updatedAt: new Date(),
},
],
});
// Try to make it collaborative as a different user
const attackerId = new mongoose.Types.ObjectId().toString();
mockReq.user.id = attackerId;
mockReq.params.id = agent.id;
mockReq.body = {
isCollaborative: true, // Trying to escalate privileges
};
await updateAgentHandler(mockReq, mockRes);
// Should be rejected
expect(mockRes.status).toHaveBeenCalledWith(403);
// Verify in database that it's still not collaborative
const agentInDb = await Agent.findOne({ id: agent.id });
expect(agentInDb.isCollaborative).toBe(false);
});
test('should prevent author hijacking', async () => {
const originalAuthorId = new mongoose.Types.ObjectId();
const attackerId = new mongoose.Types.ObjectId();
// Admin creates an agent
mockReq.user.id = originalAuthorId.toString();
mockReq.user.role = 'ADMIN';
mockReq.body = {
provider: 'openai',
model: 'gpt-4',
name: 'Admin Agent',
author: attackerId.toString(), // Trying to set different author
};
await createAgentHandler(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(201);
const createdAgent = mockRes.json.mock.calls[0][0];
// Author should be the actual user, not the attempted value
expect(createdAgent.author.toString()).toBe(originalAuthorId.toString());
expect(createdAgent.author.toString()).not.toBe(attackerId.toString());
// Verify in database
const agentInDb = await Agent.findOne({ id: createdAgent.id });
expect(agentInDb.author.toString()).toBe(originalAuthorId.toString());
});
test('should strip unknown fields to prevent future vulnerabilities', async () => {
mockReq.body = {
provider: 'openai',
model: 'gpt-4',
name: 'Future Proof Test',
// Unknown fields that might be added in future
superAdminAccess: true,
bypassAllChecks: true,
internalFlag: 'secret',
futureFeature: 'exploit',
};
await createAgentHandler(mockReq, mockRes);
expect(mockRes.status).toHaveBeenCalledWith(201);
const createdAgent = mockRes.json.mock.calls[0][0];
// Verify unknown fields were stripped
expect(createdAgent.superAdminAccess).toBeUndefined();
expect(createdAgent.bypassAllChecks).toBeUndefined();
expect(createdAgent.internalFlag).toBeUndefined();
expect(createdAgent.futureFeature).toBeUndefined();
// Also check in database
const agentInDb = await Agent.findOne({ id: createdAgent.id }).lean();
expect(agentInDb.superAdminAccess).toBeUndefined();
expect(agentInDb.bypassAllChecks).toBeUndefined();
expect(agentInDb.internalFlag).toBeUndefined();
expect(agentInDb.futureFeature).toBeUndefined();
});
});
});

View File

@@ -152,7 +152,7 @@ const chatV1 = async (req, res) => {
return res.end();
}
await cache.delete(cacheKey);
- const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
+ const cancelledRun = await openai.beta.threads.runs.cancel(run_id, { thread_id });
logger.debug('[/assistants/chat/] Cancelled run:', cancelledRun);
} catch (error) {
logger.error('[/assistants/chat/] Error cancelling run', error);
@@ -162,7 +162,7 @@ const chatV1 = async (req, res) => {
let run;
try {
- run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
+ run = await openai.beta.threads.runs.retrieve(run_id, { thread_id });
await recordUsage({
...run.usage,
model: run.model,
@@ -623,7 +623,7 @@ const chatV1 = async (req, res) => {
if (!response.run.usage) {
await sleep(3000);
- completedRun = await openai.beta.threads.runs.retrieve(thread_id, response.run.id);
+ completedRun = await openai.beta.threads.runs.retrieve(response.run.id, { thread_id });
if (completedRun.usage) {
await recordUsage({
...completedRun.usage,
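The run-method hunks in this file (and in the assistant helpers below) all track the same SDK migration: the thread-scoped run calls now take the run ID positionally, with the thread ID moved into the params object. Sketched in isolation, with the surrounding handler assumed:

// Old-style calls removed by this diff:
//   await openai.beta.threads.runs.cancel(thread_id, run_id);
//   await openai.beta.threads.runs.retrieve(thread_id, run_id);
// New-style calls, as used throughout these hunks:
async function cancelAndInspectRun(openai, thread_id, run_id) {
  const cancelledRun = await openai.beta.threads.runs.cancel(run_id, { thread_id });
  const run = await openai.beta.threads.runs.retrieve(run_id, { thread_id });
  return { cancelledRun, run };
}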

View File

@@ -467,7 +467,7 @@ const chatV2 = async (req, res) => {
if (!response.run.usage) {
await sleep(3000);
- completedRun = await openai.beta.threads.runs.retrieve(thread_id, response.run.id);
+ completedRun = await openai.beta.threads.runs.retrieve(response.run.id, { thread_id });
if (completedRun.usage) {
await recordUsage({
...completedRun.usage,

View File

@@ -108,7 +108,7 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
return res.end();
}
await cache.delete(cacheKey);
- const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
+ const cancelledRun = await openai.beta.threads.runs.cancel(run_id, { thread_id });
logger.debug(`[${originPath}] Cancelled run:`, cancelledRun);
} catch (error) {
logger.error(`[${originPath}] Error cancelling run`, error);
@@ -118,7 +118,7 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
let run;
try {
- run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
+ run = await openai.beta.threads.runs.retrieve(run_id, { thread_id });
await recordUsage({
...run.usage,
model: run.model,

View File

@@ -173,6 +173,16 @@ const listAssistantsForAzure = async ({ req, res, version, azureConfig = {}, que
};
};
+ /**
+ * Initializes the OpenAI client.
+ * @param {object} params - The parameters object.
+ * @param {ServerRequest} params.req - The request object.
+ * @param {ServerResponse} params.res - The response object.
+ * @param {TEndpointOption} params.endpointOption - The endpoint options.
+ * @param {boolean} params.initAppClient - Whether to initialize the app client.
+ * @param {string} params.overrideEndpoint - The endpoint to override.
+ * @returns {Promise<{ openai: OpenAIClient, openAIApiKey: string; client: import('~/app/clients/OpenAIClient') }>} - The initialized OpenAI client.
+ */
async function getOpenAIClient({ req, res, endpointOption, initAppClient, overrideEndpoint }) {
let endpoint = overrideEndpoint ?? req.body.endpoint ?? req.query.endpoint;
const version = await getCurrentVersion(req, endpoint);

View File

@@ -197,7 +197,7 @@ const deleteAssistant = async (req, res) => {
await validateAuthor({ req, openai });
const assistant_id = req.params.id;
- const deletionStatus = await openai.beta.assistants.del(assistant_id);
+ const deletionStatus = await openai.beta.assistants.delete(assistant_id);
if (deletionStatus?.deleted) {
await deleteAssistantActions({ req, assistant_id });
}
@@ -365,7 +365,7 @@ const uploadAssistantAvatar = async (req, res) => {
try {
await fs.unlink(req.file.path);
logger.debug('[/:agent_id/avatar] Temp. image upload file deleted');
- } catch (error) {
+ } catch {
logger.debug('[/:agent_id/avatar] Temp. image upload file already deleted');
}
}

View File

@@ -1,14 +1,13 @@
const { nanoid } = require('nanoid');
const { EnvVar } = require('@librechat/agents');
- const { checkAccess } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
+ const { checkAccess, loadWebSearchAuth } = require('@librechat/api');
const {
Tools,
AuthType,
Permissions,
ToolCallTypes,
PermissionTypes,
- loadWebSearchAuth,
} = require('librechat-data-provider');
const { processFileURL, uploadImageBuffer } = require('~/server/services/Files/process');
const { processCodeOutput } = require('~/server/services/Files/Code/process');

View File

@@ -16,7 +16,7 @@ const { connectDb, indexSync } = require('~/db');
const validateImageRequest = require('./middleware/validateImageRequest');
const { jwtLogin, ldapLogin, passportLogin } = require('~/strategies');
const errorController = require('./controllers/ErrorController');
- const initializeMCP = require('./services/initializeMCP');
+ const initializeMCPs = require('./services/initializeMCPs');
const configureSocialLogins = require('./socialLogins');
const AppService = require('./services/AppService');
const staticCache = require('./utils/staticCache');
@@ -55,7 +55,6 @@ const startServer = async () => {
/* Middleware */
app.use(noIndex);
- app.use(errorController);
app.use(express.json({ limit: '3mb' }));
app.use(express.urlencoded({ extended: true, limit: '3mb' }));
app.use(mongoSanitize());
@@ -121,6 +120,9 @@ const startServer = async () => {
app.use('/api/tags', routes.tags);
app.use('/api/mcp', routes.mcp);
+ // Add the error controller one more time after all routes
+ app.use(errorController);
app.use((req, res) => {
res.set({
'Cache-Control': process.env.INDEX_CACHE_CONTROL || 'no-cache, no-store, must-revalidate',
@@ -144,7 +146,7 @@ const startServer = async () => {
logger.info(`Server listening at http://${host == '0.0.0.0' ? 'localhost' : host}:${port}`);
}
- initializeMCP(app);
+ initializeMCPs(app);
});
};

View File

@@ -1,5 +1,4 @@
- const fs = require('fs');
const path = require('path');
const request = require('supertest');
const { MongoMemoryServer } = require('mongodb-memory-server');
const mongoose = require('mongoose');
@@ -59,6 +58,30 @@ describe('Server Configuration', () => {
expect(response.headers['pragma']).toBe('no-cache');
expect(response.headers['expires']).toBe('0');
});
+ it('should return 500 for unknown errors via ErrorController', async () => {
+ // Testing the error handling here on top of unit tests to ensure the middleware is correctly integrated
+ // Mock MongoDB operations to fail
+ const originalFindOne = mongoose.models.User.findOne;
+ const mockError = new Error('MongoDB operation failed');
+ mongoose.models.User.findOne = jest.fn().mockImplementation(() => {
+ throw mockError;
+ });
+ try {
+ const response = await request(app).post('/api/auth/login').send({
+ email: 'test@example.com',
+ password: 'password123',
+ });
+ expect(response.status).toBe(500);
+ expect(response.text).toBe('An unknown error occurred.');
+ } finally {
+ // Restore original function
+ mongoose.models.User.findOne = originalFindOne;
+ }
+ });
});
// Polls the /health endpoint every 30ms for up to 10 seconds to wait for the server to start completely
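The new integration test pins the contract of the relocated errorController: an error thrown inside any route should surface as a 500 with the literal body 'An unknown error occurred.'. A hedged sketch of a minimal Express error middleware matching that contract (the real controller likely logs and may branch on error types):

// Sketch only; the actual ErrorController is not shown in this diff.
function errorController(err, req, res, next) {
  if (res.headersSent) {
    return next(err);
  }
  return res.status(500).send('An unknown error occurred.');
}

Registering it after all routes, as the server diff above does, lets Express funnel any error raised by earlier middleware or handlers into this fallback.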

View File

@@ -47,7 +47,7 @@ async function abortRun(req, res) {
try {
await cache.set(cacheKey, 'cancelled', three_minutes);
- const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
+ const cancelledRun = await openai.beta.threads.runs.cancel(run_id, { thread_id });
logger.debug('[abortRun] Cancelled run:', cancelledRun);
} catch (error) {
logger.error('[abortRun] Error cancelling run', error);
@@ -60,7 +60,7 @@ async function abortRun(req, res) {
}
try {
- const run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
+ const run = await openai.beta.threads.runs.retrieve(run_id, { thread_id });
await recordUsage({
...run.usage,
model: run.model,

View File

@@ -1,3 +1,4 @@
+ const { handleError } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const {
EndpointURLs,
@@ -14,7 +15,6 @@ const openAI = require('~/server/services/Endpoints/openAI');
const agents = require('~/server/services/Endpoints/agents');
const custom = require('~/server/services/Endpoints/custom');
const google = require('~/server/services/Endpoints/google');
- const { handleError } = require('~/server/utils');
const buildFunction = {
[EModelEndpoint.openAI]: openAI.buildOptions,

View File

@@ -18,7 +18,6 @@ const message = 'Your account has been temporarily banned due to violations of o
* @function
* @param {Object} req - Express Request object.
* @param {Object} res - Express Response object.
- * @param {String} errorMessage - Error message to be displayed in case of /api/ask or /api/edit request.
*
* @returns {Promise<Object>} - Returns a Promise which when resolved sends a response status of 403 with a specific message if request is not of api/ask or api/edit types. If it is, calls `denyRequest()` function.
*/
@@ -135,6 +134,7 @@ const checkBan = async (req, res, next = () => {}) => {
return await banResponse(req, res);
} catch (error) {
logger.error('Error in checkBan middleware:', error);
+ return next(error);
}
};

View File

@@ -1,4 +1,4 @@
- const { Time, CacheKeys } = require('librechat-data-provider');
+ const { Time, CacheKeys, ViolationTypes } = require('librechat-data-provider');
const clearPendingReq = require('~/cache/clearPendingReq');
const { logViolation, getLogStores } = require('~/cache');
const { isEnabled } = require('~/server/utils');
@@ -37,7 +37,7 @@ const concurrentLimiter = async (req, res, next) => {
const userId = req.user?.id ?? req.user?._id ?? '';
const limit = Math.max(CONCURRENT_MESSAGE_MAX, 1);
- const type = 'concurrent';
+ const type = ViolationTypes.CONCURRENT;
const key = `${isEnabled(USE_REDIS) ? namespace : ''}:${userId}`;
const pendingRequests = +((await cache.get(key)) ?? 0);
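This is one instance of a sweep replacing bare string literals ('concurrent', 'logins', 'message_limit', 'registrations') with members of the ViolationTypes enum from librechat-data-provider. Based only on the literals visible in this diff, the relevant slice of that enum presumably looks something like:

// Values inferred from the string literals being replaced in these hunks;
// this is a sketch, not the actual librechat-data-provider source.
const ViolationTypes = {
  CONCURRENT: 'concurrent',
  LOGINS: 'logins',
  REGISTRATIONS: 'registrations',
  MESSAGE_LIMIT: 'message_limit',
  TOOL_CALL_LIMIT: 'tool_call_limit',
  FILE_UPLOAD_LIMIT: 'file_upload_limit',
};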

View File

@@ -0,0 +1,79 @@
const rateLimit = require('express-rate-limit');
const { ViolationTypes } = require('librechat-data-provider');
const { limiterCache } = require('~/cache/cacheFactory');
const logViolation = require('~/cache/logViolation');
const getEnvironmentVariables = () => {
const FORK_IP_MAX = parseInt(process.env.FORK_IP_MAX) || 30;
const FORK_IP_WINDOW = parseInt(process.env.FORK_IP_WINDOW) || 1;
const FORK_USER_MAX = parseInt(process.env.FORK_USER_MAX) || 7;
const FORK_USER_WINDOW = parseInt(process.env.FORK_USER_WINDOW) || 1;
const FORK_VIOLATION_SCORE = process.env.FORK_VIOLATION_SCORE;
const forkIpWindowMs = FORK_IP_WINDOW * 60 * 1000;
const forkIpMax = FORK_IP_MAX;
const forkIpWindowInMinutes = forkIpWindowMs / 60000;
const forkUserWindowMs = FORK_USER_WINDOW * 60 * 1000;
const forkUserMax = FORK_USER_MAX;
const forkUserWindowInMinutes = forkUserWindowMs / 60000;
return {
forkIpWindowMs,
forkIpMax,
forkIpWindowInMinutes,
forkUserWindowMs,
forkUserMax,
forkUserWindowInMinutes,
forkViolationScore: FORK_VIOLATION_SCORE,
};
};
const createForkHandler = (ip = true) => {
const {
forkIpMax,
forkUserMax,
forkViolationScore,
forkIpWindowInMinutes,
forkUserWindowInMinutes,
} = getEnvironmentVariables();
return async (req, res) => {
const type = ViolationTypes.FILE_UPLOAD_LIMIT;
const errorMessage = {
type,
max: ip ? forkIpMax : forkUserMax,
limiter: ip ? 'ip' : 'user',
windowInMinutes: ip ? forkIpWindowInMinutes : forkUserWindowInMinutes,
};
await logViolation(req, res, type, errorMessage, forkViolationScore);
res.status(429).json({ message: 'Too many conversation fork requests. Try again later' });
};
};
const createForkLimiters = () => {
const { forkIpWindowMs, forkIpMax, forkUserWindowMs, forkUserMax } = getEnvironmentVariables();
const ipLimiterOptions = {
windowMs: forkIpWindowMs,
max: forkIpMax,
handler: createForkHandler(),
store: limiterCache('fork_ip_limiter'),
};
const userLimiterOptions = {
windowMs: forkUserWindowMs,
max: forkUserMax,
handler: createForkHandler(false),
keyGenerator: function (req) {
return req.user?.id;
},
store: limiterCache('fork_user_limiter'),
};
const forkIpLimiter = rateLimit(ipLimiterOptions);
const forkUserLimiter = rateLimit(userLimiterOptions);
return { forkIpLimiter, forkUserLimiter };
};
module.exports = { createForkLimiters };
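A plausible wiring of the new fork limiters on the fork route, with the route path and handler assumed for illustration:

// Hypothetical mounting; the actual route path and handler are not shown here.
const express = require('express');
const { createForkLimiters } = require('~/server/middleware/limiters/forkLimiters');

const router = express.Router();
const { forkIpLimiter, forkUserLimiter } = createForkLimiters();

// IP-based limit first, then the per-user limit, then the real handler
router.post('/fork', forkIpLimiter, forkUserLimiter, (req, res) => {
  /* ...fork the conversation... */
});

Note that the new handler logs violations under ViolationTypes.FILE_UPLOAD_LIMIT, mirroring the import limiter below rather than introducing a fork-specific violation type.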

View File

@@ -1,16 +1,14 @@
const rateLimit = require('express-rate-limit');
- const { RedisStore } = require('rate-limit-redis');
const { ViolationTypes } = require('librechat-data-provider');
- const ioredisClient = require('~/cache/ioredisClient');
+ const { limiterCache } = require('~/cache/cacheFactory');
const logViolation = require('~/cache/logViolation');
- const { isEnabled } = require('~/server/utils');
- const { logger } = require('~/config');
const getEnvironmentVariables = () => {
const IMPORT_IP_MAX = parseInt(process.env.IMPORT_IP_MAX) || 100;
const IMPORT_IP_WINDOW = parseInt(process.env.IMPORT_IP_WINDOW) || 15;
const IMPORT_USER_MAX = parseInt(process.env.IMPORT_USER_MAX) || 50;
const IMPORT_USER_WINDOW = parseInt(process.env.IMPORT_USER_WINDOW) || 15;
+ const IMPORT_VIOLATION_SCORE = process.env.IMPORT_VIOLATION_SCORE;
const importIpWindowMs = IMPORT_IP_WINDOW * 60 * 1000;
const importIpMax = IMPORT_IP_MAX;
@@ -27,12 +25,18 @@ const getEnvironmentVariables = () => {
importUserWindowMs,
importUserMax,
importUserWindowInMinutes,
+ importViolationScore: IMPORT_VIOLATION_SCORE,
};
};
const createImportHandler = (ip = true) => {
- const { importIpMax, importIpWindowInMinutes, importUserMax, importUserWindowInMinutes } =
- getEnvironmentVariables();
+ const {
+ importIpMax,
+ importUserMax,
+ importViolationScore,
+ importIpWindowInMinutes,
+ importUserWindowInMinutes,
+ } = getEnvironmentVariables();
return async (req, res) => {
const type = ViolationTypes.FILE_UPLOAD_LIMIT;
@@ -43,7 +47,7 @@ const createImportHandler = (ip = true) => {
windowInMinutes: ip ? importIpWindowInMinutes : importUserWindowInMinutes,
};
- await logViolation(req, res, type, errorMessage);
+ await logViolation(req, res, type, errorMessage, importViolationScore);
res.status(429).json({ message: 'Too many conversation import requests. Try again later' });
};
};
@@ -56,6 +60,7 @@ const createImportLimiters = () => {
windowMs: importIpWindowMs,
max: importIpMax,
handler: createImportHandler(),
+ store: limiterCache('import_ip_limiter'),
};
const userLimiterOptions = {
windowMs: importUserWindowMs,
@@ -64,23 +69,9 @@ const createImportLimiters = () => {
keyGenerator: function (req) {
return req.user?.id; // Use the user ID or NULL if not available
},
+ store: limiterCache('import_user_limiter'),
};
- if (isEnabled(process.env.USE_REDIS) && ioredisClient) {
- logger.debug('Using Redis for import rate limiters.');
- const sendCommand = (...args) => ioredisClient.call(...args);
- const ipStore = new RedisStore({
- sendCommand,
- prefix: 'import_ip_limiter:',
- });
- const userStore = new RedisStore({
- sendCommand,
- prefix: 'import_user_limiter:',
- });
- ipLimiterOptions.store = ipStore;
- userLimiterOptions.store = userStore;
- }
const importIpLimiter = rateLimit(ipLimiterOptions);
const importUserLimiter = rateLimit(userLimiterOptions);
return { importIpLimiter, importUserLimiter };
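Every limiter in this sweep now delegates store selection to limiterCache from ~/cache/cacheFactory instead of constructing a RedisStore inline. Judging from the removed code, a factory along these lines would reproduce the old behavior (a sketch; the real implementation may differ):

const { RedisStore } = require('rate-limit-redis');
const ioredisClient = require('~/cache/ioredisClient');
const { isEnabled } = require('~/server/utils');

// Reconstructed from the inline code this sweep removes; hedged sketch only.
function limiterCache(prefix) {
  if (isEnabled(process.env.USE_REDIS) && ioredisClient) {
    return new RedisStore({
      sendCommand: (...args) => ioredisClient.call(...args),
      prefix: `${prefix}:`,
    });
  }
  // Returning undefined lets express-rate-limit fall back to its memory store
  return undefined;
}

module.exports = { limiterCache };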

View File

@@ -4,6 +4,7 @@ const createSTTLimiters = require('./sttLimiters');
const loginLimiter = require('./loginLimiter');
const importLimiters = require('./importLimiters');
const uploadLimiters = require('./uploadLimiters');
+ const forkLimiters = require('./forkLimiters');
const registerLimiter = require('./registerLimiter');
const toolCallLimiter = require('./toolCallLimiter');
const messageLimiters = require('./messageLimiters');
@@ -14,6 +15,7 @@ module.exports = {
...uploadLimiters,
...importLimiters,
...messageLimiters,
+ ...forkLimiters,
loginLimiter,
registerLimiter,
toolCallLimiter,

View File

@@ -1,9 +1,8 @@
const rateLimit = require('express-rate-limit');
- const { RedisStore } = require('rate-limit-redis');
- const { removePorts, isEnabled } = require('~/server/utils');
- const ioredisClient = require('~/cache/ioredisClient');
+ const { ViolationTypes } = require('librechat-data-provider');
+ const { removePorts } = require('~/server/utils');
+ const { limiterCache } = require('~/cache/cacheFactory');
const { logViolation } = require('~/cache');
- const { logger } = require('~/config');
const { LOGIN_WINDOW = 5, LOGIN_MAX = 7, LOGIN_VIOLATION_SCORE: score } = process.env;
const windowMs = LOGIN_WINDOW * 60 * 1000;
@@ -12,7 +11,7 @@ const windowInMinutes = windowMs / 60000;
const message = `Too many login attempts, please try again after ${windowInMinutes} minutes.`;
const handler = async (req, res) => {
- const type = 'logins';
+ const type = ViolationTypes.LOGINS;
const errorMessage = {
type,
max,
@@ -28,17 +27,9 @@ const limiterOptions = {
max,
handler,
keyGenerator: removePorts,
+ store: limiterCache('login_limiter'),
};
- if (isEnabled(process.env.USE_REDIS) && ioredisClient) {
- logger.debug('Using Redis for login rate limiter.');
- const store = new RedisStore({
- sendCommand: (...args) => ioredisClient.call(...args),
- prefix: 'login_limiter:',
- });
- limiterOptions.store = store;
- }
const loginLimiter = rateLimit(limiterOptions);
module.exports = loginLimiter;

View File

@@ -1,16 +1,15 @@
const rateLimit = require('express-rate-limit');
- const { RedisStore } = require('rate-limit-redis');
+ const { ViolationTypes } = require('librechat-data-provider');
const denyRequest = require('~/server/middleware/denyRequest');
- const ioredisClient = require('~/cache/ioredisClient');
- const { isEnabled } = require('~/server/utils');
+ const { limiterCache } = require('~/cache/cacheFactory');
const { logViolation } = require('~/cache');
- const { logger } = require('~/config');
const {
MESSAGE_IP_MAX = 40,
MESSAGE_IP_WINDOW = 1,
MESSAGE_USER_MAX = 40,
MESSAGE_USER_WINDOW = 1,
+ MESSAGE_VIOLATION_SCORE: score,
} = process.env;
const ipWindowMs = MESSAGE_IP_WINDOW * 60 * 1000;
@@ -31,7 +30,7 @@ const userWindowInMinutes = userWindowMs / 60000;
*/
const createHandler = (ip = true) => {
return async (req, res) => {
- const type = 'message_limit';
+ const type = ViolationTypes.MESSAGE_LIMIT;
const errorMessage = {
type,
max: ip ? ipMax : userMax,
@@ -39,7 +38,7 @@ const createHandler = (ip = true) => {
windowInMinutes: ip ? ipWindowInMinutes : userWindowInMinutes,
};
- await logViolation(req, res, type, errorMessage);
+ await logViolation(req, res, type, errorMessage, score);
return await denyRequest(req, res, errorMessage);
};
};
@@ -51,6 +50,7 @@ const ipLimiterOptions = {
windowMs: ipWindowMs,
max: ipMax,
handler: createHandler(),
+ store: limiterCache('message_ip_limiter'),
};
const userLimiterOptions = {
@@ -60,23 +60,9 @@ const userLimiterOptions = {
keyGenerator: function (req) {
return req.user?.id; // Use the user ID or NULL if not available
},
+ store: limiterCache('message_user_limiter'),
};
- if (isEnabled(process.env.USE_REDIS) && ioredisClient) {
- logger.debug('Using Redis for message rate limiters.');
- const sendCommand = (...args) => ioredisClient.call(...args);
- const ipStore = new RedisStore({
- sendCommand,
- prefix: 'message_ip_limiter:',
- });
- const userStore = new RedisStore({
- sendCommand,
- prefix: 'message_user_limiter:',
- });
- ipLimiterOptions.store = ipStore;
- userLimiterOptions.store = userStore;
- }
/**
* Message request rate limiter by IP
*/

View File

@@ -1,9 +1,8 @@
const rateLimit = require('express-rate-limit');
- const { RedisStore } = require('rate-limit-redis');
- const { removePorts, isEnabled } = require('~/server/utils');
- const ioredisClient = require('~/cache/ioredisClient');
+ const { ViolationTypes } = require('librechat-data-provider');
+ const { removePorts } = require('~/server/utils');
+ const { limiterCache } = require('~/cache/cacheFactory');
const { logViolation } = require('~/cache');
- const { logger } = require('~/config');
const { REGISTER_WINDOW = 60, REGISTER_MAX = 5, REGISTRATION_VIOLATION_SCORE: score } = process.env;
const windowMs = REGISTER_WINDOW * 60 * 1000;
@@ -12,7 +11,7 @@ const windowInMinutes = windowMs / 60000;
const message = `Too many accounts created, please try again after ${windowInMinutes} minutes`;
const handler = async (req, res) => {
- const type = 'registrations';
+ const type = ViolationTypes.REGISTRATIONS;
const errorMessage = {
type,
max,
@@ -28,17 +27,9 @@ const limiterOptions = {
max,
handler,
keyGenerator: removePorts,
+ store: limiterCache('register_limiter'),
};
- if (isEnabled(process.env.USE_REDIS) && ioredisClient) {
- logger.debug('Using Redis for register rate limiter.');
- const store = new RedisStore({
- sendCommand: (...args) => ioredisClient.call(...args),
- prefix: 'register_limiter:',
- });
- limiterOptions.store = store;
- }
const registerLimiter = rateLimit(limiterOptions);
module.exports = registerLimiter;

View File

@@ -1,10 +1,8 @@
const rateLimit = require('express-rate-limit');
- const { RedisStore } = require('rate-limit-redis');
const { ViolationTypes } = require('librechat-data-provider');
- const { removePorts, isEnabled } = require('~/server/utils');
- const ioredisClient = require('~/cache/ioredisClient');
+ const { removePorts } = require('~/server/utils');
+ const { limiterCache } = require('~/cache/cacheFactory');
const { logViolation } = require('~/cache');
- const { logger } = require('~/config');
const {
RESET_PASSWORD_WINDOW = 2,
@@ -33,17 +31,9 @@ const limiterOptions = {
max,
handler,
keyGenerator: removePorts,
+ store: limiterCache('reset_password_limiter'),
};
- if (isEnabled(process.env.USE_REDIS) && ioredisClient) {
- logger.debug('Using Redis for reset password rate limiter.');
- const store = new RedisStore({
- sendCommand: (...args) => ioredisClient.call(...args),
- prefix: 'reset_password_limiter:',
- });
- limiterOptions.store = store;
- }
const resetPasswordLimiter = rateLimit(limiterOptions);
module.exports = resetPasswordLimiter;

View File

@@ -1,16 +1,14 @@
const rateLimit = require('express-rate-limit');
- const { RedisStore } = require('rate-limit-redis');
const { ViolationTypes } = require('librechat-data-provider');
- const ioredisClient = require('~/cache/ioredisClient');
+ const { limiterCache } = require('~/cache/cacheFactory');
const logViolation = require('~/cache/logViolation');
- const { isEnabled } = require('~/server/utils');
- const { logger } = require('~/config');
const getEnvironmentVariables = () => {
const STT_IP_MAX = parseInt(process.env.STT_IP_MAX) || 100;
const STT_IP_WINDOW = parseInt(process.env.STT_IP_WINDOW) || 1;
const STT_USER_MAX = parseInt(process.env.STT_USER_MAX) || 50;
const STT_USER_WINDOW = parseInt(process.env.STT_USER_WINDOW) || 1;
+ const STT_VIOLATION_SCORE = process.env.STT_VIOLATION_SCORE;
const sttIpWindowMs = STT_IP_WINDOW * 60 * 1000;
const sttIpMax = STT_IP_MAX;
@@ -27,11 +25,12 @@ const getEnvironmentVariables = () => {
sttUserWindowMs,
sttUserMax,
sttUserWindowInMinutes,
+ sttViolationScore: STT_VIOLATION_SCORE,
};
};
const createSTTHandler = (ip = true) => {
- const { sttIpMax, sttIpWindowInMinutes, sttUserMax, sttUserWindowInMinutes } =
+ const { sttIpMax, sttIpWindowInMinutes, sttUserMax, sttUserWindowInMinutes, sttViolationScore } =
getEnvironmentVariables();
return async (req, res) => {
@@ -43,7 +42,7 @@ const createSTTHandler = (ip = true) => {
windowInMinutes: ip ? sttIpWindowInMinutes : sttUserWindowInMinutes,
};
- await logViolation(req, res, type, errorMessage);
+ await logViolation(req, res, type, errorMessage, sttViolationScore);
res.status(429).json({ message: 'Too many STT requests. Try again later' });
};
};
@@ -55,6 +54,7 @@ const createSTTLimiters = () => {
windowMs: sttIpWindowMs,
max: sttIpMax,
handler: createSTTHandler(),
+ store: limiterCache('stt_ip_limiter'),
};
const userLimiterOptions = {
@@ -64,23 +64,9 @@ const createSTTLimiters = () => {
keyGenerator: function (req) {
return req.user?.id; // Use the user ID or NULL if not available
},
+ store: limiterCache('stt_user_limiter'),
};
- if (isEnabled(process.env.USE_REDIS) && ioredisClient) {
- logger.debug('Using Redis for STT rate limiters.');
- const sendCommand = (...args) => ioredisClient.call(...args);
- const ipStore = new RedisStore({
- sendCommand,
- prefix: 'stt_ip_limiter:',
- });
- const userStore = new RedisStore({
- sendCommand,
- prefix: 'stt_user_limiter:',
- });
- ipLimiterOptions.store = ipStore;
- userLimiterOptions.store = userStore;
- }
const sttIpLimiter = rateLimit(ipLimiterOptions);
const sttUserLimiter = rateLimit(userLimiterOptions);

View File

@@ -1,10 +1,9 @@
const rateLimit = require('express-rate-limit');
- const { RedisStore } = require('rate-limit-redis');
const { ViolationTypes } = require('librechat-data-provider');
- const ioredisClient = require('~/cache/ioredisClient');
+ const { limiterCache } = require('~/cache/cacheFactory');
const logViolation = require('~/cache/logViolation');
- const { isEnabled } = require('~/server/utils');
- const { logger } = require('~/config');
+ const { TOOL_CALL_VIOLATION_SCORE: score } = process.env;
const handler = async (req, res) => {
const type = ViolationTypes.TOOL_CALL_LIMIT;
@@ -15,7 +14,7 @@ const handler = async (req, res) => {
windowInMinutes: 1,
};
- await logViolation(req, res, type, errorMessage, 0);
+ await logViolation(req, res, type, errorMessage, score);
res.status(429).json({ message: 'Too many tool call requests. Try again later' });
};
@@ -26,17 +25,9 @@ const limiterOptions = {
keyGenerator: function (req) {
return req.user?.id;
},
+ store: limiterCache('tool_call_limiter'),
};
- if (isEnabled(process.env.USE_REDIS) && ioredisClient) {
- logger.debug('Using Redis for tool call rate limiter.');
- const store = new RedisStore({
- sendCommand: (...args) => ioredisClient.call(...args),
- prefix: 'tool_call_limiter:',
- });
- limiterOptions.store = store;
- }
const toolCallLimiter = rateLimit(limiterOptions);
module.exports = toolCallLimiter;

View File

@@ -1,16 +1,14 @@
const rateLimit = require('express-rate-limit');
- const { RedisStore } = require('rate-limit-redis');
const { ViolationTypes } = require('librechat-data-provider');
- const ioredisClient = require('~/cache/ioredisClient');
const logViolation = require('~/cache/logViolation');
- const { isEnabled } = require('~/server/utils');
- const { logger } = require('~/config');
+ const { limiterCache } = require('~/cache/cacheFactory');
const getEnvironmentVariables = () => {
const TTS_IP_MAX = parseInt(process.env.TTS_IP_MAX) || 100;
const TTS_IP_WINDOW = parseInt(process.env.TTS_IP_WINDOW) || 1;
const TTS_USER_MAX = parseInt(process.env.TTS_USER_MAX) || 50;
const TTS_USER_WINDOW = parseInt(process.env.TTS_USER_WINDOW) || 1;
+ const TTS_VIOLATION_SCORE = process.env.TTS_VIOLATION_SCORE;
const ttsIpWindowMs = TTS_IP_WINDOW * 60 * 1000;
const ttsIpMax = TTS_IP_MAX;
@@ -27,11 +25,12 @@ const getEnvironmentVariables = () => {
ttsUserWindowMs,
ttsUserMax,
ttsUserWindowInMinutes,
+ ttsViolationScore: TTS_VIOLATION_SCORE,
};
};
const createTTSHandler = (ip = true) => {
const { ttsIpMax, ttsIpWindowInMinutes, ttsUserMax, ttsUserWindowInMinutes } =
const { ttsIpMax, ttsIpWindowInMinutes, ttsUserMax, ttsUserWindowInMinutes, ttsViolationScore } =
getEnvironmentVariables();
return async (req, res) => {
@@ -43,7 +42,7 @@ const createTTSHandler = (ip = true) => {
windowInMinutes: ip ? ttsIpWindowInMinutes : ttsUserWindowInMinutes,
};
await logViolation(req, res, type, errorMessage);
await logViolation(req, res, type, errorMessage, ttsViolationScore);
res.status(429).json({ message: 'Too many TTS requests. Try again later' });
};
};
@@ -55,32 +54,19 @@ const createTTSLimiters = () => {
windowMs: ttsIpWindowMs,
max: ttsIpMax,
handler: createTTSHandler(),
store: limiterCache('tts_ip_limiter'),
};
const userLimiterOptions = {
windowMs: ttsUserWindowMs,
max: ttsUserMax,
handler: createTTSHandler(false),
store: limiterCache('tts_user_limiter'),
keyGenerator: function (req) {
return req.user?.id; // Use the user ID or NULL if not available
},
};
if (isEnabled(process.env.USE_REDIS) && ioredisClient) {
logger.debug('Using Redis for TTS rate limiters.');
const sendCommand = (...args) => ioredisClient.call(...args);
const ipStore = new RedisStore({
sendCommand,
prefix: 'tts_ip_limiter:',
});
const userStore = new RedisStore({
sendCommand,
prefix: 'tts_user_limiter:',
});
ipLimiterOptions.store = ipStore;
userLimiterOptions.store = userStore;
}
const ttsIpLimiter = rateLimit(ipLimiterOptions);
const ttsUserLimiter = rateLimit(userLimiterOptions);

View File

@@ -1,16 +1,14 @@
const rateLimit = require('express-rate-limit');
const { RedisStore } = require('rate-limit-redis');
const { ViolationTypes } = require('librechat-data-provider');
const ioredisClient = require('~/cache/ioredisClient');
const { limiterCache } = require('~/cache/cacheFactory');
const logViolation = require('~/cache/logViolation');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');
const getEnvironmentVariables = () => {
const FILE_UPLOAD_IP_MAX = parseInt(process.env.FILE_UPLOAD_IP_MAX) || 100;
const FILE_UPLOAD_IP_WINDOW = parseInt(process.env.FILE_UPLOAD_IP_WINDOW) || 15;
const FILE_UPLOAD_USER_MAX = parseInt(process.env.FILE_UPLOAD_USER_MAX) || 50;
const FILE_UPLOAD_USER_WINDOW = parseInt(process.env.FILE_UPLOAD_USER_WINDOW) || 15;
const FILE_UPLOAD_VIOLATION_SCORE = process.env.FILE_UPLOAD_VIOLATION_SCORE;
const fileUploadIpWindowMs = FILE_UPLOAD_IP_WINDOW * 60 * 1000;
const fileUploadIpMax = FILE_UPLOAD_IP_MAX;
@@ -27,6 +25,7 @@ const getEnvironmentVariables = () => {
fileUploadUserWindowMs,
fileUploadUserMax,
fileUploadUserWindowInMinutes,
fileUploadViolationScore: FILE_UPLOAD_VIOLATION_SCORE,
};
};
@@ -36,6 +35,7 @@ const createFileUploadHandler = (ip = true) => {
fileUploadIpWindowInMinutes,
fileUploadUserMax,
fileUploadUserWindowInMinutes,
fileUploadViolationScore,
} = getEnvironmentVariables();
return async (req, res) => {
@@ -47,7 +47,7 @@ const createFileUploadHandler = (ip = true) => {
windowInMinutes: ip ? fileUploadIpWindowInMinutes : fileUploadUserWindowInMinutes,
};
await logViolation(req, res, type, errorMessage);
await logViolation(req, res, type, errorMessage, fileUploadViolationScore);
res.status(429).json({ message: 'Too many file upload requests. Try again later' });
};
};
@@ -60,6 +60,7 @@ const createFileLimiters = () => {
windowMs: fileUploadIpWindowMs,
max: fileUploadIpMax,
handler: createFileUploadHandler(),
store: limiterCache('file_upload_ip_limiter'),
};
const userLimiterOptions = {
@@ -69,23 +70,9 @@ const createFileLimiters = () => {
keyGenerator: function (req) {
return req.user?.id; // Use the user ID or NULL if not available
},
store: limiterCache('file_upload_user_limiter'),
};
if (isEnabled(process.env.USE_REDIS) && ioredisClient) {
logger.debug('Using Redis for file upload rate limiters.');
const sendCommand = (...args) => ioredisClient.call(...args);
const ipStore = new RedisStore({
sendCommand,
prefix: 'file_upload_ip_limiter:',
});
const userStore = new RedisStore({
sendCommand,
prefix: 'file_upload_user_limiter:',
});
ipLimiterOptions.store = ipStore;
userLimiterOptions.store = userStore;
}
const fileUploadIpLimiter = rateLimit(ipLimiterOptions);
const fileUploadUserLimiter = rateLimit(userLimiterOptions);

View File

@@ -1,10 +1,8 @@
const rateLimit = require('express-rate-limit');
const { RedisStore } = require('rate-limit-redis');
const { ViolationTypes } = require('librechat-data-provider');
const { removePorts, isEnabled } = require('~/server/utils');
const ioredisClient = require('~/cache/ioredisClient');
const { removePorts } = require('~/server/utils');
const { limiterCache } = require('~/cache/cacheFactory');
const { logViolation } = require('~/cache');
const { logger } = require('~/config');
const {
VERIFY_EMAIL_WINDOW = 2,
@@ -33,17 +31,9 @@ const limiterOptions = {
max,
handler,
keyGenerator: removePorts,
store: limiterCache('verify_email_limiter'),
};
if (isEnabled(process.env.USE_REDIS) && ioredisClient) {
logger.debug('Using Redis for verify email rate limiter.');
const store = new RedisStore({
sendCommand: (...args) => ioredisClient.call(...args),
prefix: 'verify_email_limiter:',
});
limiterOptions.store = store;
}
const verifyEmailLimiter = rateLimit(limiterOptions);
module.exports = verifyEmailLimiter;

View File

@@ -1,5 +1,6 @@
const uap = require('ua-parser-js');
const { handleError } = require('../utils');
const { ViolationTypes } = require('librechat-data-provider');
const { handleError } = require('@librechat/api');
const { logViolation } = require('../../cache');
/**
@@ -21,7 +22,7 @@ async function uaParser(req, res, next) {
const ua = uap(req.headers['user-agent']);
if (!ua.browser.name) {
const type = 'non_browser';
const type = ViolationTypes.NON_BROWSER;
await logViolation(req, res, type, { type }, score);
return handleError(res, { message: 'Illegal request' });
}

View File

@@ -1,4 +1,4 @@
const { handleError } = require('../utils');
const { handleError } = require('@librechat/api');
function validateEndpoint(req, res, next) {
const { endpoint: _endpoint, endpointType } = req.body;

View File

@@ -1,6 +1,6 @@
const { handleError } = require('@librechat/api');
const { ViolationTypes } = require('librechat-data-provider');
const { getModelsConfig } = require('~/server/controllers/ModelController');
const { handleError } = require('~/server/utils');
const { logViolation } = require('~/cache');
/**
* Validates the model of the request.

File diff suppressed because it is too large

View File

@@ -0,0 +1,162 @@
const fs = require('fs');
const path = require('path');
const express = require('express');
const request = require('supertest');
const zlib = require('zlib');
// Create test setup
const mockTestDir = path.join(__dirname, 'test-static-route');
// Mock the paths module to point to our test directory
jest.mock('~/config/paths', () => ({
imageOutput: mockTestDir,
}));
describe('Static Route Integration', () => {
let app;
let staticRoute;
let testDir;
let testImagePath;
beforeAll(() => {
// Create a test directory and files
testDir = mockTestDir;
testImagePath = path.join(testDir, 'test-image.jpg');
if (!fs.existsSync(testDir)) {
fs.mkdirSync(testDir, { recursive: true });
}
// Create a test image file
fs.writeFileSync(testImagePath, 'fake-image-data');
// Create a gzipped version of the test image (for gzip scanning tests)
fs.writeFileSync(testImagePath + '.gz', zlib.gzipSync('fake-image-data'));
});
afterAll(() => {
// Clean up test files
if (fs.existsSync(testDir)) {
fs.rmSync(testDir, { recursive: true, force: true });
}
});
// Helper function to set up static route with specific config
const setupStaticRoute = (skipGzipScan = false) => {
if (skipGzipScan) {
delete process.env.ENABLE_IMAGE_OUTPUT_GZIP_SCAN;
} else {
process.env.ENABLE_IMAGE_OUTPUT_GZIP_SCAN = 'true';
}
staticRoute = require('../static');
app.use('/images', staticRoute);
};
beforeEach(() => {
// Clear the module cache to get fresh imports
jest.resetModules();
app = express();
// Clear environment variables
delete process.env.ENABLE_IMAGE_OUTPUT_GZIP_SCAN;
delete process.env.NODE_ENV;
});
describe('route functionality', () => {
it('should serve static image files', async () => {
process.env.NODE_ENV = 'production';
setupStaticRoute();
const response = await request(app).get('/images/test-image.jpg').expect(200);
expect(response.body.toString()).toBe('fake-image-data');
});
it('should return 404 for non-existent files', async () => {
setupStaticRoute();
const response = await request(app).get('/images/nonexistent.jpg');
expect(response.status).toBe(404);
});
});
describe('cache behavior', () => {
it('should set cache headers for images in production', async () => {
process.env.NODE_ENV = 'production';
setupStaticRoute();
const response = await request(app).get('/images/test-image.jpg').expect(200);
expect(response.headers['cache-control']).toBe('public, max-age=172800, s-maxage=86400');
});
it('should not set cache headers in development', async () => {
process.env.NODE_ENV = 'development';
setupStaticRoute();
const response = await request(app).get('/images/test-image.jpg').expect(200);
// Our middleware should not set the production cache-control header in development
expect(response.headers['cache-control']).not.toBe('public, max-age=172800, s-maxage=86400');
});
});
describe('gzip compression behavior', () => {
beforeEach(() => {
process.env.NODE_ENV = 'production';
});
it('should serve gzipped files when gzip scanning is enabled', async () => {
setupStaticRoute(false); // Enable gzip scanning
const response = await request(app)
.get('/images/test-image.jpg')
.set('Accept-Encoding', 'gzip')
.expect(200);
expect(response.headers['content-encoding']).toBe('gzip');
expect(response.body.toString()).toBe('fake-image-data');
});
it('should not serve gzipped files when gzip scanning is disabled', async () => {
setupStaticRoute(true); // Disable gzip scanning
const response = await request(app)
.get('/images/test-image.jpg')
.set('Accept-Encoding', 'gzip')
.expect(200);
expect(response.headers['content-encoding']).toBeUndefined();
expect(response.body.toString()).toBe('fake-image-data');
});
});
describe('path configuration', () => {
it('should use the configured imageOutput path', async () => {
setupStaticRoute();
const response = await request(app).get('/images/test-image.jpg').expect(200);
expect(response.body.toString()).toBe('fake-image-data');
});
it('should serve from subdirectories', async () => {
// Create a subdirectory with a file
const subDir = path.join(testDir, 'thumbs');
fs.mkdirSync(subDir, { recursive: true });
const thumbPath = path.join(subDir, 'thumb.jpg');
fs.writeFileSync(thumbPath, 'thumbnail-data');
setupStaticRoute();
const response = await request(app).get('/images/thumbs/thumb.jpg').expect(200);
expect(response.body.toString()).toBe('thumbnail-data');
// Clean up
fs.rmSync(subDir, { recursive: true, force: true });
});
});
});
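A note on the technique this suite leans on: the static route reads `ENABLE_IMAGE_OUTPUT_GZIP_SCAN` at require time, so each test resets the module registry before re-requiring it. The essential pattern:

// Reset the module cache so a fresh require() re-reads env vars that are
// evaluated at module load time (as ../static is above).
jest.resetModules();
process.env.ENABLE_IMAGE_OUTPUT_GZIP_SCAN = 'true';
const freshRoute = require('../static'); // picks up the new env value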

View File

@@ -1,10 +1,11 @@
const express = require('express');
const { isEnabled } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { CacheKeys, defaultSocialLogins, Constants } = require('librechat-data-provider');
const { getCustomConfig } = require('~/server/services/Config/getCustomConfig');
const { getLdapConfig } = require('~/server/services/Config/ldap');
const { getProjectByName } = require('~/models/Project');
const { isEnabled } = require('~/server/utils');
const { getMCPManager } = require('~/config');
const { getLogStores } = require('~/cache');
const router = express.Router();
@@ -102,10 +103,16 @@ router.get('/', async function (req, res) {
payload.mcpServers = {};
const config = await getCustomConfig();
if (config?.mcpServers != null) {
const mcpManager = getMCPManager();
const oauthServers = mcpManager.getOAuthServers();
for (const serverName in config.mcpServers) {
const serverConfig = config.mcpServers[serverName];
payload.mcpServers[serverName] = {
customUserVars: serverConfig?.customUserVars || {},
chatMenu: serverConfig?.chatMenu,
isOAuth: oauthServers.has(serverName),
startup: serverConfig?.startup,
};
}
}
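The loop above strips each configured MCP server down to a client-safe summary. An illustrative result for one server (names and values are examples, not real configuration):

// Example shape of payload.mcpServers after the loop above.
const payload = {
  mcpServers: {
    'my-server': {
      customUserVars: {}, // serverConfig.customUserVars || {}
      chatMenu: true, // serverConfig.chatMenu
      isOAuth: false, // oauthServers.has('my-server')
      startup: undefined, // serverConfig.startup
    },
  },
};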

View File

@@ -1,16 +1,17 @@
const multer = require('multer');
const express = require('express');
const { sleep } = require('@librechat/agents');
const { isEnabled } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { CacheKeys, EModelEndpoint } = require('librechat-data-provider');
const { getConvosByCursor, deleteConvos, getConvo, saveConvo } = require('~/models/Conversation');
const { forkConversation, duplicateConversation } = require('~/server/utils/import/fork');
const { createImportLimiters, createForkLimiters } = require('~/server/middleware');
const { storage, importFileFilter } = require('~/server/routes/files/multer');
const requireJwtAuth = require('~/server/middleware/requireJwtAuth');
const { importConversations } = require('~/server/utils/import');
const { createImportLimiters } = require('~/server/middleware');
const { deleteToolCalls } = require('~/models/ToolCall');
const { isEnabled, sleep } = require('~/server/utils');
const getLogStores = require('~/cache/getLogStores');
const { logger } = require('~/config');
const assistantClients = {
[EModelEndpoint.azureAssistants]: require('~/server/services/Endpoints/azureAssistants'),
@@ -43,6 +44,7 @@ router.get('/', async (req, res) => {
});
res.status(200).json(result);
} catch (error) {
logger.error('Error fetching conversations', error);
res.status(500).json({ error: 'Error fetching conversations' });
}
});
@@ -109,7 +111,7 @@ router.delete('/', async (req, res) => {
/** @type {{ openai: OpenAI }} */
const { openai } = await assistantClients[endpoint].initializeClient({ req, res });
try {
const response = await openai.beta.threads.del(thread_id);
const response = await openai.beta.threads.delete(thread_id);
logger.debug('Deleted OpenAI thread:', response);
} catch (error) {
logger.error('Error deleting OpenAI thread:', error);
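The `threads.del` to `threads.delete` rename here, together with the `messages.retrieve` and `submitToolOutputs` changes in the run-handler diff further down, tracks a newer openai-node call shape in which secondary IDs move into a params object. A hedged sketch of the migrated calls (exact signatures depend on the installed SDK version; IDs and tool outputs are assumed to come from earlier API calls):

const OpenAI = require('openai');
const openai = new OpenAI(); // API key read from OPENAI_API_KEY

async function migratedCalls({ thread_id, message_id, run_id, tool_outputs }) {
  await openai.beta.threads.delete(thread_id); // formerly threads.del(thread_id)
  const message = await openai.beta.threads.messages.retrieve(message_id, { thread_id });
  const run = await openai.beta.threads.runs.submitToolOutputs(run_id, { thread_id, tool_outputs });
  return { message, run };
}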
@@ -156,6 +158,7 @@ router.post('/update', async (req, res) => {
});
const { importIpLimiter, importUserLimiter } = createImportLimiters();
const { forkIpLimiter, forkUserLimiter } = createForkLimiters();
const upload = multer({ storage: storage, fileFilter: importFileFilter });
/**
@@ -189,7 +192,7 @@ router.post(
* @param {express.Response<TForkConvoResponse>} res - Express response object.
* @returns {Promise<void>} - The response after forking the conversation.
*/
router.post('/fork', async (req, res) => {
router.post('/fork', forkIpLimiter, forkUserLimiter, async (req, res) => {
try {
/** @type {TForkConvoRequest} */
const { conversationId, messageId, option, splitAtTarget, latestMessageId } = req.body;

View File

@@ -0,0 +1,282 @@
const express = require('express');
const request = require('supertest');
const mongoose = require('mongoose');
const { v4: uuidv4 } = require('uuid');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { GLOBAL_PROJECT_NAME } = require('librechat-data-provider').Constants;
// Mock dependencies
jest.mock('~/server/services/Files/process', () => ({
processDeleteRequest: jest.fn().mockResolvedValue({}),
filterFile: jest.fn(),
processFileUpload: jest.fn(),
processAgentFileUpload: jest.fn(),
}));
jest.mock('~/server/services/Files/strategies', () => ({
getStrategyFunctions: jest.fn(() => ({})),
}));
jest.mock('~/server/controllers/assistants/helpers', () => ({
getOpenAIClient: jest.fn(),
}));
jest.mock('~/server/services/Tools/credentials', () => ({
loadAuthValues: jest.fn(),
}));
jest.mock('~/server/services/Files/S3/crud', () => ({
refreshS3FileUrls: jest.fn(),
}));
jest.mock('~/cache', () => ({
getLogStores: jest.fn(() => ({
get: jest.fn(),
set: jest.fn(),
})),
}));
jest.mock('~/config', () => ({
logger: {
error: jest.fn(),
warn: jest.fn(),
debug: jest.fn(),
},
}));
const { createFile } = require('~/models/File');
const { createAgent } = require('~/models/Agent');
const { getProjectByName } = require('~/models/Project');
// Import the router after mocks
const router = require('./files');
describe('File Routes - Agent Files Endpoint', () => {
let app;
let mongoServer;
let authorId;
let otherUserId;
let agentId;
let fileId1;
let fileId2;
let fileId3;
beforeAll(async () => {
mongoServer = await MongoMemoryServer.create();
await mongoose.connect(mongoServer.getUri());
// Initialize models
require('~/db/models');
app = express();
app.use(express.json());
// Mock authentication middleware
app.use((req, res, next) => {
req.user = { id: otherUserId || 'default-user' };
req.app = { locals: {} };
next();
});
app.use('/files', router);
});
afterAll(async () => {
await mongoose.disconnect();
await mongoServer.stop();
});
beforeEach(async () => {
jest.clearAllMocks();
// Clear database
const collections = mongoose.connection.collections;
for (const key in collections) {
await collections[key].deleteMany({});
}
authorId = new mongoose.Types.ObjectId().toString();
otherUserId = new mongoose.Types.ObjectId().toString();
agentId = uuidv4();
fileId1 = uuidv4();
fileId2 = uuidv4();
fileId3 = uuidv4();
// Create files
await createFile({
user: authorId,
file_id: fileId1,
filename: 'agent-file1.txt',
filepath: `/uploads/${authorId}/${fileId1}`,
bytes: 1024,
type: 'text/plain',
});
await createFile({
user: authorId,
file_id: fileId2,
filename: 'agent-file2.txt',
filepath: `/uploads/${authorId}/${fileId2}`,
bytes: 2048,
type: 'text/plain',
});
await createFile({
user: otherUserId,
file_id: fileId3,
filename: 'user-file.txt',
filepath: `/uploads/${otherUserId}/${fileId3}`,
bytes: 512,
type: 'text/plain',
});
// Create an agent with files attached
await createAgent({
id: agentId,
name: 'Test Agent',
author: authorId,
model: 'gpt-4',
provider: 'openai',
isCollaborative: true,
tool_resources: {
file_search: {
file_ids: [fileId1, fileId2],
},
},
});
// Share the agent globally
const globalProject = await getProjectByName(GLOBAL_PROJECT_NAME, '_id');
if (globalProject) {
const { updateAgent } = require('~/models/Agent');
await updateAgent({ id: agentId }, { projectIds: [globalProject._id] });
}
});
describe('GET /files/agent/:agent_id', () => {
it('should return files accessible through the agent for non-author', async () => {
const response = await request(app).get(`/files/agent/${agentId}`);
expect(response.status).toBe(200);
expect(response.body).toHaveLength(2); // Only agent files, not user-owned files
const fileIds = response.body.map((f) => f.file_id);
expect(fileIds).toContain(fileId1);
expect(fileIds).toContain(fileId2);
expect(fileIds).not.toContain(fileId3); // User's own file not included
});
it('should return 400 when agent_id is not provided', async () => {
const response = await request(app).get('/files/agent/');
expect(response.status).toBe(404); // Express returns 404 for missing route parameter
});
it('should return empty array for non-existent agent', async () => {
const response = await request(app).get('/files/agent/non-existent-agent');
expect(response.status).toBe(200);
expect(response.body).toEqual([]); // Empty array for non-existent agent
});
it('should return empty array when agent is not collaborative', async () => {
// Create a non-collaborative agent
const nonCollabAgentId = uuidv4();
await createAgent({
id: nonCollabAgentId,
name: 'Non-Collaborative Agent',
author: authorId,
model: 'gpt-4',
provider: 'openai',
isCollaborative: false,
tool_resources: {
file_search: {
file_ids: [fileId1],
},
},
});
// Share it globally
const globalProject = await getProjectByName(GLOBAL_PROJECT_NAME, '_id');
if (globalProject) {
const { updateAgent } = require('~/models/Agent');
await updateAgent({ id: nonCollabAgentId }, { projectIds: [globalProject._id] });
}
const response = await request(app).get(`/files/agent/${nonCollabAgentId}`);
expect(response.status).toBe(200);
expect(response.body).toEqual([]); // Empty array when not collaborative
});
it('should return agent files for agent author', async () => {
// Create a new app instance with author authentication
const authorApp = express();
authorApp.use(express.json());
authorApp.use((req, res, next) => {
req.user = { id: authorId };
req.app = { locals: {} };
next();
});
authorApp.use('/files', router);
const response = await request(authorApp).get(`/files/agent/${agentId}`);
expect(response.status).toBe(200);
expect(response.body).toHaveLength(2); // Agent files for author
const fileIds = response.body.map((f) => f.file_id);
expect(fileIds).toContain(fileId1);
expect(fileIds).toContain(fileId2);
expect(fileIds).not.toContain(fileId3); // User's own file not included
});
it('should return files uploaded by other users to shared agent for author', async () => {
// Create a file uploaded by another user
const otherUserFileId = uuidv4();
const anotherUserId = new mongoose.Types.ObjectId().toString();
await createFile({
user: anotherUserId,
file_id: otherUserFileId,
filename: 'other-user-file.txt',
filepath: `/uploads/${anotherUserId}/${otherUserFileId}`,
bytes: 4096,
type: 'text/plain',
});
// Update agent to include the file uploaded by another user
const { updateAgent } = require('~/models/Agent');
await updateAgent(
{ id: agentId },
{
tool_resources: {
file_search: {
file_ids: [fileId1, fileId2, otherUserFileId],
},
},
},
);
// Create app instance with author authentication
const authorApp = express();
authorApp.use(express.json());
authorApp.use((req, res, next) => {
req.user = { id: authorId };
req.app = { locals: {} };
next();
});
authorApp.use('/files', router);
const response = await request(authorApp).get(`/files/agent/${agentId}`);
expect(response.status).toBe(200);
expect(response.body).toHaveLength(3); // Including file from another user
const fileIds = response.body.map((f) => f.file_id);
expect(fileIds).toContain(fileId1);
expect(fileIds).toContain(fileId2);
expect(fileIds).toContain(otherUserFileId); // File uploaded by another user
});
});
});

View File

@@ -5,6 +5,7 @@ const {
Time,
isUUID,
CacheKeys,
Constants,
FileSources,
EModelEndpoint,
isAgentsEndpoint,
@@ -16,11 +17,12 @@ const {
processDeleteRequest,
processAgentFileUpload,
} = require('~/server/services/Files/process');
const { getFiles, batchUpdateFiles, hasAccessToFilesViaAgent } = require('~/models/File');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { getOpenAIClient } = require('~/server/controllers/assistants/helpers');
const { loadAuthValues } = require('~/server/services/Tools/credentials');
const { refreshS3FileUrls } = require('~/server/services/Files/S3/crud');
const { getFiles, batchUpdateFiles } = require('~/models/File');
const { getProjectByName } = require('~/models/Project');
const { getAssistant } = require('~/models/Assistant');
const { getAgent } = require('~/models/Agent');
const { getLogStores } = require('~/cache');
@@ -50,6 +52,68 @@ router.get('/', async (req, res) => {
}
});
/**
* Get files specific to an agent
* @route GET /files/agent/:agent_id
* @param {string} agent_id - The agent ID to get files for
* @returns {Promise<TFile[]>} Array of files attached to the agent
*/
router.get('/agent/:agent_id', async (req, res) => {
try {
const { agent_id } = req.params;
const userId = req.user.id;
if (!agent_id) {
return res.status(400).json({ error: 'Agent ID is required' });
}
// Get the agent to check ownership and attached files
const agent = await getAgent({ id: agent_id });
if (!agent) {
// No agent found, return empty array
return res.status(200).json([]);
}
// Check if user has access to the agent
if (agent.author.toString() !== userId) {
// Non-authors need the agent to be globally shared and collaborative
const globalProject = await getProjectByName(Constants.GLOBAL_PROJECT_NAME, '_id');
if (
!globalProject ||
!agent.projectIds.some((pid) => pid.toString() === globalProject._id.toString()) ||
!agent.isCollaborative
) {
return res.status(200).json([]);
}
}
// Collect all file IDs from agent's tool resources
const agentFileIds = [];
if (agent.tool_resources) {
for (const [, resource] of Object.entries(agent.tool_resources)) {
if (resource?.file_ids && Array.isArray(resource.file_ids)) {
agentFileIds.push(...resource.file_ids);
}
}
}
// If no files attached to agent, return empty array
if (agentFileIds.length === 0) {
return res.status(200).json([]);
}
// Get only the files attached to this agent
const files = await getFiles({ file_id: { $in: agentFileIds } }, null, { text: 0 });
res.status(200).json(files);
} catch (error) {
logger.error('[/files/agent/:agent_id] Error fetching agent files:', error);
res.status(500).json({ error: 'Failed to fetch agent files' });
}
});
router.get('/config', async (req, res) => {
try {
res.status(200).json(req.app.locals.fileConfig);
@@ -86,11 +150,62 @@ router.delete('/', async (req, res) => {
const fileIds = files.map((file) => file.file_id);
const dbFiles = await getFiles({ file_id: { $in: fileIds } });
const unauthorizedFiles = dbFiles.filter((file) => file.user.toString() !== req.user.id);
const ownedFiles = [];
const nonOwnedFiles = [];
const fileMap = new Map();
for (const file of dbFiles) {
fileMap.set(file.file_id, file);
if (file.user.toString() === req.user.id) {
ownedFiles.push(file);
} else {
nonOwnedFiles.push(file);
}
}
// If all files are owned by the user, no need for further checks
if (nonOwnedFiles.length === 0) {
await processDeleteRequest({ req, files: ownedFiles });
logger.debug(
`[/files] Files deleted successfully: ${ownedFiles
.filter((f) => f.file_id)
.map((f) => f.file_id)
.join(', ')}`,
);
res.status(200).json({ message: 'Files deleted successfully' });
return;
}
// Check access for non-owned files
let authorizedFiles = [...ownedFiles];
let unauthorizedFiles = [];
if (req.body.agent_id && nonOwnedFiles.length > 0) {
// Batch check access for all non-owned files
const nonOwnedFileIds = nonOwnedFiles.map((f) => f.file_id);
const accessMap = await hasAccessToFilesViaAgent(
req.user.id,
nonOwnedFileIds,
req.body.agent_id,
);
// Separate authorized and unauthorized files
for (const file of nonOwnedFiles) {
if (accessMap.get(file.file_id)) {
authorizedFiles.push(file);
} else {
unauthorizedFiles.push(file);
}
}
} else {
// No agent context, all non-owned files are unauthorized
unauthorizedFiles = nonOwnedFiles;
}
if (unauthorizedFiles.length > 0) {
return res.status(403).json({
message: 'You can only delete your own files',
message: 'You can only delete files you have access to',
unauthorizedFiles: unauthorizedFiles.map((f) => f.file_id),
});
}
@@ -131,10 +246,10 @@ router.delete('/', async (req, res) => {
.json({ message: 'File associations removed successfully from Azure Assistant' });
}
await processDeleteRequest({ req, files: dbFiles });
await processDeleteRequest({ req, files: authorizedFiles });
logger.debug(
`[/files] Files deleted successfully: ${files
`[/files] Files deleted successfully: ${authorizedFiles
.filter((f) => f.file_id)
.map((f) => f.file_id)
.join(', ')}`,
@@ -285,7 +400,8 @@ router.post('/', async (req, res) => {
if (
error.message?.includes('Invalid file format') ||
error.message?.includes('No OCR result')
error.message?.includes('No OCR result') ||
error.message?.includes('exceeds token limit')
) {
message = error.message;
}
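The delete flow above batches its access checks through `hasAccessToFilesViaAgent`, which evidently resolves a `Map` keyed by `file_id`. A hedged sketch of that contract, inferred from the call site (the real implementation in `~/models/File` may differ, e.g. it may also verify global sharing):

// Sketch of the contract implied by the call site above: resolve a Map of
// file_id -> boolean, true when the agent grants the user access.
// getAgent is assumed to be imported from ~/models/Agent.
async function hasAccessToFilesViaAgentSketch(userId, fileIds, agentId) {
  const accessMap = new Map(fileIds.map((id) => [id, false]));
  const agent = await getAgent({ id: agentId });
  if (!agent || (agent.author.toString() !== userId && !agent.isCollaborative)) {
    return accessMap;
  }
  const attached = new Set(
    Object.values(agent.tool_resources ?? {}).flatMap((r) => r?.file_ids ?? []),
  );
  for (const id of fileIds) {
    accessMap.set(id, attached.has(id));
  }
  return accessMap;
}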

View File

@@ -0,0 +1,302 @@
const express = require('express');
const request = require('supertest');
const mongoose = require('mongoose');
const { v4: uuidv4 } = require('uuid');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { GLOBAL_PROJECT_NAME } = require('librechat-data-provider').Constants;
// Mock dependencies
jest.mock('~/server/services/Files/process', () => ({
processDeleteRequest: jest.fn().mockResolvedValue({}),
filterFile: jest.fn(),
processFileUpload: jest.fn(),
processAgentFileUpload: jest.fn(),
}));
jest.mock('~/server/services/Files/strategies', () => ({
getStrategyFunctions: jest.fn(() => ({})),
}));
jest.mock('~/server/controllers/assistants/helpers', () => ({
getOpenAIClient: jest.fn(),
}));
jest.mock('~/server/services/Tools/credentials', () => ({
loadAuthValues: jest.fn(),
}));
jest.mock('~/server/services/Files/S3/crud', () => ({
refreshS3FileUrls: jest.fn(),
}));
jest.mock('~/cache', () => ({
getLogStores: jest.fn(() => ({
get: jest.fn(),
set: jest.fn(),
})),
}));
jest.mock('~/config', () => ({
logger: {
error: jest.fn(),
warn: jest.fn(),
debug: jest.fn(),
},
}));
const { createFile } = require('~/models/File');
const { createAgent } = require('~/models/Agent');
const { getProjectByName } = require('~/models/Project');
const { processDeleteRequest } = require('~/server/services/Files/process');
// Import the router after mocks
const router = require('./files');
describe('File Routes - Delete with Agent Access', () => {
let app;
let mongoServer;
let authorId;
let otherUserId;
let agentId;
let fileId;
beforeAll(async () => {
mongoServer = await MongoMemoryServer.create();
await mongoose.connect(mongoServer.getUri());
// Initialize models
require('~/db/models');
app = express();
app.use(express.json());
// Mock authentication middleware
app.use((req, res, next) => {
req.user = { id: otherUserId || 'default-user' };
req.app = { locals: {} };
next();
});
app.use('/files', router);
});
afterAll(async () => {
await mongoose.disconnect();
await mongoServer.stop();
});
beforeEach(async () => {
jest.clearAllMocks();
// Clear database
const collections = mongoose.connection.collections;
for (const key in collections) {
await collections[key].deleteMany({});
}
authorId = new mongoose.Types.ObjectId().toString();
otherUserId = new mongoose.Types.ObjectId().toString();
fileId = uuidv4();
// Create a file owned by the author
await createFile({
user: authorId,
file_id: fileId,
filename: 'test.txt',
filepath: `/uploads/${authorId}/${fileId}`,
bytes: 1024,
type: 'text/plain',
});
// Create an agent with the file attached
const agent = await createAgent({
id: uuidv4(),
name: 'Test Agent',
author: authorId,
model: 'gpt-4',
provider: 'openai',
isCollaborative: true,
tool_resources: {
file_search: {
file_ids: [fileId],
},
},
});
agentId = agent.id;
// Share the agent globally
const globalProject = await getProjectByName(GLOBAL_PROJECT_NAME, '_id');
if (globalProject) {
const { updateAgent } = require('~/models/Agent');
await updateAgent({ id: agentId }, { projectIds: [globalProject._id] });
}
});
describe('DELETE /files', () => {
it('should allow deleting files owned by the user', async () => {
// Create a file owned by the current user
const userFileId = uuidv4();
await createFile({
user: otherUserId,
file_id: userFileId,
filename: 'user-file.txt',
filepath: `/uploads/${otherUserId}/${userFileId}`,
bytes: 1024,
type: 'text/plain',
});
const response = await request(app)
.delete('/files')
.send({
files: [
{
file_id: userFileId,
filepath: `/uploads/${otherUserId}/${userFileId}`,
},
],
});
expect(response.status).toBe(200);
expect(response.body.message).toBe('Files deleted successfully');
expect(processDeleteRequest).toHaveBeenCalled();
});
it('should prevent deleting files not owned by user without agent context', async () => {
const response = await request(app)
.delete('/files')
.send({
files: [
{
file_id: fileId,
filepath: `/uploads/${authorId}/${fileId}`,
},
],
});
expect(response.status).toBe(403);
expect(response.body.message).toBe('You can only delete files you have access to');
expect(response.body.unauthorizedFiles).toContain(fileId);
expect(processDeleteRequest).not.toHaveBeenCalled();
});
it('should allow deleting files accessible through shared agent', async () => {
const response = await request(app)
.delete('/files')
.send({
agent_id: agentId,
files: [
{
file_id: fileId,
filepath: `/uploads/${authorId}/${fileId}`,
},
],
});
expect(response.status).toBe(200);
expect(response.body.message).toBe('Files deleted successfully');
expect(processDeleteRequest).toHaveBeenCalled();
});
it('should prevent deleting files not attached to the specified agent', async () => {
// Create another file not attached to the agent
const unattachedFileId = uuidv4();
await createFile({
user: authorId,
file_id: unattachedFileId,
filename: 'unattached.txt',
filepath: `/uploads/${authorId}/${unattachedFileId}`,
bytes: 1024,
type: 'text/plain',
});
const response = await request(app)
.delete('/files')
.send({
agent_id: agentId,
files: [
{
file_id: unattachedFileId,
filepath: `/uploads/${authorId}/${unattachedFileId}`,
},
],
});
expect(response.status).toBe(403);
expect(response.body.message).toBe('You can only delete files you have access to');
expect(response.body.unauthorizedFiles).toContain(unattachedFileId);
});
it('should handle mixed authorized and unauthorized files', async () => {
// Create a file owned by the current user
const userFileId = uuidv4();
await createFile({
user: otherUserId,
file_id: userFileId,
filename: 'user-file.txt',
filepath: `/uploads/${otherUserId}/${userFileId}`,
bytes: 1024,
type: 'text/plain',
});
// Create an unauthorized file
const unauthorizedFileId = uuidv4();
await createFile({
user: authorId,
file_id: unauthorizedFileId,
filename: 'unauthorized.txt',
filepath: `/uploads/${authorId}/${unauthorizedFileId}`,
bytes: 1024,
type: 'text/plain',
});
const response = await request(app)
.delete('/files')
.send({
agent_id: agentId,
files: [
{
file_id: fileId, // Authorized through agent
filepath: `/uploads/${authorId}/${fileId}`,
},
{
file_id: userFileId, // Owned by user
filepath: `/uploads/${otherUserId}/${userFileId}`,
},
{
file_id: unauthorizedFileId, // Not authorized
filepath: `/uploads/${authorId}/${unauthorizedFileId}`,
},
],
});
expect(response.status).toBe(403);
expect(response.body.message).toBe('You can only delete files you have access to');
expect(response.body.unauthorizedFiles).toContain(unauthorizedFileId);
expect(response.body.unauthorizedFiles).not.toContain(fileId);
expect(response.body.unauthorizedFiles).not.toContain(userFileId);
});
it('should prevent deleting files when agent is not collaborative', async () => {
// Update the agent to be non-collaborative
const { updateAgent } = require('~/models/Agent');
await updateAgent({ id: agentId }, { isCollaborative: false });
const response = await request(app)
.delete('/files')
.send({
agent_id: agentId,
files: [
{
file_id: fileId,
filepath: `/uploads/${authorId}/${fileId}`,
},
],
});
expect(response.status).toBe(403);
expect(response.body.message).toBe('You can only delete files you have access to');
expect(response.body.unauthorizedFiles).toContain(fileId);
expect(processDeleteRequest).not.toHaveBeenCalled();
});
});
});

View File

@@ -28,6 +28,18 @@ router.post('/', async (req, res) => {
} catch (error) {
// TODO: delete remote file if it exists
logger.error('[/files/images] Error processing file:', error);
let message = 'Error processing file';
// Handle specific error types
if (
error.message?.includes('Invalid file format') ||
error.message?.includes('No OCR result') ||
error.message?.includes('exceeds token limit')
) {
message = error.message;
}
try {
const filepath = path.join(
req.app.locals.paths.imageOutput,
@@ -38,7 +50,7 @@ router.post('/', async (req, res) => {
} catch (error) {
logger.error('[/files/images] Error deleting file:', error);
}
res.status(500).json({ message: 'Error processing file' });
res.status(500).json({ message });
} finally {
try {
await fs.unlink(req.file.path);

View File

@@ -1,9 +1,13 @@
const { Router } = require('express');
const { MCPOAuthHandler } = require('@librechat/api');
const { logger } = require('@librechat/data-schemas');
const { CacheKeys } = require('librechat-data-provider');
const { MCPOAuthHandler } = require('@librechat/api');
const { CacheKeys, Constants } = require('librechat-data-provider');
const { findToken, updateToken, createToken, deleteTokens } = require('~/models');
const { setCachedTools, getCachedTools, loadCustomConfig } = require('~/server/services/Config');
const { getMCPSetupData, getServerConnectionStatus } = require('~/server/services/MCP');
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const { getMCPManager, getFlowStateManager } = require('~/config');
const { requireJwtAuth } = require('~/server/middleware');
const { getFlowStateManager } = require('~/config');
const { getLogStores } = require('~/cache');
const router = Router();
@@ -89,7 +93,6 @@ router.get('/:serverName/oauth/callback', async (req, res) => {
return res.redirect('/oauth/error?error=missing_state');
}
// Extract flow ID from state
const flowId = state;
logger.debug('[MCP OAuth] Using flow ID from state', { flowId });
@@ -112,14 +115,68 @@ router.get('/:serverName/oauth/callback', async (req, res) => {
hasCodeVerifier: !!flowState.codeVerifier,
});
// Complete the OAuth flow
logger.debug('[MCP OAuth] Completing OAuth flow');
const tokens = await MCPOAuthHandler.completeOAuthFlow(flowId, code, flowManager);
logger.info('[MCP OAuth] OAuth flow completed, tokens received in callback route');
// For system-level OAuth, we need to store the tokens and retry the connection
if (flowState.userId === 'system') {
logger.debug(`[MCP OAuth] System-level OAuth completed for ${serverName}`);
try {
const mcpManager = getMCPManager(flowState.userId);
logger.debug(`[MCP OAuth] Attempting to reconnect ${serverName} with new OAuth tokens`);
if (flowState.userId !== 'system') {
const user = { id: flowState.userId };
const userConnection = await mcpManager.getUserConnection({
user,
serverName,
flowManager,
tokenMethods: {
findToken,
updateToken,
createToken,
deleteTokens,
},
});
logger.info(
`[MCP OAuth] Successfully reconnected ${serverName} for user ${flowState.userId}`,
);
const userTools = (await getCachedTools({ userId: flowState.userId })) || {};
const mcpDelimiter = Constants.mcp_delimiter;
for (const key of Object.keys(userTools)) {
if (key.endsWith(`${mcpDelimiter}${serverName}`)) {
delete userTools[key];
}
}
const tools = await userConnection.fetchTools();
for (const tool of tools) {
const name = `${tool.name}${Constants.mcp_delimiter}${serverName}`;
userTools[name] = {
type: 'function',
['function']: {
name,
description: tool.description,
parameters: tool.inputSchema,
},
};
}
await setCachedTools(userTools, { userId: flowState.userId });
logger.debug(
`[MCP OAuth] Cached ${tools.length} tools for ${serverName} user ${flowState.userId}`,
);
} else {
logger.debug(`[MCP OAuth] System-level OAuth completed for ${serverName}`);
}
} catch (error) {
logger.warn(
`[MCP OAuth] Failed to reconnect ${serverName} after OAuth, but tokens are saved:`,
error,
);
}
/** ID of the flow that the tool/connection is waiting for */
@@ -151,7 +208,6 @@ router.get('/oauth/tokens/:flowId', requireJwtAuth, async (req, res) => {
return res.status(401).json({ error: 'User not authenticated' });
}
// Allow system flows or user-owned flows
if (!flowId.startsWith(`${user.id}:`) && !flowId.startsWith('system:')) {
return res.status(403).json({ error: 'Access denied' });
}
@@ -202,4 +258,338 @@ router.get('/oauth/status/:flowId', async (req, res) => {
}
});
/**
* Cancel OAuth flow
* This endpoint cancels a pending OAuth flow
*/
router.post('/oauth/cancel/:serverName', requireJwtAuth, async (req, res) => {
try {
const { serverName } = req.params;
const user = req.user;
if (!user?.id) {
return res.status(401).json({ error: 'User not authenticated' });
}
logger.info(`[MCP OAuth Cancel] Cancelling OAuth flow for ${serverName} by user ${user.id}`);
const flowsCache = getLogStores(CacheKeys.FLOWS);
const flowManager = getFlowStateManager(flowsCache);
const flowId = MCPOAuthHandler.generateFlowId(user.id, serverName);
const flowState = await flowManager.getFlowState(flowId, 'mcp_oauth');
if (!flowState) {
logger.debug(`[MCP OAuth Cancel] No active flow found for ${serverName}`);
return res.json({
success: true,
message: 'No active OAuth flow to cancel',
});
}
await flowManager.failFlow(flowId, 'mcp_oauth', 'User cancelled OAuth flow');
logger.info(`[MCP OAuth Cancel] Successfully cancelled OAuth flow for ${serverName}`);
res.json({
success: true,
message: `OAuth flow for ${serverName} cancelled successfully`,
});
} catch (error) {
logger.error('[MCP OAuth Cancel] Failed to cancel OAuth flow', error);
res.status(500).json({ error: 'Failed to cancel OAuth flow' });
}
});
/**
* Reinitialize MCP server
* This endpoint allows reinitializing a specific MCP server
*/
router.post('/:serverName/reinitialize', requireJwtAuth, async (req, res) => {
try {
const { serverName } = req.params;
const user = req.user;
if (!user?.id) {
return res.status(401).json({ error: 'User not authenticated' });
}
logger.info(`[MCP Reinitialize] Reinitializing server: ${serverName}`);
const printConfig = false;
const config = await loadCustomConfig(printConfig);
if (!config || !config.mcpServers || !config.mcpServers[serverName]) {
return res.status(404).json({
error: `MCP server '${serverName}' not found in configuration`,
});
}
const flowsCache = getLogStores(CacheKeys.FLOWS);
const flowManager = getFlowStateManager(flowsCache);
const mcpManager = getMCPManager();
await mcpManager.disconnectServer(serverName);
logger.info(`[MCP Reinitialize] Disconnected existing server: ${serverName}`);
const serverConfig = config.mcpServers[serverName];
mcpManager.mcpConfigs[serverName] = serverConfig;
let customUserVars = {};
if (serverConfig.customUserVars && typeof serverConfig.customUserVars === 'object') {
for (const varName of Object.keys(serverConfig.customUserVars)) {
try {
const value = await getUserPluginAuthValue(user.id, varName, false);
customUserVars[varName] = value;
} catch (err) {
logger.error(`[MCP Reinitialize] Error fetching ${varName} for user ${user.id}:`, err);
}
}
}
let userConnection = null;
let oauthRequired = false;
let oauthUrl = null;
try {
userConnection = await mcpManager.getUserConnection({
user,
serverName,
flowManager,
customUserVars,
tokenMethods: {
findToken,
updateToken,
createToken,
deleteTokens,
},
returnOnOAuth: true,
oauthStart: async (authURL) => {
logger.info(`[MCP Reinitialize] OAuth URL received: ${authURL}`);
oauthUrl = authURL;
oauthRequired = true;
},
});
logger.info(`[MCP Reinitialize] Successfully established connection for ${serverName}`);
} catch (err) {
logger.info(`[MCP Reinitialize] getUserConnection threw error: ${err.message}`);
logger.info(
`[MCP Reinitialize] OAuth state - oauthRequired: ${oauthRequired}, oauthUrl: ${oauthUrl ? 'present' : 'null'}`,
);
const isOAuthError =
err.message?.includes('OAuth') ||
err.message?.includes('authentication') ||
err.message?.includes('401');
const isOAuthFlowInitiated = err.message === 'OAuth flow initiated - return early';
if (isOAuthError || oauthRequired || isOAuthFlowInitiated) {
logger.info(
`[MCP Reinitialize] OAuth required for ${serverName} (isOAuthError: ${isOAuthError}, oauthRequired: ${oauthRequired}, isOAuthFlowInitiated: ${isOAuthFlowInitiated})`,
);
oauthRequired = true;
} else {
logger.error(
`[MCP Reinitialize] Error initializing MCP server ${serverName} for user:`,
err,
);
return res.status(500).json({ error: 'Failed to reinitialize MCP server for user' });
}
}
if (userConnection && !oauthRequired) {
const userTools = (await getCachedTools({ userId: user.id })) || {};
const mcpDelimiter = Constants.mcp_delimiter;
for (const key of Object.keys(userTools)) {
if (key.endsWith(`${mcpDelimiter}${serverName}`)) {
delete userTools[key];
}
}
const tools = await userConnection.fetchTools();
for (const tool of tools) {
const name = `${tool.name}${Constants.mcp_delimiter}${serverName}`;
userTools[name] = {
type: 'function',
['function']: {
name,
description: tool.description,
parameters: tool.inputSchema,
},
};
}
await setCachedTools(userTools, { userId: user.id });
}
logger.debug(
`[MCP Reinitialize] Sending response for ${serverName} - oauthRequired: ${oauthRequired}, oauthUrl: ${oauthUrl ? 'present' : 'null'}`,
);
const getResponseMessage = () => {
if (oauthRequired) {
return `MCP server '${serverName}' ready for OAuth authentication`;
}
if (userConnection) {
return `MCP server '${serverName}' reinitialized successfully`;
}
return `Failed to reinitialize MCP server '${serverName}'`;
};
res.json({
success: (userConnection && !oauthRequired) || (oauthRequired && oauthUrl),
message: getResponseMessage(),
serverName,
oauthRequired,
oauthUrl,
});
} catch (error) {
logger.error('[MCP Reinitialize] Unexpected error', error);
res.status(500).json({ error: 'Internal server error' });
}
});
/**
* Get connection status for all MCP servers
* This endpoint returns all app-level and user-scoped connection statuses from the MCPManager without disconnecting idle connections
*/
router.get('/connection/status', requireJwtAuth, async (req, res) => {
try {
const user = req.user;
if (!user?.id) {
return res.status(401).json({ error: 'User not authenticated' });
}
const { mcpConfig, appConnections, userConnections, oauthServers } = await getMCPSetupData(
user.id,
);
const connectionStatus = {};
for (const [serverName] of Object.entries(mcpConfig)) {
connectionStatus[serverName] = await getServerConnectionStatus(
user.id,
serverName,
appConnections,
userConnections,
oauthServers,
);
}
res.json({
success: true,
connectionStatus,
});
} catch (error) {
if (error.message === 'MCP config not found') {
return res.status(404).json({ error: error.message });
}
logger.error('[MCP Connection Status] Failed to get connection status', error);
res.status(500).json({ error: 'Failed to get connection status' });
}
});
/**
* Get connection status for a single MCP server
* This endpoint returns the connection status for a specific server for a given user
*/
router.get('/connection/status/:serverName', requireJwtAuth, async (req, res) => {
try {
const user = req.user;
const { serverName } = req.params;
if (!user?.id) {
return res.status(401).json({ error: 'User not authenticated' });
}
const { mcpConfig, appConnections, userConnections, oauthServers } = await getMCPSetupData(
user.id,
);
if (!mcpConfig[serverName]) {
return res
.status(404)
.json({ error: `MCP server '${serverName}' not found in configuration` });
}
const serverStatus = await getServerConnectionStatus(
user.id,
serverName,
appConnections,
userConnections,
oauthServers,
);
res.json({
success: true,
serverName,
connectionStatus: serverStatus.connectionState,
requiresOAuth: serverStatus.requiresOAuth,
});
} catch (error) {
if (error.message === 'MCP config not found') {
return res.status(404).json({ error: error.message });
}
logger.error(
`[MCP Per-Server Status] Failed to get connection status for ${req.params.serverName}`,
error,
);
res.status(500).json({ error: 'Failed to get connection status' });
}
});
/**
* Check which authentication values exist for a specific MCP server
* This endpoint returns only boolean flags indicating whether values are set, not the actual values
*/
router.get('/:serverName/auth-values', requireJwtAuth, async (req, res) => {
try {
const { serverName } = req.params;
const user = req.user;
if (!user?.id) {
return res.status(401).json({ error: 'User not authenticated' });
}
const printConfig = false;
const config = await loadCustomConfig(printConfig);
if (!config || !config.mcpServers || !config.mcpServers[serverName]) {
return res.status(404).json({
error: `MCP server '${serverName}' not found in configuration`,
});
}
const serverConfig = config.mcpServers[serverName];
const pluginKey = `${Constants.mcp_prefix}${serverName}`;
const authValueFlags = {};
if (serverConfig.customUserVars && typeof serverConfig.customUserVars === 'object') {
for (const varName of Object.keys(serverConfig.customUserVars)) {
try {
const value = await getUserPluginAuthValue(user.id, varName, false, pluginKey);
authValueFlags[varName] = !!(value && value.length > 0);
} catch (err) {
logger.error(
`[MCP Auth Value Flags] Error checking ${varName} for user ${user.id}:`,
err,
);
authValueFlags[varName] = false;
}
}
}
res.json({
success: true,
serverName,
authValueFlags,
});
} catch (error) {
logger.error(
`[MCP Auth Value Flags] Failed to check auth value flags for ${req.params.serverName}`,
error,
);
res.status(500).json({ error: 'Failed to check auth value flags' });
}
});
module.exports = router;
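The cached-tool refresh logic appears twice above, once in the OAuth callback and once in the reinitialize route; both delete stale `name + mcp_delimiter + serverName` keys and re-add entries from a fresh `fetchTools()`. A sketch of that shared pattern as a helper (an extraction suggestion mirroring the diff, not code from it):

const { Constants } = require('librechat-data-provider');
const { getCachedTools, setCachedTools } = require('~/server/services/Config');

// Mirrors the duplicated refresh blocks above; a candidate for factoring out.
async function refreshCachedServerTools({ userId, serverName, connection }) {
  const delimiter = Constants.mcp_delimiter;
  const userTools = (await getCachedTools({ userId })) || {};
  for (const key of Object.keys(userTools)) {
    if (key.endsWith(`${delimiter}${serverName}`)) {
      delete userTools[key]; // drop stale entries for this server
    }
  }
  for (const tool of await connection.fetchTools()) {
    const name = `${tool.name}${delimiter}${serverName}`;
    userTools[name] = {
      type: 'function',
      function: { name, description: tool.description, parameters: tool.inputSchema },
    };
  }
  await setCachedTools(userTools, { userId });
}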

View File

@@ -172,40 +172,68 @@ router.patch('/preferences', checkMemoryOptOut, async (req, res) => {
/**
* PATCH /memories/:key
* Updates the value of an existing memory entry for the authenticated user.
* Body: { value: string }
* Body: { key?: string, value: string }
* Returns 200 and { updated: true, memory: <updatedDoc> } when successful.
*/
router.patch('/:key', checkMemoryUpdate, async (req, res) => {
const { key } = req.params;
const { value } = req.body || {};
const { key: urlKey } = req.params;
const { key: bodyKey, value } = req.body || {};
if (typeof value !== 'string' || value.trim() === '') {
return res.status(400).json({ error: 'Value is required and must be a non-empty string.' });
}
// Use the key from the body if provided, otherwise use the key from the URL
const newKey = bodyKey || urlKey;
try {
const tokenCount = Tokenizer.getTokenCount(value, 'o200k_base');
const memories = await getAllUserMemories(req.user.id);
const existingMemory = memories.find((m) => m.key === key);
const existingMemory = memories.find((m) => m.key === urlKey);
if (!existingMemory) {
return res.status(404).json({ error: 'Memory not found.' });
}
const result = await setMemory({
userId: req.user.id,
key,
value,
tokenCount,
});
// If the key is changing, we need to handle it specially
if (newKey !== urlKey) {
const keyExists = memories.find((m) => m.key === newKey);
if (keyExists) {
return res.status(409).json({ error: 'Memory with this key already exists.' });
}
if (!result.ok) {
return res.status(500).json({ error: 'Failed to update memory.' });
const createResult = await createMemory({
userId: req.user.id,
key: newKey,
value,
tokenCount,
});
if (!createResult.ok) {
return res.status(500).json({ error: 'Failed to create new memory.' });
}
const deleteResult = await deleteMemory({ userId: req.user.id, key: urlKey });
if (!deleteResult.ok) {
return res.status(500).json({ error: 'Failed to delete old memory.' });
}
} else {
// Key is not changing, just update the value
const result = await setMemory({
userId: req.user.id,
key: newKey,
value,
tokenCount,
});
if (!result.ok) {
return res.status(500).json({ error: 'Failed to update memory.' });
}
}
const updatedMemories = await getAllUserMemories(req.user.id);
const updatedMemory = updatedMemories.find((m) => m.key === key);
const updatedMemory = updatedMemories.find((m) => m.key === newKey);
res.json({ updated: true, memory: updatedMemory });
} catch (error) {
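The net behavior of the updated PATCH handler: the URL names the existing key, and an optional `key` in the body renames it (implemented above as create-new-then-delete-old). An illustrative client call, assuming the router is mounted under /api/memories:

// Rename 'old-key' to 'new-key' while updating its value.
async function renameMemory() {
  const res = await fetch('/api/memories/old-key', {
    method: 'PATCH',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ key: 'new-key', value: 'updated memory text' }),
  });
  // 200 -> { updated: true, memory: {...} }; 409 if 'new-key' already exists;
  // 404 if 'old-key' does not exist.
  return res.json();
}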

View File

@@ -1,8 +1,11 @@
const express = require('express');
const staticCache = require('../utils/staticCache');
const paths = require('~/config/paths');
const { isEnabled } = require('~/server/utils');
const skipGzipScan = !isEnabled(process.env.ENABLE_IMAGE_OUTPUT_GZIP_SCAN);
const router = express.Router();
router.use(staticCache(paths.imageOutput));
router.use(staticCache(paths.imageOutput, { skipGzipScan }));
module.exports = router;
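Note the inversion here: the option passed to `staticCache` is `skipGzipScan`, so scanning for precompressed `.gz` siblings is opt-in via the env flag.

const { isEnabled } = require('~/server/utils');
const skipGzipScan = !isEnabled(process.env.ENABLE_IMAGE_OUTPUT_GZIP_SCAN);
// ENABLE_IMAGE_OUTPUT_GZIP_SCAN=true -> skipGzipScan === false (serve .gz siblings)
// unset or false                     -> skipGzipScan === true  (plain files only)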

View File

@@ -1,12 +1,11 @@
const { agentsConfigSetup, loadWebSearchConfig } = require('@librechat/api');
const {
FileSources,
loadOCRConfig,
EModelEndpoint,
loadMemoryConfig,
getConfigDefaults,
loadWebSearchConfig,
} = require('librechat-data-provider');
const { agentsConfigSetup } = require('@librechat/api');
const {
checkHealth,
checkConfig,
@@ -158,6 +157,10 @@ const AppService = async (app) => {
}
});
if (endpoints?.all) {
endpointLocals.all = endpoints.all;
}
app.locals = {
...defaultLocals,
fileConfig: config?.fileConfig,
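With `endpoints.all` now copied onto `app.locals.all`, endpoint-agnostic defaults (title settings, streamRate) sit alongside per-endpoint config, as the new tests below exercise. A sketch of a consumer-side lookup, assuming per-endpoint values are meant to override the shared block (the merge semantics are an assumption here, not shown in the diff; `app` is the Express app configured above):

// Hypothetical consumer: shared defaults from 'all', overridden per endpoint.
const { EModelEndpoint } = require('librechat-data-provider');
const titleConfig = {
  ...(app.locals.all ?? {}),
  ...(app.locals[EModelEndpoint.openAI] ?? {}),
};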

View File

@@ -152,12 +152,14 @@ describe('AppService', () => {
filteredTools: undefined,
includedTools: undefined,
webSearch: {
safeSearch: 1,
jinaApiKey: '${JINA_API_KEY}',
cohereApiKey: '${COHERE_API_KEY}',
serperApiKey: '${SERPER_API_KEY}',
searxngApiKey: '${SEARXNG_API_KEY}',
firecrawlApiKey: '${FIRECRAWL_API_KEY}',
firecrawlApiUrl: '${FIRECRAWL_API_URL}',
jinaApiKey: '${JINA_API_KEY}',
safeSearch: 1,
serperApiKey: '${SERPER_API_KEY}',
searxngInstanceUrl: '${SEARXNG_INSTANCE_URL}',
},
memory: undefined,
agents: {
@@ -541,6 +543,206 @@ describe('AppService', () => {
expect(process.env.IMPORT_USER_MAX).toEqual('initialUserMax');
expect(process.env.IMPORT_USER_WINDOW).toEqual('initialUserWindow');
});
it('should correctly configure endpoint with titlePrompt, titleMethod, and titlePromptTemplate', async () => {
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
Promise.resolve({
endpoints: {
[EModelEndpoint.openAI]: {
titleConvo: true,
titleModel: 'gpt-3.5-turbo',
titleMethod: 'structured',
titlePrompt: 'Custom title prompt for conversation',
titlePromptTemplate: 'Summarize this conversation: {{conversation}}',
},
[EModelEndpoint.assistants]: {
titleMethod: 'functions',
titlePrompt: 'Generate a title for this assistant conversation',
titlePromptTemplate: 'Assistant conversation template: {{messages}}',
},
[EModelEndpoint.azureOpenAI]: {
groups: azureGroups,
titleConvo: true,
titleMethod: 'completion',
titleModel: 'gpt-4',
titlePrompt: 'Azure title prompt',
titlePromptTemplate: 'Azure conversation: {{context}}',
},
},
}),
);
await AppService(app);
// Check OpenAI endpoint configuration
expect(app.locals).toHaveProperty(EModelEndpoint.openAI);
expect(app.locals[EModelEndpoint.openAI]).toEqual(
expect.objectContaining({
titleConvo: true,
titleModel: 'gpt-3.5-turbo',
titleMethod: 'structured',
titlePrompt: 'Custom title prompt for conversation',
titlePromptTemplate: 'Summarize this conversation: {{conversation}}',
}),
);
// Check Assistants endpoint configuration
expect(app.locals).toHaveProperty(EModelEndpoint.assistants);
expect(app.locals[EModelEndpoint.assistants]).toMatchObject({
titleMethod: 'functions',
titlePrompt: 'Generate a title for this assistant conversation',
titlePromptTemplate: 'Assistant conversation template: {{messages}}',
});
// Check Azure OpenAI endpoint configuration
expect(app.locals).toHaveProperty(EModelEndpoint.azureOpenAI);
expect(app.locals[EModelEndpoint.azureOpenAI]).toEqual(
expect.objectContaining({
titleConvo: true,
titleMethod: 'completion',
titleModel: 'gpt-4',
titlePrompt: 'Azure title prompt',
titlePromptTemplate: 'Azure conversation: {{context}}',
}),
);
});
it('should configure Agent endpoint with title generation settings', async () => {
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
Promise.resolve({
endpoints: {
[EModelEndpoint.agents]: {
disableBuilder: false,
titleConvo: true,
titleModel: 'gpt-4',
titleMethod: 'structured',
titlePrompt: 'Generate a descriptive title for this agent conversation',
titlePromptTemplate: 'Agent conversation summary: {{content}}',
recursionLimit: 15,
capabilities: [AgentCapabilities.tools, AgentCapabilities.actions],
},
},
}),
);
await AppService(app);
expect(app.locals).toHaveProperty(EModelEndpoint.agents);
expect(app.locals[EModelEndpoint.agents]).toMatchObject({
disableBuilder: false,
titleConvo: true,
titleModel: 'gpt-4',
titleMethod: 'structured',
titlePrompt: 'Generate a descriptive title for this agent conversation',
titlePromptTemplate: 'Agent conversation summary: {{content}}',
recursionLimit: 15,
capabilities: expect.arrayContaining([AgentCapabilities.tools, AgentCapabilities.actions]),
});
});
it('should handle missing title configuration options with defaults', async () => {
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
Promise.resolve({
endpoints: {
[EModelEndpoint.openAI]: {
titleConvo: true,
// titlePrompt and titlePromptTemplate are not provided
},
},
}),
);
await AppService(app);
expect(app.locals).toHaveProperty(EModelEndpoint.openAI);
expect(app.locals[EModelEndpoint.openAI]).toMatchObject({
titleConvo: true,
});
// Check that the optional fields are undefined when not provided
expect(app.locals[EModelEndpoint.openAI].titlePrompt).toBeUndefined();
expect(app.locals[EModelEndpoint.openAI].titlePromptTemplate).toBeUndefined();
expect(app.locals[EModelEndpoint.openAI].titleMethod).toBeUndefined();
});
it('should correctly configure titleEndpoint when specified', async () => {
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
Promise.resolve({
endpoints: {
[EModelEndpoint.openAI]: {
titleConvo: true,
titleModel: 'gpt-3.5-turbo',
titleEndpoint: EModelEndpoint.anthropic,
titlePrompt: 'Generate a concise title',
},
[EModelEndpoint.agents]: {
titleEndpoint: 'custom-provider',
titleMethod: 'structured',
},
},
}),
);
await AppService(app);
// Check OpenAI endpoint has titleEndpoint
expect(app.locals).toHaveProperty(EModelEndpoint.openAI);
expect(app.locals[EModelEndpoint.openAI]).toMatchObject({
titleConvo: true,
titleModel: 'gpt-3.5-turbo',
titleEndpoint: EModelEndpoint.anthropic,
titlePrompt: 'Generate a concise title',
});
// Check Agents endpoint has titleEndpoint
expect(app.locals).toHaveProperty(EModelEndpoint.agents);
expect(app.locals[EModelEndpoint.agents]).toMatchObject({
titleEndpoint: 'custom-provider',
titleMethod: 'structured',
});
});
it('should correctly configure all endpoint when specified', async () => {
require('./Config/loadCustomConfig').mockImplementationOnce(() =>
Promise.resolve({
endpoints: {
all: {
titleConvo: true,
titleModel: 'gpt-4o-mini',
titleMethod: 'structured',
titlePrompt: 'Default title prompt for all endpoints',
titlePromptTemplate: 'Default template: {{conversation}}',
titleEndpoint: EModelEndpoint.anthropic,
streamRate: 50,
},
[EModelEndpoint.openAI]: {
titleConvo: true,
titleModel: 'gpt-3.5-turbo',
},
},
}),
);
await AppService(app);
// Check that 'all' endpoint config is loaded
expect(app.locals).toHaveProperty('all');
expect(app.locals.all).toMatchObject({
titleConvo: true,
titleModel: 'gpt-4o-mini',
titleMethod: 'structured',
titlePrompt: 'Default title prompt for all endpoints',
titlePromptTemplate: 'Default template: {{conversation}}',
titleEndpoint: EModelEndpoint.anthropic,
streamRate: 50,
});
// Check that OpenAI endpoint has its own config
expect(app.locals).toHaveProperty(EModelEndpoint.openAI);
expect(app.locals[EModelEndpoint.openAI]).toMatchObject({
titleConvo: true,
titleModel: 'gpt-3.5-turbo',
});
});
});
describe('AppService updating app.locals and issuing warnings', () => {

View File

@@ -281,7 +281,7 @@ function createInProgressHandler(openai, thread_id, messages) {
   openai.seenCompletedMessages.add(message_id);
-  const message = await openai.beta.threads.messages.retrieve(thread_id, message_id);
+  const message = await openai.beta.threads.messages.retrieve(message_id, { thread_id });
   if (!message?.content?.length) {
     return;
   }
@@ -435,9 +435,11 @@ async function runAssistant({
     };
   });
-  const outputs = await processRequiredActions(openai, actions);
-  const toolRun = await openai.beta.threads.runs.submitToolOutputs(run.thread_id, run.id, outputs);
+  const tool_outputs = await processRequiredActions(openai, actions);
+  const toolRun = await openai.beta.threads.runs.submitToolOutputs(run.id, {
+    thread_id: run.thread_id,
+    tool_outputs,
+  });
   // Recursive call with accumulated steps and messages
   return await runAssistant({
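
Both hunks above follow the OpenAI Node SDK's newer calling convention for thread-scoped resources: the primary resource ID comes first, and thread_id moves into the params object. A minimal sketch of the new shapes, assuming an initialized client; the IDs and tool output are hypothetical:

const OpenAI = require('openai');

async function demo() {
  const openai = new OpenAI();
  // Hypothetical IDs, for illustration only
  const thread_id = 'thread_abc123';
  const message_id = 'msg_abc123';
  const run = { id: 'run_abc123', thread_id };

  // Old shape: messages.retrieve(thread_id, message_id)
  const message = await openai.beta.threads.messages.retrieve(message_id, { thread_id });

  // Old shape: runs.submitToolOutputs(thread_id, run_id, outputs)
  await openai.beta.threads.runs.submitToolOutputs(run.id, {
    thread_id: run.thread_id,
    tool_outputs: [{ tool_call_id: 'call_abc123', output: '42' }],
  });
  return message;
}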

View File

@@ -1,10 +1,8 @@
 const { logger } = require('@librechat/data-schemas');
-const { getUserMCPAuthMap } = require('@librechat/api');
+const { isEnabled, getUserMCPAuthMap } = require('@librechat/api');
 const { CacheKeys, EModelEndpoint } = require('librechat-data-provider');
-const { normalizeEndpointName, isEnabled } = require('~/server/utils');
+const { normalizeEndpointName } = require('~/server/utils');
 const loadCustomConfig = require('./loadCustomConfig');
-const { getCachedTools } = require('./getCachedTools');
-const { findPluginAuthsByKeys } = require('~/models');
 const getLogStores = require('~/cache/getLogStores');
/**
@@ -13,8 +11,8 @@ const getLogStores = require('~/cache/getLogStores');
  * @returns {Promise<TCustomConfig | null>}
  * */
 async function getCustomConfig() {
-  const cache = getLogStores(CacheKeys.CONFIG_STORE);
-  return (await cache.get(CacheKeys.CUSTOM_CONFIG)) || (await loadCustomConfig());
+  const cache = getLogStores(CacheKeys.STATIC_CONFIG);
+  return (await cache.get(CacheKeys.LIBRECHAT_YAML_CONFIG)) || (await loadCustomConfig());
 }
/**
@@ -55,46 +53,44 @@ const getCustomEndpointConfig = async (endpoint) => {
   );
 };
-async function createGetMCPAuthMap() {
+/**
+ * @param {Object} params
+ * @param {string} params.userId
+ * @param {GenericTool[]} [params.tools]
+ * @param {import('@librechat/data-schemas').PluginAuthMethods['findPluginAuthsByKeys']} params.findPluginAuthsByKeys
+ * @returns {Promise<Record<string, Record<string, string>> | undefined>}
+ */
+async function getMCPAuthMap({ userId, tools, findPluginAuthsByKeys }) {
+  try {
+    if (!tools || tools.length === 0) {
+      return;
+    }
+    return await getUserMCPAuthMap({
+      tools,
+      userId,
+      findPluginAuthsByKeys,
+    });
+  } catch (err) {
+    logger.error(
+      `[api/server/controllers/agents/client.js #chatCompletion] Error getting custom user vars for agent`,
+      err,
+    );
+  }
+}
+/**
+ * @returns {Promise<boolean>}
+ */
+async function hasCustomUserVars() {
   const customConfig = await getCustomConfig();
   const mcpServers = customConfig?.mcpServers;
-  const hasCustomUserVars = Object.values(mcpServers ?? {}).some((server) => server.customUserVars);
-  if (!hasCustomUserVars) {
-    return;
-  }
-  /**
-   * @param {Object} params
-   * @param {GenericTool[]} [params.tools]
-   * @param {string} params.userId
-   * @returns {Promise<Record<string, Record<string, string>> | undefined>}
-   */
-  return async function ({ tools, userId }) {
-    try {
-      if (!tools || tools.length === 0) {
-        return;
-      }
-      const appTools = await getCachedTools({
-        userId,
-      });
-      return await getUserMCPAuthMap({
-        tools,
-        userId,
-        appTools,
-        findPluginAuthsByKeys,
-      });
-    } catch (err) {
-      logger.error(
-        `[api/server/controllers/agents/client.js #chatCompletion] Error getting custom user vars for agent`,
-        err,
-      );
-    }
-  };
+  return Object.values(mcpServers ?? {}).some((server) => server.customUserVars);
 }
 module.exports = {
+  getMCPAuthMap,
   getCustomConfig,
   getBalanceConfig,
-  createGetMCPAuthMap,
+  hasCustomUserVars,
   getCustomEndpointConfig,
 };
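
This refactor replaces the createGetMCPAuthMap closure factory with a stateless getMCPAuthMap that receives findPluginAuthsByKeys from the caller, plus a standalone hasCustomUserVars check. A rough usage sketch; the module path and the calling pattern are assumptions, not shown in the diff:

const { findPluginAuthsByKeys } = require('~/models');
const { getMCPAuthMap, hasCustomUserVars } = require('~/server/services/Config');

async function resolveUserMCPAuth(userId, tools) {
  // Skip the per-user credential lookup entirely when no MCP server declares customUserVars
  if (!(await hasCustomUserVars())) {
    return;
  }
  // Resolves to e.g. { 'my-mcp-server': { API_KEY: '...' } }, or undefined for empty input
  return await getMCPAuthMap({ userId, tools, findPluginAuthsByKeys });
}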

View File

@@ -1,4 +1,10 @@
-const { CacheKeys, EModelEndpoint, orderEndpointsConfig } = require('librechat-data-provider');
+const {
+  CacheKeys,
+  EModelEndpoint,
+  isAgentsEndpoint,
+  orderEndpointsConfig,
+  defaultAgentCapabilities,
+} = require('librechat-data-provider');
 const loadDefaultEndpointsConfig = require('./loadDefaultEConfig');
 const loadConfigEndpoints = require('./loadConfigEndpoints');
 const getLogStores = require('~/cache/getLogStores');
@@ -80,8 +86,12 @@ async function getEndpointsConfig(req) {
  * @returns {Promise<boolean>}
  */
 const checkCapability = async (req, capability) => {
+  const isAgents = isAgentsEndpoint(req.body?.original_endpoint || req.body?.endpoint);
   const endpointsConfig = await getEndpointsConfig(req);
-  const capabilities = endpointsConfig?.[EModelEndpoint.agents]?.capabilities ?? [];
+  const capabilities =
+    isAgents || endpointsConfig?.[EModelEndpoint.agents]?.capabilities != null
+      ? (endpointsConfig?.[EModelEndpoint.agents]?.capabilities ?? [])
+      : defaultAgentCapabilities;
   return capabilities.includes(capability);
 };
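
The new checkCapability branch is easiest to read as a decision table: explicit agents requests and any configured capabilities list keep the old behavior, while other endpoints with nothing configured now fall back to defaultAgentCapabilities rather than an empty array. A condensed restatement; the default list shown is illustrative, not the actual export:

// Illustrative stand-in for the defaultAgentCapabilities export
const defaultAgentCapabilities = ['tools', 'actions', 'file_search'];

function resolveCapabilities(isAgents, configured) {
  return isAgents || configured != null ? (configured ?? []) : defaultAgentCapabilities;
}

resolveCapabilities(false, undefined); // -> defaultAgentCapabilities (non-agents request, nothing configured)
resolveCapabilities(true, undefined);  // -> [] (agents request, nothing configured)
resolveCapabilities(true, ['tools']);  // -> ['tools'] (a configured list always wins)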

View File

@@ -1,7 +1,7 @@
 const path = require('path');
-const { loadServiceKey } = require('@librechat/api');
 const { logger } = require('@librechat/data-schemas');
+const { loadServiceKey, isUserProvided } = require('@librechat/api');
 const { EModelEndpoint } = require('librechat-data-provider');
-const { isUserProvided } = require('~/server/utils');
 const { config } = require('./EndpointService');
 const { openAIApiKey, azureOpenAIApiKey, useAzurePlugins, userProvidedOpenAI, googleKey } = config;
@@ -11,28 +11,28 @@ const { openAIApiKey, azureOpenAIApiKey, useAzurePlugins, userProvidedOpenAI, go
  * @param {Express.Request} req - The request object
  */
 async function loadAsyncEndpoints(req) {
-  let i = 0;
   let serviceKey, googleUserProvides;
-  const serviceKeyPath =
-    process.env.GOOGLE_SERVICE_KEY_FILE_PATH ||
-    path.join(__dirname, '../../..', 'data', 'auth.json');
-  try {
-    serviceKey = await loadServiceKey(serviceKeyPath);
-  } catch {
-    if (i === 0) {
-      i++;
+  /** Check if GOOGLE_KEY is provided at all(including 'user_provided') */
+  const isGoogleKeyProvided = googleKey && googleKey.trim() !== '';
+  if (isGoogleKeyProvided) {
+    /** If GOOGLE_KEY is provided, check if it's user_provided */
+    googleUserProvides = isUserProvided(googleKey);
+  } else {
+    /** Only attempt to load service key if GOOGLE_KEY is not provided */
+    const serviceKeyPath =
+      process.env.GOOGLE_SERVICE_KEY_FILE || path.join(__dirname, '../../..', 'data', 'auth.json');
+    try {
+      serviceKey = await loadServiceKey(serviceKeyPath);
+    } catch (error) {
+      logger.error('Error loading service key', error);
+      serviceKey = null;
     }
   }
-  if (isUserProvided(googleKey)) {
-    googleUserProvides = true;
-    if (i <= 1) {
-      i++;
-    }
-  }
-  const google = serviceKey || googleKey ? { userProvide: googleUserProvides } : false;
+  const google = serviceKey || isGoogleKeyProvided ? { userProvide: googleUserProvides } : false;
   const useAzure = req.app.locals[EModelEndpoint.azureOpenAI]?.plugins;
   const gptPlugins =
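
The rewrite drops the vestigial i counter and makes the precedence explicit: a non-empty GOOGLE_KEY (including the literal 'user_provided') short-circuits service-key loading, and the key file path is now read from GOOGLE_SERVICE_KEY_FILE rather than GOOGLE_SERVICE_KEY_FILE_PATH. A condensed restatement of the new control flow, not the actual module; it assumes isUserProvided checks for the 'user_provided' sentinel:

function resolveGoogle(googleKey, serviceKey) {
  const isGoogleKeyProvided = Boolean(googleKey && googleKey.trim() !== '');
  // The service key file is only consulted when GOOGLE_KEY is absent
  const googleUserProvides = isGoogleKeyProvided ? googleKey === 'user_provided' : undefined;
  return serviceKey || isGoogleKeyProvided ? { userProvide: googleUserProvides } : false;
}

// resolveGoogle('user_provided', null) -> { userProvide: true }
// resolveGoogle('', serviceKey)        -> { userProvide: undefined }
// resolveGoogle('', null)              -> false (Google endpoint disabled)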

View File

@@ -25,7 +25,7 @@ let i = 0;
  * @function loadCustomConfig
  * @returns {Promise<TCustomConfig | null>} A promise that resolves to null or the custom config object.
  * */
-async function loadCustomConfig() {
+async function loadCustomConfig(printConfig = true) {
   // Use CONFIG_PATH if set, otherwise fallback to defaultConfigPath
   const configPath = process.env.CONFIG_PATH || defaultConfigPath;
@@ -108,9 +108,11 @@ https://www.librechat.ai/docs/configuration/stt_tts`);
     return null;
   } else {
-    logger.info('Custom config file loaded:');
-    logger.info(JSON.stringify(customConfig, null, 2));
-    logger.debug('Custom config:', customConfig);
+    if (printConfig) {
+      logger.info('Custom config file loaded:');
+      logger.info(JSON.stringify(customConfig, null, 2));
+      logger.debug('Custom config:', customConfig);
+    }
   }
   (customConfig.endpoints?.custom ?? [])
@@ -118,8 +120,8 @@ https://www.librechat.ai/docs/configuration/stt_tts`);
     .forEach((endpoint) => parseCustomParams(endpoint.name, endpoint.customParams));
   if (customConfig.cache) {
-    const cache = getLogStores(CacheKeys.CONFIG_STORE);
-    await cache.set(CacheKeys.CUSTOM_CONFIG, customConfig);
+    const cache = getLogStores(CacheKeys.STATIC_CONFIG);
+    await cache.set(CacheKeys.LIBRECHAT_YAML_CONFIG, customConfig);
   }
   if (result.data.modelSpecs) {
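
The new printConfig flag lets repeat callers reload and re-cache the YAML without re-printing it, and the cached entry now lives under the STATIC_CONFIG store with the LIBRECHAT_YAML_CONFIG key. A minimal usage sketch:

const loadCustomConfig = require('./loadCustomConfig');

async function reload() {
  // Default: parses librechat.yaml and prints the loaded config to the logs
  const config = await loadCustomConfig();

  // Quiet reload: same parsing and caching, no log output
  const refreshed = await loadCustomConfig(false);
  return refreshed ?? config;
}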

View File

@@ -8,11 +8,12 @@ const {
   ErrorTypes,
   EModelEndpoint,
   EToolResources,
+  isAgentsEndpoint,
   replaceSpecialVars,
   providerEndpointMap,
 } = require('librechat-data-provider');
-const { getProviderConfig } = require('~/server/services/Endpoints');
 const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
+const { getProviderConfig } = require('~/server/services/Endpoints');
 const { processFiles } = require('~/server/services/Files/process');
 const { getFiles, getToolFilesByIds } = require('~/models/File');
 const { getConvoFiles } = require('~/models/Conversation');
@@ -42,7 +43,11 @@ const initializeAgent = async ({
   allowedProviders,
   isInitialAgent = false,
 }) => {
-  if (allowedProviders.size > 0 && !allowedProviders.has(agent.provider)) {
+  if (
+    isAgentsEndpoint(endpointOption?.endpoint) &&
+    allowedProviders.size > 0 &&
+    !allowedProviders.has(agent.provider)
+  ) {
     throw new Error(
       `{ "type": "${ErrorTypes.INVALID_AGENT_PROVIDER}", "info": "${agent.provider}" }`,
     );
@@ -82,6 +87,7 @@ const initializeAgent = async ({
     attachments: currentFiles,
     tool_resources: agent.tool_resources,
     requestFileSet: new Set(requestFiles?.map((file) => file.file_id)),
+    agentId: agent.id,
   });
   const provider = agent.provider;
@@ -98,7 +104,7 @@ const initializeAgent = async ({
   agent.endpoint = provider;
   const { getOptions, overrideProvider } = await getProviderConfig(provider);
-  if (overrideProvider) {
+  if (overrideProvider !== agent.provider) {
     agent.provider = overrideProvider;
   }
@@ -125,7 +131,7 @@ const initializeAgent = async ({
   );
   const agentMaxContextTokens = optionalChainWithEmptyCheck(
     maxContextTokens,
-    getModelMaxTokens(tokensModel, providerEndpointMap[provider]),
+    getModelMaxTokens(tokensModel, providerEndpointMap[provider], options.endpointTokenConfig),
     4096,
   );
@@ -149,7 +155,9 @@ const initializeAgent = async ({
   ) {
     throw new Error(`{ "type": "${ErrorTypes.GOOGLE_TOOL_CONFLICT}"}`);
   } else if (
-    (agent.provider === Providers.OPENAI || agent.provider === Providers.AZURE) &&
+    (agent.provider === Providers.OPENAI ||
+      agent.provider === Providers.AZURE ||
+      agent.provider === Providers.ANTHROPIC) &&
     options.tools?.length &&
     structuredTools?.length
   ) {
@@ -178,11 +186,12 @@ const initializeAgent = async ({
   return {
     ...agent,
-    tools,
     attachments,
     resendFiles,
     toolContextMap,
+    tools,
-    maxContextTokens: (agentMaxContextTokens - maxTokens) * 0.9,
+    useLegacyContent: !!options.useLegacyContent,
+    maxContextTokens: Math.round((agentMaxContextTokens - maxTokens) * 0.9),
   };
 };
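
Two of these changes are subtle: getModelMaxTokens now receives options.endpointTokenConfig, so custom endpoints can resolve their configured context windows, and the 10% safety margin is rounded so maxContextTokens stays an integer. A worked example of the rounding, with illustrative numbers:

const agentMaxContextTokens = 8192; // example values only
const maxTokens = 1024;

const before = (agentMaxContextTokens - maxTokens) * 0.9;            // 6451.2 (fractional budget)
const after = Math.round((agentMaxContextTokens - maxTokens) * 0.9); // 6451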

View File

@@ -78,7 +78,17 @@ function getLLMConfig(apiKey, options = {}) {
     requestOptions.anthropicApiUrl = options.reverseProxyUrl;
   }
+  const tools = [];
+  if (mergedOptions.web_search) {
+    tools.push({
+      type: 'web_search_20250305',
+      name: 'web_search',
+    });
+  }
   return {
+    tools,
     /** @type {AnthropicClientOptions} */
     llmConfig: removeNullishValues(requestOptions),
   };
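
This wires Anthropic's server-side web search tool into the LLM config whenever web_search is enabled. A sketch of how such a tool entry is consumed, assuming the official @anthropic-ai/sdk; the model ID and prompt are illustrative:

const Anthropic = require('@anthropic-ai/sdk');

async function demo() {
  const client = new Anthropic();
  // Mirrors the entry getLLMConfig now emits when mergedOptions.web_search is set
  const tools = [{ type: 'web_search_20250305', name: 'web_search' }];
  const response = await client.messages.create({
    model: 'claude-sonnet-4-20250514', // illustrative model ID
    max_tokens: 1024,
    tools,
    messages: [{ role: 'user', content: 'Find the latest LibreChat release notes.' }],
  });
  return response.content;
}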

View File

@@ -1,12 +1,12 @@
 const OpenAI = require('openai');
-const { HttpsProxyAgent } = require('https-proxy-agent');
+const { ProxyAgent } = require('undici');
 const { ErrorTypes, EModelEndpoint } = require('librechat-data-provider');
 const {
   getUserKeyValues,
   getUserKeyExpiry,
   checkUserKeyExpiry,
 } = require('~/server/services/UserService');
-const OpenAIClient = require('~/app/clients/OpenAIClient');
+const OAIClient = require('~/app/clients/OpenAIClient');
 const { isUserProvided } = require('~/server/utils');
@@ -59,7 +59,10 @@ const initializeClient = async ({ req, res, endpointOption, version, initAppClie
   }
   if (PROXY) {
-    opts.httpAgent = new HttpsProxyAgent(PROXY);
+    const proxyAgent = new ProxyAgent(PROXY);
+    opts.fetchOptions = {
+      dispatcher: proxyAgent,
+    };
   }
   if (OPENAI_ORGANIZATION) {
@@ -76,7 +79,7 @@ const initializeClient = async ({ req, res, endpointOption, version, initAppClie
   openai.res = res;
   if (endpointOption && initAppClient) {
-    const client = new OpenAIClient(apiKey, clientOptions);
+    const client = new OAIClient(apiKey, clientOptions);
     return {
       client,
       openai,
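
The proxy change swaps https-proxy-agent's httpAgent for undici's ProxyAgent passed as a fetch dispatcher, matching the fetch-based transport of newer openai SDK versions. A minimal sketch; the proxy URL is hypothetical:

const OpenAI = require('openai');
const { ProxyAgent } = require('undici');

const PROXY = 'http://127.0.0.1:8080'; // hypothetical proxy URL
const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
  fetchOptions: {
    dispatcher: new ProxyAgent(PROXY),
  },
});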

Some files were not shown because too many files have changed in this diff.