Compare commits


31 Commits

Author SHA1 Message Date
Marco Beretta
1386c38b14 refactor: correct type definitions for ToolInputSchema and error handling in searchFiles function 2025-05-31 20:53:12 +02:00
Marco Beretta
a06e999dd6 cleanup: code formatting and improve readability across multiple components 2025-05-31 20:48:23 +02:00
Ruben Talstra
4808c5be48 🔧 fix: Update xml-crypto and xmldom dependencies in package-lock.json (#7630) 2025-05-29 14:19:08 -04:00
Danny Avila
c517f668fc 🔧 chore: Remove rollup-plugin-visualizer 2025-05-29 11:08:42 -04:00
tsutsu3
939b4ce659 🔑 feat: SAML authentication (#6169)
* feat: add SAML authentication

* refactor: change SAML icon

* refactor: resolve SAML metadata paths using paths.js

* test: add samlStrategy tests

* fix: update setupSaml import

* test: add SAML settings tests in config.spec.js

* test: add client tests

* refactor: improve SAML button label and fallback localization

* feat: allow only one authentication method OpenID or SAML at a time

* doc: add SAML configuration sample to docker-compose.override

* fix: require SAML_SESSION_SECRET to enable SAML

* feat: update samlStrategy

* test: update SAML tests

* feat: add SAML login button label to translations and remove default value

* fix: update SAML cert file binding

* chore: update override example with SAML cert volume

* fix: update SAML session handling with Redis backend

---------

Co-authored-by: Ruben Talstra <RubenTalstra1211@outlook.com>
2025-05-29 11:00:58 -04:00
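
A minimal sketch of the `SAML_SESSION_SECRET` guard described in the SAML commit above, assuming hypothetical names for the other SAML settings (not necessarily LibreChat's actual variables):

```ts
// SAML is only wired up when its required settings are present, including the
// session secret the commit above makes mandatory. Variable names other than
// SAML_SESSION_SECRET are illustrative assumptions.
function isSamlEnabled(env: NodeJS.ProcessEnv = process.env): boolean {
  return Boolean(env.SAML_ENTRY_POINT && env.SAML_ISSUER && env.SAML_SESSION_SECRET);
}

if (!isSamlEnabled()) {
  console.warn('SAML login disabled: set SAML_SESSION_SECRET and related variables to enable it.');
}
```
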
github-actions[bot]
87255dac81 🌍 i18n: Update translation.json with latest translations (#7563)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-05-29 10:34:35 -04:00
Danny Avila
442976c74f 🔧 fix: Agent Versioning with Action Hashing and OAuth Redirect (#7627)
* 🔧 chore: Update navigateFallbackDenylist in Vite config to include API routes

* 🔧 fix: Update redirect_uri in createActionTool to use DOMAIN_SERVER instead of DOMAIN_CLIENT

* 🔧 feat: Enhance Agent Versioning with Action Metadata Hashing

- Added support for generating a hash of action metadata to detect changes and manage agent versioning.
- Updated `updateAgent` function to include an optional `forceVersion` parameter for version creation.
- Modified `isDuplicateVersion` to compare action metadata hashes.
- Updated related tests to validate new versioning behavior with action changes.
- Refactored agent update logic to ensure proper tracking of user updates and version history.
2025-05-29 10:30:35 -04:00
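
A minimal sketch of the action-metadata hashing idea described above, assuming a simplified `ActionMetadata` shape; in the commit itself the comparison lives in `updateAgent`/`isDuplicateVersion`:

```ts
import { createHash } from 'crypto';

// Illustrative shape only; real action metadata has more fields.
interface ActionMetadata {
  domain: string;
  auth?: Record<string, unknown>;
}

// Hash a canonical JSON form of the actions so version comparison can detect
// action changes by comparing two short strings instead of deep objects.
function hashActionMetadata(actions: ActionMetadata[]): string {
  const canonical = JSON.stringify(
    actions.map((a) =>
      Object.fromEntries(Object.entries(a).sort(([x], [y]) => x.localeCompare(y))),
    ),
  );
  return createHash('sha256').update(canonical).digest('hex');
}

// Duplicate-version detection can then reduce to a hash comparison:
const unchanged =
  hashActionMetadata([{ domain: 'example.com' }]) ===
  hashActionMetadata([{ domain: 'example.com' }]);
console.log(unchanged); // true
```
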
Michael Clark
fb88ac00c6 ℹ️ feat: Add icons for Google, OpenAI, and Qwen endpoints (#7428)
Co-authored-by: aoaim <assertivemiao@outlook.com>
2025-05-29 08:32:41 -04:00
derek jackson
b846f562be ☀️ a11y: Add Missing Focus to Model Selector in Light Mode (#7607) 2025-05-29 08:27:23 -04:00
Ruben Talstra
5cf86b347f 💸 feat: Balance Tab in Settings Dialog (#6537)
* 🚀 feat: Implement Auto-Refill Settings for Balance

* 🎨 feat: add `copy-tex` to improve copying KaTeX (#7308)

When selecting equations and using copy-paste, the correct LaTeX code is copied.

Co-authored-by: Ruben Talstra <RubenTalstra1211@outlook.com>

* 🔃 refactor: `AgentFooter` to conditionally render buttons based on `activePanel` (#7306)

* 🚀 feat: Add `Cloudflare Turnstile` support (#5987)

* 🚀 feat: Add @marsidev/react-turnstile dependency to package.json and package-lock.json

* 🚀 feat: Integrate Cloudflare Turnstile configuration support in AppService and add schema validation

* 🚀 feat: Implemented Cloudflare Turnstile integration in Login and Registration forms

* 🚀 feat: Enhance AppService tests with additional mocks and configuration setups

* 🚀 feat: Comment out outdated config version warning tests in AppService.spec.js

* 🚀 feat: Remove outdated warning tests and add new checks for environment variables and API health

* 🔧 test: Update AppService.spec.js to use expect.anything() for paths validation

* 🔧 test: Refactor AppService.spec.js to streamline mocks and enhance clarity

* 🔧 chore: removed not needed test

* Potential fix for code scanning alert no. 5638: Ensure code is properly formatted, use insertion, deletion, or replacement to obtain desired formatting.

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Potential fix for code scanning alert no. 5629: Ensure code is properly formatted, use insertion, deletion, or replacement to obtain desired formatting.

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Potential fix for code scanning alert no. 5642: Ensure code is properly formatted, use insertion, deletion, or replacement to obtain desired formatting.

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Update turnstile.js

* Potential fix for code scanning alert no. 5634: Ensure code is properly formatted, use insertion, deletion, or replacement to obtain desired formatting.

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Potential fix for code scanning alert no. 5646: Ensure code is properly formatted, use insertion, deletion, or replacement to obtain desired formatting.

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Potential fix for code scanning alert no. 5647: Ensure code is properly formatted, use insertion, deletion, or replacement to obtain desired formatting.

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

---------

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Potential fix for code scanning alert no. 5764: Ensure code is properly formatted, use insertion, deletion, or replacement to obtain desired formatting.

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* Potential fix for code scanning alert no. 5765: Ensure code is properly formatted, use insertion, deletion, or replacement to obtain desired formatting.

Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>

* 🖼️ feat: Tool Call and Loading UI Refresh, Image Resize Config (#7086)

*  feat: Enhance Spinner component with customizable properties and improved animation

* 🔧 fix: Replace Loader with Spinner in RunCode component and update FilePreview to use Spinner for progress indication

*  feat: Refactor icons in CodeProgress and CancelledIcon components; enhance animation and styling in ExecuteCode and ProgressText components

*  feat: Refactor attachment handling in ExecuteCode component; replace individual attachment rendering with AttachmentGroup for improved structure

*  feat: Refactor dialog components for improved accessibility and styling; integrate Skeleton loading state in Image component

*  feat: Refactor ToolCall component to use ToolCallInfo for better structure; replace ToolPopover with AttachmentGroup; enhance ProgressText with error handling and improved UI elements

* 🔧 fix: Remove unnecessary whitespace in ProgressText

* 🔧 fix: Remove unnecessary margin from AgentFooter and AgentPanel components; clean up SidePanel imports

*  feat: Enhance ToolCall and ToolCallInfo components with improved styling; update translations and add warning text color to Tailwind config

* 🔧 fix: Update import statement for useLocalize in ToolCallInfo component; fix: chatform transition

*  feat: Refactor ToolCall and ToolCallInfo components for improved structure and styling; add optimized code block for better output display

*  feat: Implement OpenAI image generation component; add progress tracking and localization for user feedback

* 🔧 fix: Adjust base duration values for image generation; optimize timing for quality settings

* chore: remove unnecessary space

*  feat: Enhance OpenAI image generation with editing capabilities; update localization for progress feedback

*  feat: Add download functionality to images; enhance DialogImage component with download button

*  feat: Enhance image resizing functionality; support custom percentage and pixel dimensions in resizeImageBuffer

* 📊 feat: Improve Helm Chart (#3638)

* Replaced Helm Charts with Blue Atlas Charts

* Fix Workflow

* improve docs

* update gitignore

* Update docs

* change values order, add hpa

* change tls example domain

* Default: Enable liveness and readiness

* chore: bump base chart

* apply requested changes

* add Release fix

* add: error handling

* chore: cleanup and testing

* fix: adjust Chart.yaml

---------

Co-authored-by: hofq <gregorspalme@protonmail.com>
Co-authored-by: Ruben Talstra <RubenTalstra1211@outlook.com>

* 📜 docs: Unreleased Changelog (#7434)

* action: update Unreleased changelog

* Update CHANGELOG.md

---------

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Danny Avila <danny@librechat.ai>

* 🛡️ chore: `multer` v2.0.0 for CVE-2025-47935 and CVE-2025-47944 (#7454)

* chore: bump multer to v2.0.0 to resolve CVE-2025-47935 and CVE-2025-47944

* chore: temp. remove helmet dependency to appease unused NPM package workflow

* 🎚️ feat: Custom Parameters (#7342)

* #

* - refactor: simplified getCustomConfig func

* #

* - feature: persist values for parameters with optionType of custom

* #

* - refactor: moved `Parameters/settings.ts` into `data-provider` so that both frontend and backend code can use it.

* - feature: loadCustomConfig can now parse and validate customParams property for `endpoints.custom` in `librechat.yaml`

* # fixed linter

* # removed .strict() in config.ts

* change: added packages/data-provider/src to SOURCE_DIRS for i18n check

* # removed unnecessary lodash imports

* # addressed PR comments
# fixed lint for updated files

* # better import for lodash (w/o relying on tree-shaking)

* 📃 fix: Ensure MCP Resources Pass Name and Description Fields to LLM (#7442)

* 🔗 feat: Support Environment Variables in MCP URL Config (#7424)

* 🦙 chore: Add `llama-4` to Vision Models List (#7433)

* 🔧 fix: File Deletion for Azure Assistants API (#7466)

* 🔬 fix: File Search Request Format (Azure Assistants API) (#7404)

* fix: The request format for file analysis with Azure OpenAI assistants

  The request format for file analysis with Azure OpenAI assistants differs from that of OpenAI. This fix updates the API to use attachments instead of file_ids. danny-avila#7379

* chore: ESLint Error

---------

Co-authored-by: Danny Avila <danacordially@gmail.com>

* 🖼️ chore: Linting & Transition Styling in UI Components (#7467)

* chore: linting

* 🔧 fix: Correctly parse dimensions for image width and height in OpenAIImageGen component

* style: overlay class for DialogImage component to improve visibility

* style: Update transition timing function for PixelCard component to rely on style props

*  fix: Emojis rendering in `SplitText` Animation (#7460)

* 📂 refactor: Improve `FileAttachment` & File Form Deletion (#7471)

* refactor: optional attachment properties for `FileAttachment`

* refactor: update ActionButton to use localized text

* chore: localize text in DataTableFile, add missing translation, imports order, and linting

* chore: linting in DataTable

* fix: integrate Recoil state management for file deletion in DataTableFile

* fix: integrate Recoil state management for file deletion in DataTable

* fix: add temp_file_id to BatchFile type and update deleteFiles logic to properly remove files that are mapped to temp_file_id

* 🌍 i18n: Update translation.json with latest translations (#7468)

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>

* 🦾 feat: Claude-4 Support (#7509)

* refactor: Update AnthropicClient to support Claude model naming changes

* Renamed `isClaude3` to `isClaudeLatest` to accommodate newer Claude models.
* Updated logic to determine if the model is part of the Claude family.
* Adjusted `useMessages` property to reflect the new model naming convention.
* Cleaned up client properties during disposal to match the updated naming.

* feat: Claude-4 Support

* feat: Add Thinking and Prompt caching support for Claude 4

* chore: Update ANTHROPIC_MODELS in .env.example for latest model versions

* 📊 chore: Remove Old Helm Chart (#7512)

Co-authored-by: hofq <gregorspalme@protonmail.com>

* 🪨 feat: Bedrock Support for Claude-4 Reasoning (#7517)

* 🗑️ chore: Update .gitignore to reflect AI-related files

* chore: linting in Bedrock options.js

* 🪨 feat: Bedrock Claude-4 Reasoning

* 🪖 chore: bump helm app version to v0.7.8 (#7524)

- bump helm app version to match the latest release version

*  feat: Agent Version History and Management (#7455)

*  feat: Enhance agent update functionality to save current state in versions array

- Updated the `updateAgent` function to push the current agent's state into a new `versions` array when an agent is updated.
- Modified the agent schema to include a `versions` field for storing historical states of agents.

*  feat: Add comprehensive CRUD operations for agents in tests

- Introduced a new test suite for CRUD operations on agents, including create, read, update, and delete functionalities.
- Implemented tests for listing agents by author and updating agent projects.
- Enhanced the agent model to support version history tracking during updates.
- Ensured proper environment variable management during tests.

*  feat: Introduce version tracking for agents and enhance UI components

- Added a `version` property to the agent model to track the number of versions.
- Updated the `getAgentHandler` to include the agent's version in the response.
- Introduced a new `VersionButton` component for navigating to the version panel.
- Created a `VersionPanel` component for displaying version-related information.
- Updated the UI to conditionally render the version button and panel based on the active state.
- Added localization for the new version-related UI elements.

*  i18n: Add "version" translation key across multiple languages

- Introduced the "com_ui_agent_version" translation key in various language files to support version tracking for agents.
- Updated Arabic, Czech, German, English, Spanish, Estonian, Persian, Finnish, French, Hebrew, Hungarian, Indonesian, Italian, Japanese, Korean, Dutch, Polish, Portuguese (Brazil and Portugal), Russian, Swedish, Thai, Turkish, Vietnamese, and Chinese (Simplified and Traditional) translations.

*  feat: Update AgentFooter to conditionally render AdminSettings

- Modified the logic for displaying buttons in the AgentFooter component to only show them when the active panel is the builder.
- Ensured that AdminSettings is displayed only when the user has an admin role and the buttons are visible.

*  feat: Enhance AgentPanelSwitch and VersionPanel for improved agent capabilities

- Updated AgentPanelSwitch to include a new VersionPanel for displaying version-related information.
- Enhanced agentsConfig logic to properly handle agent capabilities.
- Modified VersionPanel to improve structure and localization support.
- Integrated createAgent mutation for future agent creation functionality.

*  feat: Enhance VersionPanel to display agent version history and loading states

- Integrated version fetching logic in VersionPanel to retrieve and display agent version history.
- Added loading and error handling states to improve user experience.
- Updated agent schema to use mixed types for versions, allowing for more flexible version data structures.
- Introduced localization support for version-related UI elements.

*  feat: Update VersionPanel and AgentPanelSwitch to enhance agent selection and version display

- Modified AgentPanelSwitch to pass selectedAgentId to VersionPanel for improved agent context.
- Enhanced VersionPanel to handle multiple timestamp formats and display appropriate messages when no agent is selected.
- Improved structure and readability of the VersionPanel component by adding a helper function for timestamp retrieval.

*  feat: Refactor VersionPanel to utilize localization and improve timestamp handling

- Replaced hardcoded text constants with localization support for various UI elements in VersionPanel.
- Enhanced the timestamp retrieval function to handle errors gracefully and utilize localized messages for unknown dates.
- Improved user feedback by displaying localized messages for agent selection, version errors, and empty states.

*  refactor: Clean up VersionPanel by removing unused code and improving timestamp handling

*  feat: Implement agent version reverting functionality

- Added `revertAgentVersion` method in the Agent model to allow reverting to a previous version of an agent.
- Introduced `revertAgentVersionHandler` in the agents controller to handle requests for reverting agent versions.
- Updated API routes to include a new endpoint for reverting agent versions.
- Enhanced the VersionPanel component to support version restoration with user confirmation and feedback.
- Added localization support for success and error messages related to version restoration.

*  i18n: Add localization for agent version restoration messages

* Simplify VersionPanel by removing unused parameters and enhancing agent ID handling

* Refactor Agent model and VersionPanel component to streamline version data handling

* Update version handling in Agent model and VersionPanel

- Enhanced the Agent model to include an `updatedAt` timestamp when pushing new versions.
- Improved the VersionPanel component to sort versions by the `updatedAt` timestamp for better display order.
- Added a new localization entry for indicating the active version of an agent.

*  i18n: Add localization for active agent version across multiple languages

*  feat: Introduce version management components for agent history

- Added `isActiveVersion` utility to determine the active version of an agent based on various criteria.
- Implemented `VersionContent` and `VersionItem` components to display agent version history, including loading and error states.
- Enhanced `VersionPanel` to integrate new components and manage version context effectively.
- Added comprehensive tests for version management functionalities to ensure reliability and correctness.

* Add unit tests for AgentFooter component

* cleanup

* Enhance agent version update handling and add unit tests for update operators

- Updated the `updateAgent` function to properly handle various update operators ($push, $pull, $addToSet) while maintaining version history.
- Modified unit tests to validate the correct behavior of agent updates, including versioning and tool management.

* Enhance version comparison logic and update tests for artifacts handling

- Modified the `isActiveVersion` utility to include artifacts in the version comparison criteria.
- Updated the `VersionPanel` component to support artifacts in the agent state.
- Added new unit tests to validate artifacts matching scenarios and edge cases in the `isActiveVersion` function.

* Implement duplicate version detection in agent updates and enhance error handling

- Added `isDuplicateVersion` function to check for identical versions during agent updates, excluding certain fields.
- Updated `updateAgent` function to throw an error if a duplicate version is detected, with detailed error information.
- Enhanced the `updateAgentHandler` to return appropriate responses for duplicate version errors.
- Modified client-side error handling to display user-friendly messages for duplicate version scenarios.
- Added comprehensive unit tests to validate duplicate version detection and error handling across various update scenarios.

* Update version title localization to include version number across multiple languages

- Modified the `com_ui_agent_version_title` translation key to include a placeholder for the version number in various language files.
- Enhanced the `VersionItem` component to utilize the updated localization for displaying version titles dynamically.

* Enhance agent version handling and add revert functionality

- Updated the `isDuplicateVersion` function to improve version comparison logic, including special handling for `projectIds` and arrays of objects.
- Modified the `updateAgent` function to streamline version updates and removed unnecessary checks for test environments.
- Introduced a new `revertAgentVersion` function to allow reverting agents to specific versions, with detailed documentation.
- Enhanced unit tests to validate duplicate version detection and revert functionality, ensuring robust error handling and version management.

* fix CI issues

* cleanup

* Revert all non-English translations

* clean up tests

* *️⃣ feat: Reuse OpenID Auth Tokens (#7397)

* feat: integrate OpenID Connect support with token reuse

- Added `jwks-rsa` and `new-openid-client` dependencies for OpenID Connect functionality.
- Implemented OpenID token refresh logic in `AuthController`.
- Enhanced `LogoutController` to handle OpenID logout and session termination.
- Updated JWT authentication middleware to support OpenID token provider.
- Modified OAuth routes to accommodate OpenID authentication and token management.
- Created `setOpenIDAuthTokens` function to manage OpenID tokens in cookies.
- Upgraded OpenID strategy with user info fetching and token exchange protocol.
- Introduced `openIdJwtLogin` strategy for handling OpenID JWT tokens.
- Added caching mechanism for exchanged OpenID tokens.
- Updated configuration to include OpenID exchanged tokens cache key.
- updated .env.example to include the new env variables needed for the feature.

* fix: update return type in downloadImage documentation for clarity and fixed openIdJwtLogin env variables

* fix: update Jest configuration and tests for OpenID strategy integration

* fix: update OpenID strategy to include callback URL in setup

* fix: fix optionalJwtAuth middleware to support OpenID token reuse and improve currentUrl method in CustomOpenIDStrategy to override the dynamic host issue related to proxy (e.g. cloudfront)

* fix: fixed code formatting

* Fix: Add mocks for openid-client and passport strategy in Jest configuration to fix unit tests

* fix eslint errors: Format mock file openid-client.

*  feat: Add PKCE support for OpenID and default handling in strategy setup

---------

Co-authored-by: Atef Bellaaj <slalom.bellaaj@external.daimlertruck.com>
Co-authored-by: Ruben Talstra <RubenTalstra1211@outlook.com>

* 🔎 feat: Native Web Search with Citation References (#7516)

* WIP: search tool integration

* WIP: Add web search capabilities and API key management to agent actions

* WIP: web search capability to agent configuration and selection

* WIP: Add web search capability to backend agent configuration

* WIP: add web search option to default agent form values

* WIP: add attachments for web search

* feat: add plugin for processing web search citations

* WIP: first pass, Citation UI

* chore: remove console.log

* feat: Add AnimatedTabs component for tabbed UI functionality

* refactor: AnimatedTabs component with CSS animations and stable ID generation

* WIP example content

* feat: SearchContext for managing search results apart from MessageContext

* feat: Enhance AnimatedTabs with underline animation and state management

* WIP: first pass, Implement dynamic tab functionality in Sources component with search results integration

* fix: Update class names for improved styling in Sources and AnimatedTabs components

* feat: Improve styling and layout in Sources component with enhanced button and item designs

* feat: Refactor Sources component to integrate OGDialog for source display and improve layout

* style: Update background color in SourceItem and SourcesGroup components for improved visibility

* refactor: Sources component to enhance SourceItem structure and improve favicon handling

* style: Adjust font size of domain text in SourceItem for better readability

* feat: Add localization for citation source and details in CompositeCitation component

* style: add theming to Citation components

* feat: Enhance SourceItem component with dialog support and improved hovercard functionality

* feat: Add localization for sources tab and image alt text in Sources component

* style: Replace divs with spans for better semantic structure in CompositeCitation and Citation components

* refactor: Sources component to use useMemo for tab generation and improve performance

* chore: bump @librechat/agents to v2.4.318

* chore: update search result types

* fix: search results retrieval in ContentParts component, re-render attachments when expected

* feat: update sources style/types to use latest search result structure

* style: enhance Dialog (expanded) SourceItem component with link wrapping and improved styling

* style: update ImageItem component styling for improved title visibility

* refactor: remove SourceItemBase component and adjust SourceItem layout for improved styling

* chore: linting twcss order

* fix: prevent FileAttachment from rendering search attachments

* fix: append underscore to responseMessageId for unique identification to prevent mapping of previous latest message's attachments

* chore: remove unused parameter 'useSpecs' from loadTools function

* chore: twcss order

* WIP: WebSearch Tool UI

* refactor: add limit parameter to StackedFavicons for customizable source display

* refactor: optimize search results memoization by making it more granular and separating concerns

* refactor: integrated StackedFavicons to WebSearch mid-run

* chore: bump @librechat/agents to expose handleToolCallChunks

* chore: use typedefs from dedicated file instead of defining them in AgentClient module

* WIP: first pass, search progress results

* refactor: move createOnSearchResults function to a dedicated search module

* chore: bump @librechat/agents to v2.4.320

* WIP: first pass, search results processed UX

* refactor: consolidate context variables in createOnSearchResults function

* chore: bump @librechat/agents to v2.4.321

* feat: add guidelines for web search tool response formatting in loadTools function

* feat: add isLast prop to Part component and update WebSearch logic for improved state handling

* style: update Hovercard styles for improved UI consistency

* feat: export FaviconImage component for improved accessibility in other modules

* refactor: export getCleanDomain function and use FaviconImage in Citation component for improved source representation

* refactor: implement SourceHovercard component for consistency and DRY compliance

* fix: replace <p> with <span> for snippet and title in SourceItem and SourceHovercard for consistency

* style: `not-prose`

* style: remove 'not-prose' class for consistency in SourceItem, Citation, and SourceHovercard components, adjust style classes

* refactor: `imageUrl` on hover and prevent duplicate sources

* refactor: enhance SourcesGroup dialog layout and improve source item presentation

* refactor: reorganize Web Components, save in same directory

* feat: add 'news' refType to refTypeMap for citation sources

* style: adjust Hovercard width for improved layout

* refactor: update tool usage guidelines for improved clarity and execution

* chore: linting

* feat: add Web Search badge with initial permissions and local storage logic

* feat: add webSearch support to interface and permissions schemas

* feat: implement Web Search API key management and localization updates

* feat: refactor Web Search API key handling and integrate new search API key form

* fix: remove unnecessary visibility state from FileAttachment component

* feat: update WebSearch component to use Globe icon and localized search label

* feat: enhance ApiKeyDialog with dropdown for reranker selection and update translations

* feat: implement dropdown menus for engine, scraper, and reranker selection in ApiKeyDialog

* chore: linting and add unknown instead of `any` type

* feat: refactor ApiKeyDialog and useAuthSearchTool for improved API key management

* refactor: update ocrSchema to use template literals for default apiKey and baseURL

* feat: add web search configuration and utility functions for environment variable extraction

* fix: ensure filepath is defined before checking its prefix in useAttachmentHandler

* feat: enhance web search functionality with improved configuration and environment variable extraction for authFields

* fix: update auth type in TPluginAction and TUpdateUserPlugins to use Partial<Record<string, string>>

* feat: implement web search authentication verification and enhance webSearchAuth structure

* feat: enhance ephemeral agent handling with new web search capability and type definition

* feat: enhance isEphemeralAgent function to include web search selection

* feat: refactor verifyWebSearchAuth to improve key handling and authentication checks

* feat: implement loadWebSearchAuth function for improved web search authentication handling

* feat: enhance web search authentication with new configuration options and refactor related types

* refactor: rename search engine to search provider and update related localization keys

* feat: update verifyWebSearchAuth to handle multiple authentication types and improve error handling

* feat: update ApiKeyDialog to accept authTypes prop and remove isUserProvided check

* feat: add tests for extractWebSearchEnvVars and loadWebSearchAuth functions

* feat: enhance loadWebSearchAuth to support specific service checks for providers, scrapers, and rerankers

* fix: update web search configuration key and adjust auth result handling in loadTools function

* feat: add new progress key for repeated web searching and update localization

* chore: bump @librechat/agents to 2.4.322

* feat: enhance loadTools function to include ISO time and improve search tool logging

* feat: update StackedFavicons to handle negative start index and improve citation attribution styling and text

* chore: update .gitignore to categorize AI-related files

* fix: mobile responsiveness of sources/citations hovercards

* feat: enhance source display with improved line clamping for better readability

* chore: bump @librechat/agents to v2.4.33

* feat: add handling for image sources in references mapping

* chore: bump librechat-data-provider version to 0.7.84

* chore: bump @librechat/agents version to 2.4.34

* fix: update auth handling to support multiple auth types in tools and allow key configuration in agent panel

* chore: remove redundant agent attribution text from search form

* fix: web search auth uninstall

* refactor: convert CheckboxButton to a forwardRef component and update setValue callback signature

* feat: add triggerRef prop to ApiKeyDialog components for improved dialog control

* feat: integrate triggerRef in CodeInterpreter and WebSearch components for enhanced dialog management

* feat: enhance ApiKeyDialog with additional links for Firecrawl and Jina API key guidance

* feat: implement web search configuration handling in ApiKeyDialog and add tests for dropdown visibility

* fix: update webSearchConfig reference in config route for correct payload assignment

* feat: update ApiKeyDialog to conditionally render sections based on authTypes and modify loadWebSearchAuth to correctly categorize authentication types

* feat: refactor ApiKeyDialog and related tests to use SearchCategories and RerankerTypes enums and remove nested ternaries

* refactor: move ThinkingButton rendering to improve layout consistency in ContentParts

* feat: integrate search context into Markdown component to conditionally include unicodeCitation plugin

* chore: bump @librechat/agents to v2.4.35

* chore: remove unused i18n key

* ci: add WEB_SEARCH permission testing and update AppService tests for new webSearch configuration

* ci: add more comprehensive tests for loadWebSearchAuth to validate authentication handling and authTypes structure

* chore: remove debugging console log from web.spec.ts to clean up test output

* 🧹 chore: Bump Agents Dependencies (#7525)

* chore: bump langchain dependencies

* chore: bump @librechat/agents to v2.4.36

* chore: bump @librechat/agents to v2.4.37

* refactor: simplify remark plugins in Markdown component with no conditional usage

* 🔧 refactor: Progress Text Localization for Running Tools (#7526)

* 🔧 chore: Bump Data Provider and Custom Config Versions (#7527)

* 🔧 chore: Update CONFIG_VERSION to 1.2.6

* 🔧 chore: Update librechat-data-provider version to 0.7.85

* 👤 feat: Enhance Agent Versioning to Track User Updates (#7523)

* feat: Enhance agent update functionality to track user updates

- Updated `updateAgent` function to accept an `updatingUserId` parameter for tracking who made changes.
- Modified agent versioning to include `updatedBy` field for better audit trails.
- Adjusted related functions and tests to ensure proper handling of user updates and version history.
- Enhanced tests to verify correct tracking of `updatedBy` during agent updates and restorations.

* fix: Refactor import tests for improved readability and consistency

- Adjusted formatting in `importChatGptConvo` test to enhance clarity.
- Updated expected output string in `processAssistantMessage` test to use double quotes for consistency.
- Modified processing time expectation in `processAssistantMessage` test to allow for CI environment variability.

* 🧩 feat: Web Search Config Validations & Clipboard Citation Processing (#7530)

* 🔧 chore: Add missing optional `scraperTimeout` to webSearchSchema

* chore: Add missing optional `scraperTimeout` to web search authentication result

* chore: linting

* feat: Integrate attachment handling and citation processing in message components

- Added `useAttachments` hook to manage message attachments and search results.
- Updated `MessageParts`, `ContentParts`, and `ContentRender` components to utilize the new hook for improved attachment handling.
- Enhanced `useCopyToClipboard` to format citations correctly, including support for composite citations and deduplication.
- Introduced utility functions for citation processing and cleanup.
- Added tests for the new `useCopyToClipboard` functionality to ensure proper citation formatting and handling.

* feat: Add configuration for LibreChat Code Interpreter API and Web Search variables

* fix: Update searchResults type to use SearchResultData for better type safety

* feat: Add web search configuration validation and logging

- Introduced `checkWebSearchConfig` function to validate web search configuration values, ensuring they are environment variable references.
- Added logging for proper configuration and warnings for incorrect values.
- Created unit tests for `checkWebSearchConfig` to cover various scenarios, including valid and invalid configurations.

* docs: Update README to include Web Search feature details

- Added a section for the Web Search feature, highlighting its capabilities to search the internet and enhance AI context.
- Included links for further information on the Web Search functionality.

* ci: Add mock for checkWebSearchConfig in AppService tests

* chore: linting

* feat: Enhance Shared Messages with Web Search UI by adding searchResults prop to SearchContent and MinimalHoverButtons components

* chore: linting

* refactor: remove Meilisearch index sync from importConversations function

* feat: update safeSearch implementation to use SafeSearchTypes enum

* refactor: remove commented-out code in loadTools function

* fix: ensure responseMessageId handles latestMessage ID correctly

* feat: enhance Vite configuration for improved chunking and caching

- Added additional globIgnores for map files in Workbox configuration.
- Implemented high-impact chunking for various large libraries to optimize performance.
- Increased chunkSizeWarningLimit from 1200 to 1500 for better handling of larger chunks.

* refactor: move health check hook to Root, fix bad setState for Temporary state

- Enhanced the `useHealthCheck` hook to initiate health checks only when the user is authenticated.
- Added logic for managing health check intervals and handling window focus events.
- Introduced a new test suite for `useHealthCheck` to cover various scenarios including authentication state changes and error handling.
- Removed the health check invocation from `ChatRoute` and added it to `Root` for global health monitoring.

* fix: update font alias in Vite configuration for correct path resolution

* 🌍 i18n: Update translation.json with latest translations (#7532)

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>

* 🔧 chore: Update data-provider dependencies for typing (#7533)

- Updated dependencies to include @langchain/core and @types/winston in both package-lock.json and data-provider package.json.

* 🔧 fix: Artifacts Display Crash on Close and Max Width (#7540)

* 🔧 chore: Update react-resizable-panels dependency to version 3.0.2 in package.json and package-lock.json

* fix: Simplify order assignment in SidePanel component based on hasArtifacts condition, fixed frontend crash when artifacts are closed

* refactor: Change throttledSaveLayout to use useMemo for improved performance in SidePanelGroup component

* refactor: Update dependencies in SidePanel component's useEffect hooks for improved responsiveness

* 🏷️ refactor: EditPresetDialog UI and Remove `chatGptLabel` from Presets (#7543)

* fix: add necessary dep., remove unnecessary dep from useMentions memoization

* fix: Migrate deprecated chatGptLabel to modelLabel in cleanupPreset and simplify getPresetTitle logic

* fix: Enhance cleanupPreset to remove empty chatGptLabel and add comprehensive tests for label migration and preset handling

* chore: Update endpointType prop in PopoverButtons to allow null values for better flexibility

* refactor: Replace Dialog with OGDialog in EditPresetDialog for improved UI consistency and structure

* style: Update EditPresetDialog layout and styling for improved responsiveness and consistency

* 📦 refactor: Add Additional Chunking to Vite Config (#7544)

*  refactor: Add Additional Chunking to Vite Config

* chore: Integrate rollup-plugin-visualizer for bundle analysis in Vite config & add @codemirror chunks

*  fix: Debounce `setUserContext` and Default State Param for OpenID Auth (#7559)

* fix: Add default random state parameter to OpenID auth request for providers that require it; ensure passport strategy uses it

*  refactor: debounce setUserContext to avoid race condition

* refactor: Update OpenID authentication to use randomState from openid-client

* chore: linting in presetSettings type definition

* chore: import order in ModelPanel

* refactor: remove `isLegacyOutput` property from AnthropicClient since only used where defined, add latest models to non-legacy patterns, and remove from client cleanup

* refactor: adjust grid layout in Parameters component for improved responsiveness

* refactor: adjust grid layout in ModelPanel for improved display of model parameters

* test: add cases for maxOutputTokens handling in Claude 4 Sonnet and Opus models

* ci: mock loadCustomConfig in server tests and refactor OpenID route for improved authentication handling

* 🚀 feat: Implement Auto-Refill Settings for Balance

* fix: ESLint

*  feat: Enhance Auto-Refill Settings with Validation and Localization

---------

Co-authored-by: andresgit <9771158+andresgit@users.noreply.github.com>
Co-authored-by: matt burnett <mawburn@users.noreply.github.com>
Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
Co-authored-by: Marco Beretta <81851188+berry-13@users.noreply.github.com>
Co-authored-by: hofq <54744977+hofq@users.noreply.github.com>
Co-authored-by: hofq <gregorspalme@protonmail.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Danny Avila <danny@librechat.ai>
Co-authored-by: Theo N. Truong <644650+nhtruong@users.noreply.github.com>
Co-authored-by: René Honig <5851246+renehonig@users.noreply.github.com>
Co-authored-by: Ben Verhees <ben.verhees@iodigital.com>
Co-authored-by: Amgad Hasan <109704569+AmgadHasan@users.noreply.github.com>
Co-authored-by: arthurolivierfortin <118319678+arthurolivierfortin@users.noreply.github.com>
Co-authored-by: Danny Avila <danacordially@gmail.com>
Co-authored-by: Sebastien Bruel <93573440+sbruel@users.noreply.github.com>
Co-authored-by: Austin Barrington <31205926+austin-barrington@users.noreply.github.com>
Co-authored-by: Peter <peter.rothlaender@gmail.com>
Co-authored-by: Atef Bellaaj <slalom.bellaaj@external.daimlertruck.com>
2025-05-29 08:25:37 -04:00
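
Among the many changes folded into the merge above is the Cloudflare Turnstile integration via `@marsidev/react-turnstile`. A hedged sketch of rendering the widget on a login form; the site key shown is Cloudflare's public test key, and the component/prop wiring around it is illustrative:

```tsx
import { Turnstile } from '@marsidev/react-turnstile';

// Render the Turnstile widget and hand the verification token to the caller so
// it can be attached to the login/registration request.
export function LoginCaptcha({ onToken }: { onToken: (token: string) => void }) {
  return (
    <Turnstile
      siteKey="1x00000000000000000000AA" // Cloudflare's "always passes" test key
      onSuccess={onToken}
    />
  );
}
```
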
Danny Avila
f556aaeaea 🔧 refactor: Build Process and Static Asset Handling (#7605)
* 🔧 chore: Update build script to include post-build image removal

* refactor: staticCache middleware with options and special handling for manifest/sw/index files

* refactor(pwa): optimize service worker caching strategy

* refactor: streamline post-build process and update public directory handling

* chore: remove external images from rollupOptions in Vite config

* chore: enhance logging message in post-build script for clarity
2025-05-28 11:48:04 -04:00
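
A rough sketch of what a `staticCache` middleware with special handling for the manifest, service worker, and index file could look like, assuming Express and illustrative file names:

```ts
import express from 'express';
import path from 'path';

// Serve built client assets with long-lived caching, but force revalidation for
// files that must never go stale (index.html, the service worker, the manifest).
const NO_CACHE = new Set(['index.html', 'sw.js', 'manifest.webmanifest']);

export function staticCache(dir: string) {
  return express.static(dir, {
    maxAge: '1y', // hashed assets are effectively immutable, so cache aggressively
    setHeaders: (res, filePath) => {
      if (NO_CACHE.has(path.basename(filePath))) {
        res.setHeader('Cache-Control', 'no-cache');
      }
    },
  });
}
```
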
Danny Avila
2f462c9b3c 🔧 refactor: Centralize Default Agent Capabilities and Better Logging (#7598)
* refactor: Simplify grid column calculation in SourcesGroup component

* refactor: Centralize default agent capabilities and simplify capability assignment

* Edge case: use defined/fallback capabilities for ephemeral agents when the "agents" endpoint is not enabled

* refactor: consolidate gemini 2 vision check

* feat: enhance capability check logging for agents

* chore: update librechat-data-provider version to 0.7.86

* refactor: import default agent capabilities for enhanced capability management

* chore: standardize quotes in error message check for consistency

* fix: improve error logging both client and api-side for mistral ocr upload errors

* ci: update error handling in MistralOCR tests to use specific error message
2025-05-27 15:48:43 -04:00
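
A small sketch of the centralized-defaults pattern described above; the capability names and config shape are illustrative, not the project's actual constants:

```ts
// Shared default capabilities, used as the fallback when the "agents" endpoint
// is not explicitly configured (which also covers ephemeral agents).
const DEFAULT_AGENT_CAPABILITIES = ['tools', 'actions', 'file_search'] as const;

type Capability = (typeof DEFAULT_AGENT_CAPABILITIES)[number];

function getAgentCapabilities(config?: { capabilities?: Capability[] }): readonly Capability[] {
  return config?.capabilities ?? DEFAULT_AGENT_CAPABILITIES;
}

console.log(getAgentCapabilities()); // falls back to the shared defaults
```
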
github-actions[bot]
077b7e7e79 📜 docs: Unreleased Changelog (#7560)
* action: update Unreleased changelog

* Update CHANGELOG.md

---------

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Danny Avila <danny@librechat.ai>
2025-05-27 15:47:36 -04:00
Danny Avila
c68cc0a550 fix: Debounce setUserContext and Default State Param for OpenID Auth (#7559)
* fix: Add default random state parameter to OpenID auth request for providers that require it; ensure passport strategy uses it

*  refactor: debounce setUserContext to avoid race condition

* refactor: Update OpenID authentication to use randomState from openid-client

* chore: linting in presetSettings type definition

* chore: import order in ModelPanel

* refactor: remove `isLegacyOutput` property from AnthropicClient since only used where defined, add latest models to non-legacy patterns, and remove from client cleanup

* refactor: adjust grid layout in Parameters component for improved responsiveness

* refactor: adjust grid layout in ModelPanel for improved display of model parameters

* test: add cases for maxOutputTokens handling in Claude 4 Sonnet and Opus models

* ci: mock loadCustomConfig in server tests and refactor OpenID route for improved authentication handling
2025-05-25 23:40:37 -04:00
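
A minimal illustration of the debouncing approach from the first two bullets above, using lodash's `debounce`; the 250 ms window and `User` shape are assumptions:

```ts
import debounce from 'lodash/debounce';

type User = { id: string; name: string };

function setUserContext(user: User) {
  console.log('user context set for', user.id);
}

// Coalesce rapid, repeated calls so only the last value within the window is
// applied, avoiding the race condition from overlapping updates.
const debouncedSetUserContext = debounce(setUserContext, 250);

// Several calls in quick succession result in a single trailing invocation.
debouncedSetUserContext({ id: '1', name: 'a' });
debouncedSetUserContext({ id: '1', name: 'b' });
```
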
Danny Avila
deb8a00e27 📦 refactor: Add Additional Chunking to Vite Config (#7544)
*  refactor: Add Additional Chunking to Vite Config

* chore: Integrate rollup-plugin-visualizer for bundle analysis in Vite config & add @codemirror chunks
2025-05-24 19:47:17 -04:00
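
An illustrative Vite configuration in the spirit of the chunking changes above; the chunk names and matched packages are examples, not the project's exact manual-chunk map:

```ts
import { defineConfig } from 'vite';

// Split large dependencies into their own chunks so they cache independently of
// application code and do not trip the chunk-size warning.
export default defineConfig({
  build: {
    chunkSizeWarningLimit: 1500,
    rollupOptions: {
      output: {
        manualChunks(id: string) {
          if (id.includes('node_modules/@codemirror')) {
            return 'codemirror';
          }
          if (id.includes('node_modules/katex')) {
            return 'katex';
          }
        },
      },
    },
  },
});
```
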
Danny Avila
b45ff8e4ed 🏷️ refactor: EditPresetDialog UI and Remove chatGptLabel from Presets (#7543)
* fix: add necessary dep., remove unnecessary dep from useMentions memoization

* fix: Migrate deprecated chatGptLabel to modelLabel in cleanupPreset and simplify getPresetTitle logic

* fix: Enhance cleanupPreset to remove empty chatGptLabel and add comprehensive tests for label migration and preset handling

* chore: Update endpointType prop in PopoverButtons to allow null values for better flexibility

* refactor: Replace Dialog with OGDialog in EditPresetDialog for improved UI consistency and structure

* style: Update EditPresetDialog layout and styling for improved responsiveness and consistency
2025-05-24 19:24:42 -04:00
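
A hedged sketch of the `chatGptLabel` → `modelLabel` migration described above; the preset shape and helper name are hypothetical stand-ins for the actual `cleanupPreset` logic:

```ts
interface Preset {
  chatGptLabel?: string | null;
  modelLabel?: string | null;
  [key: string]: unknown;
}

// Move a deprecated chatGptLabel into modelLabel and drop empty label values
// while cleaning a preset.
function migratePresetLabels(preset: Preset): Preset {
  const { chatGptLabel, modelLabel, ...rest } = preset;
  const label = modelLabel || chatGptLabel;
  return label ? { ...rest, modelLabel: label } : rest;
}

console.log(migratePresetLabels({ chatGptLabel: 'My GPT', modelLabel: null }));
// -> { modelLabel: 'My GPT' }
```
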
Danny Avila
fc8d24fa5b 🔧 fix: Artifacts Display Crash on Close and Max Width (#7540)
* 🔧 chore: Update react-resizable-panels dependency to version 3.0.2 in package.json and package-lock.json

* fix: Simplify order assignment in SidePanel component based on hasArtifacts condition, fixed frontend crash when artifacts are closed

* refactor: Change throttledSaveLayout to use useMemo for improved performance in SidePanelGroup component

* refactor: Update dependencies in SidePanel component's useEffect hooks for improved responsiveness
2025-05-24 16:53:46 -04:00
Danny Avila
449d9b7613 🔧 chore: Update data-provider dependencies for typing (#7533)
- Updated dependencies to include @langchain/core and @types/winston in both package-lock.json and data-provider package.json.
2025-05-24 10:40:13 -04:00
github-actions[bot]
ddb0a7a216 🌍 i18n: Update translation.json with latest translations (#7532)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-05-24 10:37:12 -04:00
Danny Avila
b2f44fc90f 🧩 feat: Web Search Config Validations & Clipboard Citation Processing (#7530)
* 🔧 chore: Add missing optional `scraperTimeout` to webSearchSchema

* chore: Add missing optional `scraperTimeout` to web search authentication result

* chore: linting

* feat: Integrate attachment handling and citation processing in message components

- Added `useAttachments` hook to manage message attachments and search results.
- Updated `MessageParts`, `ContentParts`, and `ContentRender` components to utilize the new hook for improved attachment handling.
- Enhanced `useCopyToClipboard` to format citations correctly, including support for composite citations and deduplication.
- Introduced utility functions for citation processing and cleanup.
- Added tests for the new `useCopyToClipboard` functionality to ensure proper citation formatting and handling.

* feat: Add configuration for LibreChat Code Interpreter API and Web Search variables

* fix: Update searchResults type to use SearchResultData for better type safety

* feat: Add web search configuration validation and logging

- Introduced `checkWebSearchConfig` function to validate web search configuration values, ensuring they are environment variable references.
- Added logging for proper configuration and warnings for incorrect values.
- Created unit tests for `checkWebSearchConfig` to cover various scenarios, including valid and invalid configurations.

* docs: Update README to include Web Search feature details

- Added a section for the Web Search feature, highlighting its capabilities to search the internet and enhance AI context.
- Included links for further information on the Web Search functionality.

* ci: Add mock for checkWebSearchConfig in AppService tests

* chore: linting

* feat: Enhance Shared Messages with Web Search UI by adding searchResults prop to SearchContent and MinimalHoverButtons components

* chore: linting

* refactor: remove Meilisearch index sync from importConversations function

* feat: update safeSearch implementation to use SafeSearchTypes enum

* refactor: remove commented-out code in loadTools function

* fix: ensure responseMessageId handles latestMessage ID correctly

* feat: enhance Vite configuration for improved chunking and caching

- Added additional globIgnores for map files in Workbox configuration.
- Implemented high-impact chunking for various large libraries to optimize performance.
- Increased chunkSizeWarningLimit from 1200 to 1500 for better handling of larger chunks.

* refactor: move health check hook to Root, fix bad setState for Temporary state

- Enhanced the `useHealthCheck` hook to initiate health checks only when the user is authenticated.
- Added logic for managing health check intervals and handling window focus events.
- Introduced a new test suite for `useHealthCheck` to cover various scenarios including authentication state changes and error handling.
- Removed the health check invocation from `ChatRoute` and added it to `Root` for global health monitoring.

* fix: update font alias in Vite configuration for correct path resolution
2025-05-24 10:23:17 -04:00
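
A simplified version of the `checkWebSearchConfig` idea above: web-search config values are expected to be `${ENV_VAR}`-style references rather than literal secrets. The field and variable names here are assumptions:

```ts
const ENV_REF = /^\$\{[A-Z0-9_]+\}$/;

// Return a warning for every config value that is a literal string instead of
// an environment-variable reference.
function checkWebSearchConfigValues(config: Record<string, unknown> = {}): string[] {
  const warnings: string[] = [];
  for (const [key, value] of Object.entries(config)) {
    if (typeof value === 'string' && !ENV_REF.test(value)) {
      warnings.push(`webSearch.${key} should be an env variable reference like "\${FIRECRAWL_API_KEY}"`);
    }
  }
  return warnings;
}

console.log(checkWebSearchConfigValues({ firecrawlApiKey: 'literal-secret-123' }));
```
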
matt burnett
cede5d120c 👤 feat: Enhance Agent Versioning to Track User Updates (#7523)
* feat: Enhance agent update functionality to track user updates

- Updated `updateAgent` function to accept an `updatingUserId` parameter for tracking who made changes.
- Modified agent versioning to include `updatedBy` field for better audit trails.
- Adjusted related functions and tests to ensure proper handling of user updates and version history.
- Enhanced tests to verify correct tracking of `updatedBy` during agent updates and restorations.

* fix: Refactor import tests for improved readability and consistency

- Adjusted formatting in `importChatGptConvo` test to enhance clarity.
- Updated expected output string in `processAssistantMessage` test to use double quotes for consistency.
- Modified processing time expectation in `processAssistantMessage` test to allow for CI environment variability.
2025-05-23 20:47:14 -04:00
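
A hedged sketch of the version-tracking flow described above; the document shape and helper are illustrative stand-ins for the actual Mongoose model logic:

```ts
interface AgentDoc {
  name: string;
  instructions?: string;
  versions: Array<Record<string, unknown>>;
}

// On update, push the previous state onto `versions` along with an `updatedBy`
// field recording who made the change, preserving an audit trail.
function applyAgentUpdate(
  agent: AgentDoc,
  updates: Partial<Omit<AgentDoc, 'versions'>>,
  updatingUserId: string,
): AgentDoc {
  const { versions, ...previousState } = agent;
  return {
    ...agent,
    ...updates,
    versions: [...versions, { ...previousState, updatedBy: updatingUserId, updatedAt: new Date() }],
  };
}
```
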
Danny Avila
ed9ab8842a 🔧 chore: Bump Data Provider and Custom Config Versions (#7527)
* 🔧 chore: Update CONFIG_VERSION to 1.2.6

* 🔧 chore: Update librechat-data-provider version to 0.7.85
2025-05-23 17:40:41 -04:00
Danny Avila
b344ed12a1 🔧 refactor: Progress Text Localization for Running Tools (#7526) 2025-05-23 17:40:41 -04:00
Danny Avila
afee1a2cbd 🧹 chore: Bump Agents Dependencies (#7525)
* chore: bump langchain dependencies

* chore: bump @librechat/agents to v2.4.36

* chore: bump @librechat/agents to v2.4.37

* refactor: simplify remark plugins in Markdown component with no conditional usage
2025-05-23 17:40:40 -04:00
Danny Avila
0dbbf7de04 🔎 feat: Native Web Search with Citation References (#7516)
* WIP: search tool integration

* WIP: Add web search capabilities and API key management to agent actions

* WIP: web search capability to agent configuration and selection

* WIP: Add web search capability to backend agent configuration

* WIP: add web search option to default agent form values

* WIP: add attachments for web search

* feat: add plugin for processing web search citations

* WIP: first pass, Citation UI

* chore: remove console.log

* feat: Add AnimatedTabs component for tabbed UI functionality

* refactor: AnimatedTabs component with CSS animations and stable ID generation

* WIP example content

* feat: SearchContext for managing search results apart from MessageContext

* feat: Enhance AnimatedTabs with underline animation and state management

* WIP: first pass, Implement dynamic tab functionality in Sources component with search results integration

* fix: Update class names for improved styling in Sources and AnimatedTabs components

* feat: Improve styling and layout in Sources component with enhanced button and item designs

* feat: Refactor Sources component to integrate OGDialog for source display and improve layout

* style: Update background color in SourceItem and SourcesGroup components for improved visibility

* refactor: Sources component to enhance SourceItem structure and improve favicon handling

* style: Adjust font size of domain text in SourceItem for better readability

* feat: Add localization for citation source and details in CompositeCitation component

* style: add theming to Citation components

* feat: Enhance SourceItem component with dialog support and improved hovercard functionality

* feat: Add localization for sources tab and image alt text in Sources component

* style: Replace divs with spans for better semantic structure in CompositeCitation and Citation components

* refactor: Sources component to use useMemo for tab generation and improve performance

* chore: bump @librechat/agents to v2.4.318

* chore: update search result types

* fix: search results retrieval in ContentParts component, re-render attachments when expected

* feat: update sources style/types to use latest search result structure

* style: enhance Dialog (expanded) SourceItem component with link wrapping and improved styling

* style: update ImageItem component styling for improved title visibility

* refactor: remove SourceItemBase component and adjust SourceItem layout for improved styling

* chore: linting twcss order

* fix: prevent FileAttachment from rendering search attachments

* fix: append underscore to responseMessageId for unique identification to prevent mapping of previous latest message's attachments

* chore: remove unused parameter 'useSpecs' from loadTools function

* chore: twcss order

* WIP: WebSearch Tool UI

* refactor: add limit parameter to StackedFavicons for customizable source display

* refactor: optimize search results memoization by making it more granular and separating concerns

* refactor: integrated StackedFavicons to WebSearch mid-run

* chore: bump @librechat/agents to expose handleToolCallChunks

* chore: use typedefs from dedicated file instead of defining them in AgentClient module

* WIP: first pass, search progress results

* refactor: move createOnSearchResults function to a dedicated search module

* chore: bump @librechat/agents to v2.4.320

* WIP: first pass, search results processed UX

* refactor: consolidate context variables in createOnSearchResults function

* chore: bump @librechat/agents to v2.4.321

* feat: add guidelines for web search tool response formatting in loadTools function

* feat: add isLast prop to Part component and update WebSearch logic for improved state handling

* style: update Hovercard styles for improved UI consistency

* feat: export FaviconImage component for improved accessibility in other modules

* refactor: export getCleanDomain function and use FaviconImage in Citation component for improved source representation

* refactor: implement SourceHovercard component for consistency and DRY compliance

* fix: replace <p> with <span> for snippet and title in SourceItem and SourceHovercard for consistency

* style: `not-prose`

* style: remove 'not-prose' class for consistency in SourceItem, Citation, and SourceHovercard components, adjust style classes

* refactor: `imageUrl` on hover and prevent duplicate sources

* refactor: enhance SourcesGroup dialog layout and improve source item presentation

* refactor: reorganize Web Components, save in same directory

* feat: add 'news' refType to refTypeMap for citation sources

* style: adjust Hovercard width for improved layout

* refactor: update tool usage guidelines for improved clarity and execution

* chore: linting

* feat: add Web Search badge with initial permissions and local storage logic

* feat: add webSearch support to interface and permissions schemas

* feat: implement Web Search API key management and localization updates

* feat: refactor Web Search API key handling and integrate new search API key form

* fix: remove unnecessary visibility state from FileAttachment component

* feat: update WebSearch component to use Globe icon and localized search label

* feat: enhance ApiKeyDialog with dropdown for reranker selection and update translations

* feat: implement dropdown menus for engine, scraper, and reranker selection in ApiKeyDialog

* chore: linting and add unknown instead of `any` type

* feat: refactor ApiKeyDialog and useAuthSearchTool for improved API key management

* refactor: update ocrSchema to use template literals for default apiKey and baseURL

* feat: add web search configuration and utility functions for environment variable extraction

* fix: ensure filepath is defined before checking its prefix in useAttachmentHandler

* feat: enhance web search functionality with improved configuration and environment variable extraction for authFields

* fix: update auth type in TPluginAction and TUpdateUserPlugins to use Partial<Record<string, string>>

* feat: implement web search authentication verification and enhance webSearchAuth structure

* feat: enhance ephemeral agent handling with new web search capability and type definition

* feat: enhance isEphemeralAgent function to include web search selection

* feat: refactor verifyWebSearchAuth to improve key handling and authentication checks

* feat: implement loadWebSearchAuth function for improved web search authentication handling

* feat: enhance web search authentication with new configuration options and refactor related types

* refactor: rename search engine to search provider and update related localization keys

* feat: update verifyWebSearchAuth to handle multiple authentication types and improve error handling

* feat: update ApiKeyDialog to accept authTypes prop and remove isUserProvided check

* feat: add tests for extractWebSearchEnvVars and loadWebSearchAuth functions

* feat: enhance loadWebSearchAuth to support specific service checks for providers, scrapers, and rerankers

* fix: update web search configuration key and adjust auth result handling in loadTools function

* feat: add new progress key for repeated web searching and update localization

* chore: bump @librechat/agents to 2.4.322

* feat: enhance loadTools function to include ISO time and improve search tool logging

* feat: update StackedFavicons to handle negative start index and improve citation attribution styling and text

* chore: update .gitignore to categorize AI-related files

* fix: mobile responsiveness of sources/citations hovercards

* feat: enhance source display with improved line clamping for better readability

* chore: bump @librechat/agents to v2.4.33

* feat: add handling for image sources in references mapping

* chore: bump librechat-data-provider version to 0.7.84

* chore: bump @librechat/agents version to 2.4.34

* fix: update auth handling to support multiple auth types in tools and allow key configuration in agent panel

* chore: remove redundant agent attribution text from search form

* fix: web search auth uninstall

* refactor: convert CheckboxButton to a forwardRef component and update setValue callback signature

* feat: add triggerRef prop to ApiKeyDialog components for improved dialog control

* feat: integrate triggerRef in CodeInterpreter and WebSearch components for enhanced dialog management

* feat: enhance ApiKeyDialog with additional links for Firecrawl and Jina API key guidance

* feat: implement web search configuration handling in ApiKeyDialog and add tests for dropdown visibility

* fix: update webSearchConfig reference in config route for correct payload assignment

* feat: update ApiKeyDialog to conditionally render sections based on authTypes and modify loadWebSearchAuth to correctly categorize authentication types

* feat: refactor ApiKeyDialog and related tests to use SearchCategories and RerankerTypes enums and remove nested ternaries

* refactor: move ThinkingButton rendering to improve layout consistency in ContentParts

* feat: integrate search context into Markdown component to conditionally include unicodeCitation plugin

* chore: bump @librechat/agents to v2.4.35

* chore: remove unused i18n key

* ci: add WEB_SEARCH permission testing and update AppService tests for new webSearch configuration

* ci: add more comprehensive tests for loadWebSearchAuth to validate authentication handling and authTypes structure

* chore: remove debugging console log from web.spec.ts to clean up test output
2025-05-23 17:40:40 -04:00
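
A minimal sketch of the web-search credential handling described in the PR above. The `extractWebSearchEnvVars` and `loadWebSearchAuth` names come from the commit messages, but the category map, return shapes, and readiness check here are illustrative assumptions, not LibreChat's actual implementation.

```typescript
// Sketch only: categorize web-search credentials by service and report which
// required env vars are missing. All names/shapes below are assumptions.
type SearchCategory = 'providers' | 'scrapers' | 'rerankers';

const webSearchAuth: Record<SearchCategory, Record<string, string[]>> = {
  providers: { serper: ['SERPER_API_KEY'] },
  scrapers: { firecrawl: ['FIRECRAWL_API_KEY', 'FIRECRAWL_API_URL'] },
  rerankers: { jina: ['JINA_API_KEY'], cohere: ['COHERE_API_KEY'] },
};

/** Returns, per category and service, which env vars are not yet set. */
function extractWebSearchEnvVars(env: NodeJS.ProcessEnv = process.env) {
  const result: Record<SearchCategory, { service: string; missing: string[] }[]> = {
    providers: [],
    scrapers: [],
    rerankers: [],
  };
  for (const category of Object.keys(webSearchAuth) as SearchCategory[]) {
    for (const [service, keys] of Object.entries(webSearchAuth[category])) {
      const missing = keys.filter((key) => !env[key]);
      result[category].push({ service, missing });
    }
  }
  return result;
}

// A category counts as configured when at least one of its services has no
// missing keys; anything else would fall back to user-provided keys.
const status = extractWebSearchEnvVars();
const searchReady = (['providers', 'scrapers', 'rerankers'] as const).every((c) =>
  status[c].some((s) => s.missing.length === 0),
);
console.log({ status, searchReady });
```

The idea sketched here is that a provider, scraper, and reranker must each be satisfiable, either from the environment or from keys the user supplies through the ApiKeyDialog.
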
Peter
bf80cf30b3 *️⃣ feat: Reuse OpenID Auth Tokens (#7397)
* feat: integrate OpenID Connect support with token reuse

- Added `jwks-rsa` and `new-openid-client` dependencies for OpenID Connect functionality.
- Implemented OpenID token refresh logic in `AuthController`.
- Enhanced `LogoutController` to handle OpenID logout and session termination.
- Updated JWT authentication middleware to support OpenID token provider.
- Modified OAuth routes to accommodate OpenID authentication and token management.
- Created `setOpenIDAuthTokens` function to manage OpenID tokens in cookies.
- Upgraded OpenID strategy with user info fetching and token exchange protocol.
- Introduced `openIdJwtLogin` strategy for handling OpenID JWT tokens.
- Added caching mechanism for exchanged OpenID tokens.
- Updated configuration to include OpenID exchanged tokens cache key.
- Updated .env.example to include the new env variables needed for the feature.

* fix: update return type in downloadImage documentation for clarity and fix openIdJwtLogin env variables

* fix: update Jest configuration and tests for OpenID strategy integration

* fix: update OpenID strategy to include callback URL in setup

* fix: update optionalJwtAuth middleware to support OpenID token reuse and improve the currentUrl method in CustomOpenIDStrategy to work around the dynamic host issue behind a proxy (e.g. CloudFront)

* fix: code formatting

* Fix: Add mocks for openid-client and passport strategy in Jest configuration to fix unit tests

* fix: ESLint errors; format the openid-client mock file.

*  feat: Add PKCE support for OpenID and default handling in strategy setup

---------

Co-authored-by: Atef Bellaaj <slalom.bellaaj@external.daimlertruck.com>
Co-authored-by: Ruben Talstra <RubenTalstra1211@outlook.com>
2025-05-23 17:40:40 -04:00
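
An illustrative sketch of the token-reuse idea from the PR above: validating a reused OpenID JWT against the provider's JWKS with `jwks-rsa` and `jsonwebtoken` (both named in the commits). The middleware name, the `OPENID_JWKS_URL` variable, and the error handling are assumptions for the sketch, not LibreChat's actual `openIdJwtLogin` strategy.

```typescript
// Sketch: accept a reused OpenID access token by verifying it against the
// provider's JWKS, with signing-key caching as the new env vars suggest.
import { Request, Response, NextFunction } from 'express';
import jwt, { JwtHeader, SigningKeyCallback } from 'jsonwebtoken';
import jwksRsa from 'jwks-rsa';

const jwks = jwksRsa({
  jwksUri: process.env.OPENID_JWKS_URL ?? '', // assumed variable name
  cache: process.env.OPENID_JWKS_URL_CACHE_ENABLED !== 'false',
  cacheMaxAge: Number(process.env.OPENID_JWKS_URL_CACHE_TIME ?? 600_000), // 10 minutes
});

function getKey(header: JwtHeader, callback: SigningKeyCallback) {
  jwks.getSigningKey(header.kid, (err, key) => {
    if (err || !key) return callback(err ?? new Error('Signing key not found'));
    callback(null, key.getPublicKey());
  });
}

/** Express middleware: authenticate with a reused OpenID token instead of the app-issued JWT. */
export function openIdJwtAuth(req: Request, res: Response, next: NextFunction) {
  const token = req.headers.authorization?.replace(/^Bearer\s+/i, '');
  if (!token) return res.status(401).json({ message: 'Missing token' });

  jwt.verify(token, getKey, { algorithms: ['RS256'] }, (err, payload) => {
    if (err) return res.status(401).json({ message: 'Invalid OpenID token' });
    (req as Request & { user?: unknown }).user = payload; // attach claims for later handlers
    next();
  });
}
```
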
matt burnett
d47d827ed9 feat: Agent Version History and Management (#7455)
*  feat: Enhance agent update functionality to save current state in versions array

- Updated the `updateAgent` function to push the current agent's state into a new `versions` array when an agent is updated.
- Modified the agent schema to include a `versions` field for storing historical states of agents.

*  feat: Add comprehensive CRUD operations for agents in tests

- Introduced a new test suite for CRUD operations on agents, including create, read, update, and delete functionalities.
- Implemented tests for listing agents by author and updating agent projects.
- Enhanced the agent model to support version history tracking during updates.
- Ensured proper environment variable management during tests.

*  feat: Introduce version tracking for agents and enhance UI components

- Added a `version` property to the agent model to track the number of versions.
- Updated the `getAgentHandler` to include the agent's version in the response.
- Introduced a new `VersionButton` component for navigating to the version panel.
- Created a `VersionPanel` component for displaying version-related information.
- Updated the UI to conditionally render the version button and panel based on the active state.
- Added localization for the new version-related UI elements.

*  i18n: Add "version" translation key across multiple languages

- Introduced the "com_ui_agent_version" translation key in various language files to support version tracking for agents.
- Updated Arabic, Czech, German, English, Spanish, Estonian, Persian, Finnish, French, Hebrew, Hungarian, Indonesian, Italian, Japanese, Korean, Dutch, Polish, Portuguese (Brazil and Portugal), Russian, Swedish, Thai, Turkish, Vietnamese, and Chinese (Simplified and Traditional) translations.

*  feat: Update AgentFooter to conditionally render AdminSettings

- Modified the logic for displaying buttons in the AgentFooter component to only show them when the active panel is the builder.
- Ensured that AdminSettings is displayed only when the user has an admin role and the buttons are visible.

*  feat: Enhance AgentPanelSwitch and VersionPanel for improved agent capabilities

- Updated AgentPanelSwitch to include a new VersionPanel for displaying version-related information.
- Enhanced agentsConfig logic to properly handle agent capabilities.
- Modified VersionPanel to improve structure and localization support.
- Integrated createAgent mutation for future agent creation functionality.

*  feat: Enhance VersionPanel to display agent version history and loading states

- Integrated version fetching logic in VersionPanel to retrieve and display agent version history.
- Added loading and error handling states to improve user experience.
- Updated agent schema to use mixed types for versions, allowing for more flexible version data structures.
- Introduced localization support for version-related UI elements.

*  feat: Update VersionPanel and AgentPanelSwitch to enhance agent selection and version display

- Modified AgentPanelSwitch to pass selectedAgentId to VersionPanel for improved agent context.
- Enhanced VersionPanel to handle multiple timestamp formats and display appropriate messages when no agent is selected.
- Improved structure and readability of the VersionPanel component by adding a helper function for timestamp retrieval.

*  feat: Refactor VersionPanel to utilize localization and improve timestamp handling

- Replaced hardcoded text constants with localization support for various UI elements in VersionPanel.
- Enhanced the timestamp retrieval function to handle errors gracefully and utilize localized messages for unknown dates.
- Improved user feedback by displaying localized messages for agent selection, version errors, and empty states.

*  refactor: Clean up VersionPanel by removing unused code and improving timestamp handling

*  feat: Implement agent version reverting functionality

- Added `revertAgentVersion` method in the Agent model to allow reverting to a previous version of an agent.
- Introduced `revertAgentVersionHandler` in the agents controller to handle requests for reverting agent versions.
- Updated API routes to include a new endpoint for reverting agent versions.
- Enhanced the VersionPanel component to support version restoration with user confirmation and feedback.
- Added localization support for success and error messages related to version restoration.

*  i18n: Add localization for agent version restoration messages

* Simplify VersionPanel by removing unused parameters and enhancing agent ID handling

* Refactor Agent model and VersionPanel component to streamline version data handling

* Update version handling in Agent model and VersionPanel

- Enhanced the Agent model to include an `updatedAt` timestamp when pushing new versions.
- Improved the VersionPanel component to sort versions by the `updatedAt` timestamp for better display order.
- Added a new localization entry for indicating the active version of an agent.

*  i18n: Add localization for active agent version across multiple languages

*  feat: Introduce version management components for agent history

- Added `isActiveVersion` utility to determine the active version of an agent based on various criteria.
- Implemented `VersionContent` and `VersionItem` components to display agent version history, including loading and error states.
- Enhanced `VersionPanel` to integrate new components and manage version context effectively.
- Added comprehensive tests for version management functionalities to ensure reliability and correctness.

* Add unit tests for AgentFooter component

* cleanup

* Enhance agent version update handling and add unit tests for update operators

- Updated the `updateAgent` function to properly handle various update operators ($push, $pull, $addToSet) while maintaining version history.
- Modified unit tests to validate the correct behavior of agent updates, including versioning and tool management.

* Enhance version comparison logic and update tests for artifacts handling

- Modified the `isActiveVersion` utility to include artifacts in the version comparison criteria.
- Updated the `VersionPanel` component to support artifacts in the agent state.
- Added new unit tests to validate artifacts matching scenarios and edge cases in the `isActiveVersion` function.

* Implement duplicate version detection in agent updates and enhance error handling

- Added `isDuplicateVersion` function to check for identical versions during agent updates, excluding certain fields.
- Updated `updateAgent` function to throw an error if a duplicate version is detected, with detailed error information.
- Enhanced the `updateAgentHandler` to return appropriate responses for duplicate version errors.
- Modified client-side error handling to display user-friendly messages for duplicate version scenarios.
- Added comprehensive unit tests to validate duplicate version detection and error handling across various update scenarios.

* Update version title localization to include version number across multiple languages

- Modified the `com_ui_agent_version_title` translation key to include a placeholder for the version number in various language files.
- Enhanced the `VersionItem` component to utilize the updated localization for displaying version titles dynamically.

* Enhance agent version handling and add revert functionality

- Updated the `isDuplicateVersion` function to improve version comparison logic, including special handling for `projectIds` and arrays of objects.
- Modified the `updateAgent` function to streamline version updates and removed unnecessary checks for test environments.
- Introduced a new `revertAgentVersion` function to allow reverting agents to specific versions, with detailed documentation.
- Enhanced unit tests to validate duplicate version detection and revert functionality, ensuring robust error handling and version management.

* fix CI issues

* cleanup

* Revert all non-English translations

* clean up tests
2025-05-23 17:40:39 -04:00
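
A simplified sketch of the duplicate-version detection described in the PR above: compare the pending agent state against stored versions while ignoring volatile fields. The excluded field names and the JSON fingerprint are assumptions; the real `isDuplicateVersion` also special-cases `projectIds`, arrays of objects, and (in a later PR) action metadata hashes.

```typescript
// Sketch only: reject updates that would create a version identical to one
// already in the versions array, ignoring timestamps and bookkeeping fields.
type AgentVersion = Record<string, unknown> & { updatedAt?: string };

const EXCLUDED_FIELDS = new Set(['updatedAt', 'createdAt', '_id', 'versions', 'version']);

// Note: simplified fingerprint; nested objects with differing key order would
// need a deep, order-insensitive comparison in a real implementation.
function normalize(version: AgentVersion): string {
  const entries = Object.entries(version)
    .filter(([key]) => !EXCLUDED_FIELDS.has(key))
    .sort(([a], [b]) => a.localeCompare(b));
  return JSON.stringify(entries);
}

/** True if the pending update would duplicate an existing version. */
function isDuplicateVersion(candidate: AgentVersion, versions: AgentVersion[]): boolean {
  const fingerprint = normalize(candidate);
  return versions.some((v) => normalize(v) === fingerprint);
}

// Usage: only excluded fields differ, so the update is treated as a duplicate.
const history: AgentVersion[] = [{ name: 'My Agent', model: 'gpt-4o', updatedAt: '2025-05-20' }];
const update: AgentVersion = { name: 'My Agent', model: 'gpt-4o', updatedAt: '2025-05-23' };
console.log(isDuplicateVersion(update, history)); // true
```
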
Austin Barrington
5be446edff 🪖 chore: bump helm app version to v0.7.8 (#7524)
- bump helm app version to match the latest release version
2025-05-23 17:39:42 -04:00
Danny Avila
2265413387 🪨 feat: Bedrock Support for Claude-4 Reasoning (#7517)
* 🗑️ chore: Update .gitignore to reflect AI-related files

* chore: linting in Bedrock options.js

* 🪨 feat: Bedrock Claude-4 Reasoning
2025-05-23 00:42:51 -04:00
hofq
7e98702a87 📊 chore: Remove Old Helm Chart (#7512)
Co-authored-by: hofq <gregorspalme@protonmail.com>
2025-05-22 23:53:19 -04:00
Danny Avila
a2f330e6ca 🦾 feat: Claude-4 Support (#7509)
* refactor: Update AnthropicClient to support Claude model naming changes

* Renamed `isClaude3` to `isClaudeLatest` to accommodate newer Claude models.
* Updated logic to determine if the model is part of the Claude family.
* Adjusted `useMessages` property to reflect the new model naming convention.
* Cleaned up client properties during disposal to match the updated naming.

* feat: Claude-4 Support

* feat: Add Thinking and Prompt caching support for Claude 4

* chore: Update ANTHROPIC_MODELS in .env.example for latest model versions
2025-05-22 15:00:44 -04:00
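
A small sketch of the model-family check this PR introduces; the two regular expressions mirror the `AnthropicClient` diff further down, while the standalone helper shape is illustrative.

```typescript
// Matches both the "claude-3.x"/"claude-4" naming and the newer
// "claude-sonnet-4" / "claude-opus-4" ordering.
function isClaudeLatest(modelName: string): boolean {
  return (
    /claude-[3-9]/.test(modelName) || /claude-(?:sonnet|opus|haiku)-[4-9]/.test(modelName)
  );
}

console.log(isClaudeLatest('claude-sonnet-4-20250514')); // true
console.log(isClaudeLatest('claude-3-5-haiku-20241022')); // true
console.log(isClaudeLatest('claude-2.1')); // false
```
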
365 changed files with 17507 additions and 3915 deletions

View File

@@ -20,8 +20,8 @@ DOMAIN_CLIENT=http://localhost:3080
DOMAIN_SERVER=http://localhost:3080
NO_INDEX=true
# Use the address that is at most n number of hops away from the Express application.
# req.socket.remoteAddress is the first hop, and the rest are looked for in the X-Forwarded-For header from right to left.
# Use the address that is at most n number of hops away from the Express application.
# req.socket.remoteAddress is the first hop, and the rest are looked for in the X-Forwarded-For header from right to left.
# A value of 0 means that the first untrusted address would be req.socket.remoteAddress, i.e. there is no reverse proxy.
# Defaulted to 1.
TRUST_PROXY=1
@@ -88,7 +88,7 @@ PROXY=
#============#
ANTHROPIC_API_KEY=user_provided
# ANTHROPIC_MODELS=claude-3-7-sonnet-latest,claude-3-7-sonnet-20250219,claude-3-5-haiku-20241022,claude-3-5-sonnet-20241022,claude-3-5-sonnet-latest,claude-3-5-sonnet-20240620,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_MODELS=claude-opus-4-20250514,claude-sonnet-4-20250514,claude-3-7-sonnet-20250219,claude-3-5-sonnet-20241022,claude-3-5-haiku-20241022,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307
# ANTHROPIC_REVERSE_PROXY=
#============#
@@ -443,6 +443,47 @@ OPENID_IMAGE_URL=
# Set to true to automatically redirect to the OpenID provider when a user visits the login page
# This will bypass the login form completely for users, only use this if OpenID is your only authentication method
OPENID_AUTO_REDIRECT=false
# Set to true to use PKCE (Proof Key for Code Exchange) for OpenID authentication
OPENID_USE_PKCE=false
#Set to true to reuse openid tokens for authentication management instead of using the mongodb session and the custom refresh token.
OPENID_REUSE_TOKENS=
#By default, signing key verification results are cached in order to prevent excessive HTTP requests to the JWKS endpoint.
#If a signing key matching the kid is found, this will be cached and the next time this kid is requested the signing key will be served from the cache.
#Default is true.
OPENID_JWKS_URL_CACHE_ENABLED=
OPENID_JWKS_URL_CACHE_TIME= # 600000 ms eq to 10 minutes leave empty to disable caching
#Set to true to trigger token exchange flow to acquire access token for the userinfo endpoint.
OPENID_ON_BEHALF_FLOW_FOR_USERINFRO_REQUIRED=
OPENID_ON_BEHALF_FLOW_USERINFRO_SCOPE = "user.read" # example for Scope Needed for Microsoft Graph API
# Set to true to use the OpenID Connect end session endpoint for logout
OPENID_USE_END_SESSION_ENDPOINT=
# SAML
# Note: If OpenID is enabled, SAML authentication will be automatically disabled.
SAML_ENTRY_POINT=
SAML_ISSUER=
SAML_CERT=
SAML_CALLBACK_URL=/oauth/saml/callback
SAML_SESSION_SECRET=
# Attribute mappings (optional)
SAML_EMAIL_CLAIM=
SAML_USERNAME_CLAIM=
SAML_GIVEN_NAME_CLAIM=
SAML_FAMILY_NAME_CLAIM=
SAML_PICTURE_CLAIM=
SAML_NAME_CLAIM=
# Login button settings (optional)
SAML_BUTTON_LABEL=
SAML_IMAGE_URL=
# Whether the SAML Response should be signed.
# - If "true", the entire `SAML Response` will be signed.
# - If "false" or unset, only the `SAML Assertion` will be signed (default behavior).
# SAML_USE_AUTHN_RESPONSE_SIGNED=
# LDAP
LDAP_URL=
@@ -575,3 +616,33 @@ HELP_AND_FAQ_URL=https://librechat.ai
# OpenWeather #
#=====================================================#
OPENWEATHER_API_KEY=
#====================================#
# LibreChat Code Interpreter API #
#====================================#
# https://code.librechat.ai
# LIBRECHAT_CODE_API_KEY=your-key
#======================#
# Web Search #
#======================#
# Note: All of the following variable names can be customized.
# Omit values to allow user to provide them.
# For more information on configuration values, see:
# https://librechat.ai/docs/features/web_search
# Search Provider (Required)
# SERPER_API_KEY=your_serper_api_key
# Scraper (Required)
# FIRECRAWL_API_KEY=your_firecrawl_api_key
# Optional: Custom Firecrawl API URL
# FIRECRAWL_API_URL=your_firecrawl_api_url
# Reranker (Required)
# JINA_API_KEY=your_jina_api_key
# or
# COHERE_API_KEY=your_cohere_api_key

View File

@@ -26,6 +26,10 @@ jobs:
uses: azure/setup-helm@v4
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
- name: Build Subchart Deps
run: |
cd helm/librechat-rag-api
helm dependency build
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.6.0

5
.gitignore vendored
View File

@@ -52,8 +52,9 @@ bower_components/
*.d.ts
!vite-env.d.ts
# Cline
# AI
.clineignore
.cursor
# Floobits
.floo
@@ -121,3 +122,5 @@ helm/**/.values.yaml
!/client/src/@types/i18next.d.ts
# SAML Idp cert
*.cert

View File

@@ -6,6 +6,7 @@ All notable changes to this project will be documented in this file.
## [Unreleased]
### ✨ New Features
@@ -15,11 +16,14 @@ All notable changes to this project will be documented in this file.
- 🔒 feat: Add Content Security Policy using Helmet middleware by **@rubentalstra** in [#7377](https://github.com/danny-avila/LibreChat/pull/7377)
- ✨ feat: Add Normalization for MCP Server Names by **@danny-avila** in [#7421](https://github.com/danny-avila/LibreChat/pull/7421)
- 📊 feat: Improve Helm Chart by **@hofq** in [#3638](https://github.com/danny-avila/LibreChat/pull/3638)
- 🦾 feat: Claude-4 Support by **@danny-avila** in [#7509](https://github.com/danny-avila/LibreChat/pull/7509)
- 🪨 feat: Bedrock Support for Claude-4 Reasoning by **@danny-avila** in [#7517](https://github.com/danny-avila/LibreChat/pull/7517)
### 🌍 Internationalization
- 🌍 i18n: Add `Danish` and `Czech` and `Catalan` localization support by **@rubentalstra** in [#7373](https://github.com/danny-avila/LibreChat/pull/7373)
- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#7375](https://github.com/danny-avila/LibreChat/pull/7375)
- 🌍 i18n: Update translation.json with latest translations by **@github-actions[bot]** in [#7468](https://github.com/danny-avila/LibreChat/pull/7468)
### 🔧 Fixes
@@ -37,6 +41,11 @@ All notable changes to this project will be documented in this file.
- 📜 docs: CHANGELOG for release v0.7.8 by **@github-actions[bot]** in [#7290](https://github.com/danny-avila/LibreChat/pull/7290)
- 📦 chore: Update API Package Dependencies by **@danny-avila** in [#7359](https://github.com/danny-avila/LibreChat/pull/7359)
- 📜 docs: Unreleased Changelog by **@github-actions[bot]** in [#7321](https://github.com/danny-avila/LibreChat/pull/7321)
- 📜 docs: Unreleased Changelog by **@github-actions[bot]** in [#7434](https://github.com/danny-avila/LibreChat/pull/7434)
- 🛡️ chore: `multer` v2.0.0 for CVE-2025-47935 and CVE-2025-47944 by **@danny-avila** in [#7454](https://github.com/danny-avila/LibreChat/pull/7454)
- 📂 refactor: Improve `FileAttachment` & File Form Deletion by **@danny-avila** in [#7471](https://github.com/danny-avila/LibreChat/pull/7471)
- 📊 chore: Remove Old Helm Chart by **@hofq** in [#7512](https://github.com/danny-avila/LibreChat/pull/7512)
- 🪖 chore: bump helm app version to v0.7.8 by **@austin-barrington** in [#7524](https://github.com/danny-avila/LibreChat/pull/7524)

View File

@@ -71,6 +71,11 @@
- [Model Context Protocol (MCP) Support](https://modelcontextprotocol.io/clients#librechat) for Tools
- Use LibreChat Agents and OpenAI Assistants with Files, Code Interpreter, Tools, and API Actions
- 🔍 **Web Search**:
- Search the internet and retrieve relevant information to enhance your AI context
- Combines search providers, content scrapers, and result rerankers for optimal results
- **[Learn More →](https://www.librechat.ai/docs/features/web_search)**
- 🪄 **Generative UI with Code Artifacts**:
- [Code Artifacts](https://youtu.be/GfTj7O4gmd0?si=WJbdnemZpJzBrJo3) allow creation of React, HTML, and Mermaid diagrams directly in chat

View File

@@ -70,13 +70,10 @@ class AnthropicClient extends BaseClient {
this.message_delta;
/** Whether the model is part of the Claude 3 Family
* @type {boolean} */
this.isClaude3;
this.isClaudeLatest;
/** Whether to use Messages API or Completions API
* @type {boolean} */
this.useMessages;
/** Whether or not the model is limited to the legacy amount of output tokens
* @type {boolean} */
this.isLegacyOutput;
/** Whether or not the model supports Prompt Caching
* @type {boolean} */
this.supportsCacheControl;
@@ -116,21 +113,25 @@ class AnthropicClient extends BaseClient {
);
const modelMatch = matchModelName(this.modelOptions.model, EModelEndpoint.anthropic);
this.isClaude3 = modelMatch.includes('claude-3');
this.isLegacyOutput = !(
/claude-3[-.]5-sonnet/.test(modelMatch) || /claude-3[-.]7/.test(modelMatch)
this.isClaudeLatest =
/claude-[3-9]/.test(modelMatch) || /claude-(?:sonnet|opus|haiku)-[4-9]/.test(modelMatch);
const isLegacyOutput = !(
/claude-3[-.]5-sonnet/.test(modelMatch) ||
/claude-3[-.]7/.test(modelMatch) ||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(modelMatch) ||
/claude-[4-9]/.test(modelMatch)
);
this.supportsCacheControl = this.options.promptCache && checkPromptCacheSupport(modelMatch);
if (
this.isLegacyOutput &&
isLegacyOutput &&
this.modelOptions.maxOutputTokens &&
this.modelOptions.maxOutputTokens > legacy.maxOutputTokens.default
) {
this.modelOptions.maxOutputTokens = legacy.maxOutputTokens.default;
}
this.useMessages = this.isClaude3 || !!this.options.attachments;
this.useMessages = this.isClaudeLatest || !!this.options.attachments;
this.defaultVisionModel = this.options.visionModel ?? 'claude-3-sonnet-20240229';
this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));
@@ -654,7 +655,10 @@ class AnthropicClient extends BaseClient {
);
};
if (this.modelOptions.model.includes('claude-3')) {
if (
/claude-[3-9]/.test(this.modelOptions.model) ||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(this.modelOptions.model)
) {
await buildMessagesPayload();
processTokens();
return {

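Reading the hunk above as a standalone sketch: newer models skip the legacy output-token cap, while older ones are clamped to the legacy default (4096, consistent with the test expectations later in this diff). The helper and its signature are illustrative, not the client's actual code.

```typescript
const LEGACY_MAX_OUTPUT_TOKENS = 4096; // legacy default implied by the tests below

// Sketch of the capping decision shown in the diff above.
function resolveMaxOutputTokens(model: string, requested?: number): number | undefined {
  const isLegacyOutput = !(
    /claude-3[-.]5-sonnet/.test(model) ||
    /claude-3[-.]7/.test(model) ||
    /claude-(?:sonnet|opus|haiku)-[4-9]/.test(model) ||
    /claude-[4-9]/.test(model)
  );
  if (isLegacyOutput && requested && requested > LEGACY_MAX_OUTPUT_TOKENS) {
    return LEGACY_MAX_OUTPUT_TOKENS;
  }
  return requested;
}

console.log(resolveMaxOutputTokens('claude-3-haiku-20240307', 8192)); // 4096 (capped)
console.log(resolveMaxOutputTokens('claude-sonnet-4-20250514', 40960)); // 40960 (not capped)
```
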
View File

@@ -244,9 +244,9 @@ class ChatGPTClient extends BaseClient {
baseURL = this.langchainProxy
? constructAzureURL({
baseURL: this.langchainProxy,
azureOptions: this.azure,
})
baseURL: this.langchainProxy,
azureOptions: this.azure,
})
: this.azureEndpoint.split(/(?<!\/)\/(chat|completion)\//)[0];
if (this.options.forcePrompt) {
@@ -339,7 +339,6 @@ class ChatGPTClient extends BaseClient {
opts.body = JSON.stringify(modelOptions);
if (modelOptions.stream) {
return new Promise(async (resolve, reject) => {
try {
let done = false;

View File

@@ -236,11 +236,11 @@ class GoogleClient extends BaseClient {
msg.content = (
!Array.isArray(msg.content)
? [
{
type: ContentTypes.TEXT,
[ContentTypes.TEXT]: msg.content,
},
]
{
type: ContentTypes.TEXT,
[ContentTypes.TEXT]: msg.content,
},
]
: msg.content
).concat(message.image_urls);

View File

@@ -67,7 +67,7 @@ class OllamaClient {
return models;
} catch (error) {
const logMessage =
'Failed to fetch models from Ollama API. If you are not using Ollama directly, and instead, through some aggregator or reverse proxy that handles fetching via OpenAI spec, ensure the name of the endpoint doesn\'t start with `ollama` (case-insensitive).';
"Failed to fetch models from Ollama API. If you are not using Ollama directly, and instead, through some aggregator or reverse proxy that handles fetching via OpenAI spec, ensure the name of the endpoint doesn't start with `ollama` (case-insensitive).";
logAxiosError({ message: logMessage, error });
return [];
}

View File

@@ -22,17 +22,17 @@
'\n' +
'Celestia had the power to grant one wish to anyone who dared to find her abode. Ethan, captivated by the tales he had read and yearning for something greater, approached the cottage with trepidation. When he shared his desire to embark on a grand adventure, Celestia smiled warmly and agreed to grant his wish.\n' +
'\n' +
'With a wave of her wand and a sprinkle of stardust, Celestia bestowed upon Ethan a magical necklace. This necklace, adorned with a rare gemstone called the Eye of Imagination, had the power to turn dreams and imagination into reality. From that moment forward, Ethan\'s every thought and idea became manifest.\n' +
"With a wave of her wand and a sprinkle of stardust, Celestia bestowed upon Ethan a magical necklace. This necklace, adorned with a rare gemstone called the Eye of Imagination, had the power to turn dreams and imagination into reality. From that moment forward, Ethan's every thought and idea became manifest.\n" +
'\n' +
'Energized by this newfound power, Ethan continued his journey, encountering mythical creatures, solving riddles, and overcoming treacherous obstacles along the way. With the Eye of Imagination, he brought life to ancient statues, unlocked hidden doors, and even tamed fiery dragons.\n' +
'\n' +
'As days turned into weeks and weeks into months, Ethan became wiser and more in tune with the world around him. He learned that true adventure was not merely about seeking thrills and conquering the unknown, but also about fostering compassion, friendship, and a deep appreciation for the beauty of the ordinary.\n' +
'\n' +
'Eventually, Ethan\'s journey led him back to his village. With the Eye of Imagination, he transformed the village into a place of wonders and endless possibilities. Fields blossomed into vibrant gardens, simple tools turned into intricate works of art, and the villagers felt a renewed sense of hope and inspiration.\n' +
"Eventually, Ethan's journey led him back to his village. With the Eye of Imagination, he transformed the village into a place of wonders and endless possibilities. Fields blossomed into vibrant gardens, simple tools turned into intricate works of art, and the villagers felt a renewed sense of hope and inspiration.\n" +
'\n' +
'Ethan, now known as the Village Magician, realized that the true magic lied within everyone\'s hearts. He taught the villagers to embrace their creativity, to dream big, and to never underestimate the power of imagination. And so, the village flourished, becoming a beacon of wonder and creativity for all to see.\n' +
"Ethan, now known as the Village Magician, realized that the true magic lied within everyone's hearts. He taught the villagers to embrace their creativity, to dream big, and to never underestimate the power of imagination. And so, the village flourished, becoming a beacon of wonder and creativity for all to see.\n" +
'\n' +
'In the years that followed, Ethan\'s adventures continued, though mostly within the confines of his beloved village. But he never forgot the thrill of that first grand adventure. And every now and then, when looking up at the starry night sky, he would allow his mind to wander, knowing that the greatest adventures were still waiting to be discovered.',
"In the years that followed, Ethan's adventures continued, though mostly within the confines of his beloved village. But he never forgot the thrill of that first grand adventure. And every now and then, when looking up at the starry night sky, he would allow his mind to wander, knowing that the greatest adventures were still waiting to be discovered.",
},
{
role: 'user',
@@ -41,30 +41,30 @@
'\n' +
'Once there was a young lad by the name of Ethan, raised in a little hamlet nestled betwixt the verdant knolls, who possessed an irrepressible yearning for knowledge, a thirst unquenchable and a spirit teeming with curiosity. As the golden sun bathed the bucolic land in its effulgent light, he would tread through the village, his ears attuned to the tales spun by the townsfolk, his eyes absorbing the tapestry woven by the world surrounding him.\n' +
'\n' +
'One radiant day, whilst exploring the periphery of the settlement, Ethan chanced upon a timeworn tome, ensconced amidst the roots of an ancient oak, cloaked in the shroud of neglect. The dust gathered upon it spoke of time\'s relentless march. A book of fairy tales garnished with vivid descriptions of mystical woods, fantastical beasts, and ventures daring beyond the ordinary humdrum existence. Intrigued and beguiled, Ethan pried open the weathered pages and succumbed to their beckoning whispers.\n' +
"One radiant day, whilst exploring the periphery of the settlement, Ethan chanced upon a timeworn tome, ensconced amidst the roots of an ancient oak, cloaked in the shroud of neglect. The dust gathered upon it spoke of time's relentless march. A book of fairy tales garnished with vivid descriptions of mystical woods, fantastical beasts, and ventures daring beyond the ordinary humdrum existence. Intrigued and beguiled, Ethan pried open the weathered pages and succumbed to their beckoning whispers.\n" +
'\n' +
'In each tale, he was transported to a realm of enchantment and wonderment, inexorably tugging at the strings of his yearning for peripatetic exploration. Inspired by the narratives he had devoured, Ethan resolved to bid adieu to kinfolk and embark upon a sojourn, with dreams of procuring a firsthand glimpse into the domain of mystique that lay beyond the village\'s circumscribed boundary.\n' +
"In each tale, he was transported to a realm of enchantment and wonderment, inexorably tugging at the strings of his yearning for peripatetic exploration. Inspired by the narratives he had devoured, Ethan resolved to bid adieu to kinfolk and embark upon a sojourn, with dreams of procuring a firsthand glimpse into the domain of mystique that lay beyond the village's circumscribed boundary.\n" +
'\n' +
'Thus, he bade tearful farewells, girding himself for a path that guided him to a dense and captivating woodland, whispered of as a sanctuary to mythical beings and clandestine troves of treasures. As Ethan plunged deeper into the heart of the arboreal labyrinth, he felt a palpable surge of electricity, as though the sylvan sentinels whispered enigmatic secrets that only the perceptive ear could discern.\n' +
'\n' +
'It wasn\'t long before his path intertwined with that of a capricious sprite christened Sparkle, bearing an impish grin and eyes sparkling with mischief. Sparkle played the role of Virgil to Ethan\'s Dante, guiding him through the intricate tapestry of arboreal scions, issuing warnings of perils concealed and spinning tales of ancient entities that called this very bosky enclave home.\n' +
"It wasn't long before his path intertwined with that of a capricious sprite christened Sparkle, bearing an impish grin and eyes sparkling with mischief. Sparkle played the role of Virgil to Ethan's Dante, guiding him through the intricate tapestry of arboreal scions, issuing warnings of perils concealed and spinning tales of ancient entities that called this very bosky enclave home.\n" +
'\n' +
'Together, they stumbled upon a luminous lake, its shimmering waters imbued with a celestial light. At the center lay a diminutive island, upon which reposed a cottage fashioned from tender petals and verdant leaves. It belonged to an ancient sorceress of considerable wisdom, Celestia by name.\n' +
'\n' +
'Celestia, with her power to bestow a single wish on any intrepid soul who happened upon her abode, met Ethan\'s desire with a congenial nod, his fervor for a grand expedition not lost on her penetrating gaze. In response, she bequeathed unto him a necklace of magical manufacture adorned with the rare gemstone known as the Eye of Imagination whose very essence transformed dreams into vivid reality. From that moment forward, not a single cogitation nor nebulous fanciful notion of Ethan\'s ever lacked physicality.\n' +
"Celestia, with her power to bestow a single wish on any intrepid soul who happened upon her abode, met Ethan's desire with a congenial nod, his fervor for a grand expedition not lost on her penetrating gaze. In response, she bequeathed unto him a necklace of magical manufacture adorned with the rare gemstone known as the Eye of Imagination whose very essence transformed dreams into vivid reality. From that moment forward, not a single cogitation nor nebulous fanciful notion of Ethan's ever lacked physicality.\n" +
'\n' +
'Energized by this newfound potency, Ethan continued his sojourn, encountering mythical creatures, unraveling cerebral enigmas, and braving perils aplenty along the winding roads of destiny. Armed with the Eye of Imagination, he brought forth life from immobile statuary, unlocked forbidding portals, and even tamed the ferocious beasts of yore their fiery breath reduced to a whisper.\n' +
'\n' +
'As the weeks metamorphosed into months, Ethan grew wiser and more attuned to the ebb and flow of the world enveloping him. He gleaned that true adventure isn\'t solely confined to sating a thirst for adrenaline and conquering the unknown; indeed, it resides in fostering compassion, fostering amicable bonds, and cherishing the beauty entwined within the quotidian veld.\n' +
"As the weeks metamorphosed into months, Ethan grew wiser and more attuned to the ebb and flow of the world enveloping him. He gleaned that true adventure isn't solely confined to sating a thirst for adrenaline and conquering the unknown; indeed, it resides in fostering compassion, fostering amicable bonds, and cherishing the beauty entwined within the quotidian veld.\n" +
'\n' +
'Eventually, Ethan\'s quest drew him homeward, back to his village. Buoying the Eye of Imagination\'s ethereal power, he imbued the hitherto unremarkable settlement with the patina of infinite possibilities. The bounteous fields bloomed into kaleidoscopic gardens, simple instruments transmuting into intricate masterpieces, and the villagers themselves clasped within their hearts a renewed ardor, a conflagration of hope and inspiration.\n' +
"Eventually, Ethan's quest drew him homeward, back to his village. Buoying the Eye of Imagination's ethereal power, he imbued the hitherto unremarkable settlement with the patina of infinite possibilities. The bounteous fields bloomed into kaleidoscopic gardens, simple instruments transmuting into intricate masterpieces, and the villagers themselves clasped within their hearts a renewed ardor, a conflagration of hope and inspiration.\n" +
'\n' +
'Behold Ethan, at present hailed as the Village Magician a cognomen befitting his sorcery wielded within the confines of the community he adored. His exploits may have become tethered to a geographical locale, but the flame kindled by that premier sojourn never flickered. Occasionally, beneath the veiled twilight canopy, when peering skyward toward the astral canvas bedecked with countless luminescent diamonds, he\'d allow his mind to wander, cognizant of boundless adventures yet to take flight, forever awaiting his indomitable spirit.',
"Behold Ethan, at present hailed as the Village Magician a cognomen befitting his sorcery wielded within the confines of the community he adored. His exploits may have become tethered to a geographical locale, but the flame kindled by that premier sojourn never flickered. Occasionally, beneath the veiled twilight canopy, when peering skyward toward the astral canvas bedecked with countless luminescent diamonds, he'd allow his mind to wander, cognizant of boundless adventures yet to take flight, forever awaiting his indomitable spirit.",
},
{
role: 'assistant',
content:
'Beautifully rewritten! It captures the essence of Cormac McCarthy\'s style, with its descriptive language and evocative imagery. The story retains the spirit of adventure and the power of imagination seen in the original. Well done! If you have any more requests or if there\'s anything else I can assist you with, feel free to let me know.',
"Beautifully rewritten! It captures the essence of Cormac McCarthy's style, with its descriptive language and evocative imagery. The story retains the spirit of adventure and the power of imagination seen in the original. Well done! If you have any more requests or if there's anything else I can assist you with, feel free to let me know.",
},
{
role: 'user',
@@ -73,19 +73,19 @@
{
role: 'assistant',
content:
'Certainly! Here\'s a short story reflecting Cormac McCarthy\'s style:\n' +
"Certainly! Here's a short story reflecting Cormac McCarthy's style:\n" +
'\n' +
'In a desolate wasteland scarred by the sins of mankind, a lone traveler ventured forth, his name forgotten by time, etched upon the shadows of memory. He navigated the barren plains, where the sun beat down, merciless and unrelenting, scorching the parched earth beneath his weary boots.\n' +
'\n' +
'Desolation was his only companion, the forsaken landscape a testament to the darkness that had befallen this forsaken realm. The wind whispered secrets long lost, carrying with it the echoes of a civilization long since undone, swallowed by the abyss.\n' +
'\n' +
'Amongst the shattered remnants of fallen cities, he sought solace, a glimmer of hope amidst the desolation. Rusted remnants of steel and concrete stood as sentinels of mankind\'s hubris, their jagged edges a chilling reminder of the fragility of all that was once deemed invincible.\n' +
"Amongst the shattered remnants of fallen cities, he sought solace, a glimmer of hope amidst the desolation. Rusted remnants of steel and concrete stood as sentinels of mankind's hubris, their jagged edges a chilling reminder of the fragility of all that was once deemed invincible.\n" +
'\n' +
'As twilight descended, casting a pallid hue upon the lifeless land, he happened upon a dilapidated house, its walls crumbling under the weight of time. Drawn to the desolation, he pushed open the creaking door, its rusty hinges screaming in protest.\n' +
'\n' +
'Inside, a world frozen in time awaited him. Dust particles danced through the air, illuminated by slivers of light that pierced the broken windows. Here, the past lay trapped, a melancholic tableau of lives lost and dreams shattered.\n' +
'\n' +
'He wandered through the empty rooms, bare walls whispering stories untold. In the corner, a dilapidated piano stood, its ivory keys long since abandoned by a musician\'s touch. He approached it cautiously, a faint glimmer of anticipation igniting in his weather-worn eyes.\n' +
"He wandered through the empty rooms, bare walls whispering stories untold. In the corner, a dilapidated piano stood, its ivory keys long since abandoned by a musician's touch. He approached it cautiously, a faint glimmer of anticipation igniting in his weather-worn eyes.\n" +
'\n' +
'Determined, he carefully brushed away the dust and debris that cloaked the instrument. With trembling hands, he pressed a key, and a discordant note filled the air, mingling with the sighs of forgotten melodies. The sound echoed through the silence, bringing with it the remnants of forgotten symphonies.\n' +
'\n' +
@@ -103,13 +103,13 @@
{
role: 'assistant',
content:
'Thank you! I\'m glad you enjoyed it. If you have any more writing requests or if there\'s anything else I can assist you with, feel free to let me know.',
"Thank you! I'm glad you enjoyed it. If you have any more writing requests or if there's anything else I can assist you with, feel free to let me know.",
},
{ role: 'user', content: 'you are very helpful' },
{
role: 'assistant',
content:
'Thank you for your kind words! I\'m here to assist you in any way I can. If you have any more questions, need further assistance, or just want to chat, feel free to reach out.',
"Thank you for your kind words! I'm here to assist you in any way I can. If you have any more questions, need further assistance, or just want to chat, feel free to reach out.",
},
{ role: 'user', content: 'no you man' },
];

View File

@@ -9,7 +9,7 @@ const chatPromptMemory = new ConversationSummaryBufferMemory({
});
(async () => {
await chatPromptMemory.saveContext({ input: 'hi my name\'s Danny' }, { output: 'whats up' });
await chatPromptMemory.saveContext({ input: "hi my name's Danny" }, { output: 'whats up' });
await chatPromptMemory.saveContext({ input: 'not much you' }, { output: 'not much' });
await chatPromptMemory.saveContext(
{ input: 'are you excited for the olympics?' },

View File

@@ -74,7 +74,7 @@ describe('addImages', () => {
it('should append correctly from a real scenario', () => {
responseMessage.text =
'Here is the generated image based on your request. It depicts a surreal landscape filled with floating musical notes. The style is impressionistic, with vibrant sunset hues dominating the scene. At the center, there\'s a silhouette of a grand piano, adding a dreamy emotion to the overall image. This could serve as a unique and creative music album cover. Would you like to make any changes or generate another image?';
"Here is the generated image based on your request. It depicts a surreal landscape filled with floating musical notes. The style is impressionistic, with vibrant sunset hues dominating the scene. At the center, there's a silhouette of a grand piano, adding a dreamy emotion to the overall image. This could serve as a unique and creative music album cover. Would you like to make any changes or generate another image?";
const originalText = responseMessage.text;
const imageMarkdown = '![generated image](/images/img-RnVWaYo2Yg4x3e0isICiMuf5.png)';
intermediateSteps.push({ observation: imageMarkdown });

View File

@@ -65,14 +65,14 @@ function buildPromptPrefix({ result, message, functionsAgent }) {
const preliminaryAnswer =
result.output?.length > 0 ? `Preliminary Answer: "${result.output.trim()}"` : '';
const prefix = preliminaryAnswer
? 'review and improve the answer you generated using plugins in response to the User Message below. The user hasn\'t seen your answer or thoughts yet.'
? "review and improve the answer you generated using plugins in response to the User Message below. The user hasn't seen your answer or thoughts yet."
: 'respond to the User Message below based on your preliminary thoughts & actions.';
return `As a helpful AI Assistant, ${prefix}${errorMessage}\n${internalActions}
${preliminaryAnswer}
Reply conversationally to the User based on your ${
preliminaryAnswer ? 'preliminary answer, ' : ''
}internal actions, thoughts, and observations, making improvements wherever possible, but do not modify URLs.
preliminaryAnswer ? 'preliminary answer, ' : ''
}internal actions, thoughts, and observations, making improvements wherever possible, but do not modify URLs.
${
preliminaryAnswer
? ''

View File

@@ -6,7 +6,7 @@ describe('addCacheControl', () => {
{ role: 'user', content: [{ type: 'text', text: 'Hello' }] },
{ role: 'assistant', content: [{ type: 'text', text: 'Hi there' }] },
{ role: 'user', content: [{ type: 'text', text: 'How are you?' }] },
{ role: 'assistant', content: [{ type: 'text', text: 'I\'m doing well, thanks!' }] },
{ role: 'assistant', content: [{ type: 'text', text: "I'm doing well, thanks!" }] },
{ role: 'user', content: [{ type: 'text', text: 'Great!' }] },
];
@@ -22,7 +22,7 @@ describe('addCacheControl', () => {
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi there' },
{ role: 'user', content: 'How are you?' },
{ role: 'assistant', content: 'I\'m doing well, thanks!' },
{ role: 'assistant', content: "I'm doing well, thanks!" },
{ role: 'user', content: 'Great!' },
];
@@ -140,7 +140,7 @@ describe('addCacheControl', () => {
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi there' },
{ role: 'user', content: [{ type: 'text', text: 'How are you?' }] },
{ role: 'assistant', content: 'I\'m doing well, thanks!' },
{ role: 'assistant', content: "I'm doing well, thanks!" },
{ role: 'user', content: 'Great!' },
];
@@ -160,7 +160,7 @@ describe('addCacheControl', () => {
},
]);
expect(result[1].content).toBe('Hi there');
expect(result[3].content).toBe('I\'m doing well, thanks!');
expect(result[3].content).toBe("I'm doing well, thanks!");
});
test('should handle edge case with multiple content types', () => {

View File

@@ -3,7 +3,6 @@ const { EModelEndpoint, ArtifactModes } = require('librechat-data-provider');
const { generateShadcnPrompt } = require('~/app/clients/prompts/shadcn-docs/generate');
const { components } = require('~/app/clients/prompts/shadcn-docs/components');
// eslint-disable-next-line no-unused-vars
const artifactsPromptV1 = dedent`The assistant can create and reference artifacts during conversations.
Artifacts are for substantial, self-contained content that users might modify or reuse, displayed in a separate UI window for clarity.

View File

@@ -96,35 +96,35 @@ function createContextHandlers(req, userMessageContent) {
resolvedQueries.length === 0
? '\n\tThe semantic search did not return any results.'
: resolvedQueries
.map((queryResult, index) => {
const file = processedFiles[index];
let contextItems = queryResult.data;
.map((queryResult, index) => {
const file = processedFiles[index];
let contextItems = queryResult.data;
const generateContext = (currentContext) =>
`
const generateContext = (currentContext) =>
`
<file>
<filename>${file.filename}</filename>
<context>${currentContext}
</context>
</file>`;
if (useFullContext) {
return generateContext(`\n${contextItems}`);
}
if (useFullContext) {
return generateContext(`\n${contextItems}`);
}
contextItems = queryResult.data
.map((item) => {
const pageContent = item[0].page_content;
return `
contextItems = queryResult.data
.map((item) => {
const pageContent = item[0].page_content;
return `
<contextItem>
<![CDATA[${pageContent?.trim()}]]>
</contextItem>`;
})
.join('');
})
.join('');
return generateContext(contextItems);
})
.join('');
return generateContext(contextItems);
})
.join('');
if (useFullContext) {
const prompt = `${header}

View File

@@ -130,7 +130,7 @@ describe('formatAgentMessages', () => {
content: [
{
type: ContentTypes.TEXT,
[ContentTypes.TEXT]: 'I\'ll search for that information.',
[ContentTypes.TEXT]: "I'll search for that information.",
tool_call_ids: ['search_1'],
},
{
@@ -144,7 +144,7 @@ describe('formatAgentMessages', () => {
},
{
type: ContentTypes.TEXT,
[ContentTypes.TEXT]: 'Now, I\'ll convert the temperature.',
[ContentTypes.TEXT]: "Now, I'll convert the temperature.",
tool_call_ids: ['convert_1'],
},
{
@@ -156,7 +156,7 @@ describe('formatAgentMessages', () => {
output: '23.89°C',
},
},
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Here\'s your answer.' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: "Here's your answer." },
],
},
];
@@ -171,7 +171,7 @@ describe('formatAgentMessages', () => {
expect(result[4]).toBeInstanceOf(AIMessage);
// Check first AIMessage
expect(result[0].content).toBe('I\'ll search for that information.');
expect(result[0].content).toBe("I'll search for that information.");
expect(result[0].tool_calls).toHaveLength(1);
expect(result[0].tool_calls[0]).toEqual({
id: 'search_1',
@@ -187,7 +187,7 @@ describe('formatAgentMessages', () => {
);
// Check second AIMessage
expect(result[2].content).toBe('Now, I\'ll convert the temperature.');
expect(result[2].content).toBe("Now, I'll convert the temperature.");
expect(result[2].tool_calls).toHaveLength(1);
expect(result[2].tool_calls[0]).toEqual({
id: 'convert_1',
@@ -202,7 +202,7 @@ describe('formatAgentMessages', () => {
// Check final AIMessage
expect(result[4].content).toStrictEqual([
{ [ContentTypes.TEXT]: 'Here\'s your answer.', type: ContentTypes.TEXT },
{ [ContentTypes.TEXT]: "Here's your answer.", type: ContentTypes.TEXT },
]);
});
@@ -217,7 +217,7 @@ describe('formatAgentMessages', () => {
role: 'assistant',
content: [{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'How can I help you?' }],
},
{ role: 'user', content: 'What\'s the weather?' },
{ role: 'user', content: "What's the weather?" },
{
role: 'assistant',
content: [
@@ -240,7 +240,7 @@ describe('formatAgentMessages', () => {
{
role: 'assistant',
content: [
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Here\'s the weather information.' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: "Here's the weather information." },
],
},
];
@@ -265,12 +265,12 @@ describe('formatAgentMessages', () => {
{ [ContentTypes.TEXT]: 'How can I help you?', type: ContentTypes.TEXT },
]);
expect(result[2].content).toStrictEqual([
{ [ContentTypes.TEXT]: 'What\'s the weather?', type: ContentTypes.TEXT },
{ [ContentTypes.TEXT]: "What's the weather?", type: ContentTypes.TEXT },
]);
expect(result[3].content).toBe('Let me check that for you.');
expect(result[4].content).toBe('Sunny, 75°F');
expect(result[5].content).toStrictEqual([
{ [ContentTypes.TEXT]: 'Here\'s the weather information.', type: ContentTypes.TEXT },
{ [ContentTypes.TEXT]: "Here's the weather information.", type: ContentTypes.TEXT },
]);
// Check that there are no consecutive AIMessages

View File

@@ -1,8 +1,8 @@
module.exports = {
instructions:
'Remember, all your responses MUST be in the format described. Do not respond unless it\'s in the format described, using the structure of Action, Action Input, etc.',
"Remember, all your responses MUST be in the format described. Do not respond unless it's in the format described, using the structure of Action, Action Input, etc.",
errorInstructions:
'\nYou encountered an error in attempting a response. The user is not aware of the error so you shouldn\'t mention it.\nReview the actions taken carefully in case there is a partial or complete answer within them.\nError Message:',
"\nYou encountered an error in attempting a response. The user is not aware of the error so you shouldn't mention it.\nReview the actions taken carefully in case there is a partial or complete answer within them.\nError Message:",
imageInstructions:
'You must include the exact image paths from above, formatted in Markdown syntax: ![alt-text](URL)',
completionInstructions:

View File

@@ -18,17 +18,17 @@ function generateShadcnPrompt(options) {
Here are the components that are available, along with how to import them, and how to use them:
${Object.values(components)
.map((component) => {
if (useXML) {
return dedent`
.map((component) => {
if (useXML) {
return dedent`
<component>
<name>${component.componentName}</name>
<import-instructions>${component.importDocs}</import-instructions>
<usage-instructions>${component.usageDocs}</usage-instructions>
</component>
`;
} else {
return dedent`
} else {
return dedent`
# ${component.componentName}
## Import Instructions
@@ -37,9 +37,9 @@ function generateShadcnPrompt(options) {
## Usage Instructions
${component.usageDocs}
`;
}
})
.join('\n\n')}
}
})
.join('\n\n')}
`;
return systemPrompt;

View File

@@ -15,7 +15,7 @@ describe('AnthropicClient', () => {
{
role: 'user',
isCreatedByUser: true,
text: 'What\'s up',
text: "What's up",
messageId: '3',
parentMessageId: '2',
},
@@ -170,7 +170,7 @@ describe('AnthropicClient', () => {
client.options.modelLabel = 'Claude-2';
const result = await client.buildMessages(messages, parentMessageId);
const { prompt } = result;
expect(prompt).toContain('Human\'s name: John');
expect(prompt).toContain("Human's name: John");
expect(prompt).toContain('You are Claude-2');
});
});
@@ -244,6 +244,64 @@ describe('AnthropicClient', () => {
);
});
describe('Claude 4 model headers', () => {
it('should add "prompt-caching" beta header for claude-sonnet-4 model', () => {
const client = new AnthropicClient('test-api-key');
const modelOptions = {
model: 'claude-sonnet-4-20250514',
};
client.setOptions({ modelOptions, promptCache: true });
const anthropicClient = client.getClient(modelOptions);
expect(anthropicClient._options.defaultHeaders).toBeDefined();
expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
'prompt-caching-2024-07-31',
);
});
it('should add "prompt-caching" beta header for claude-opus-4 model', () => {
const client = new AnthropicClient('test-api-key');
const modelOptions = {
model: 'claude-opus-4-20250514',
};
client.setOptions({ modelOptions, promptCache: true });
const anthropicClient = client.getClient(modelOptions);
expect(anthropicClient._options.defaultHeaders).toBeDefined();
expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
'prompt-caching-2024-07-31',
);
});
it('should add "prompt-caching" beta header for claude-4-sonnet model', () => {
const client = new AnthropicClient('test-api-key');
const modelOptions = {
model: 'claude-4-sonnet-20250514',
};
client.setOptions({ modelOptions, promptCache: true });
const anthropicClient = client.getClient(modelOptions);
expect(anthropicClient._options.defaultHeaders).toBeDefined();
expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
'prompt-caching-2024-07-31',
);
});
it('should add "prompt-caching" beta header for claude-4-opus model', () => {
const client = new AnthropicClient('test-api-key');
const modelOptions = {
model: 'claude-4-opus-20250514',
};
client.setOptions({ modelOptions, promptCache: true });
const anthropicClient = client.getClient(modelOptions);
expect(anthropicClient._options.defaultHeaders).toBeDefined();
expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
'prompt-caching-2024-07-31',
);
});
});
it('should not add beta header for claude-3-5-sonnet-latest model', () => {
const client = new AnthropicClient('test-api-key');
const modelOptions = {
@@ -456,6 +514,34 @@ describe('AnthropicClient', () => {
expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
});
it('should not cap maxOutputTokens for Claude 4 Sonnet models', () => {
const client = new AnthropicClient('test-api-key');
const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 10; // 40,960 tokens
client.setOptions({
modelOptions: {
model: 'claude-sonnet-4-20250514',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
});
it('should not cap maxOutputTokens for Claude 4 Opus models', () => {
const client = new AnthropicClient('test-api-key');
const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 6; // 24,576 tokens (under 32K limit)
client.setOptions({
modelOptions: {
model: 'claude-opus-4-20250514',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
});
it('should cap maxOutputTokens for Claude 3.5 Haiku models', () => {
const client = new AnthropicClient('test-api-key');
const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
@@ -729,4 +815,223 @@ describe('AnthropicClient', () => {
expect(capturedOptions).toHaveProperty('topK', 10);
expect(capturedOptions).toHaveProperty('topP', 0.9);
});
describe('isClaudeLatest', () => {
it('should set isClaudeLatest to true for claude-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-3-sonnet-20240229',
},
});
expect(client.isClaudeLatest).toBe(true);
});
it('should set isClaudeLatest to true for claude-3.5 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-3.5-sonnet-20240229',
},
});
expect(client.isClaudeLatest).toBe(true);
});
it('should set isClaudeLatest to true for claude-sonnet-4 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-sonnet-4-20240229',
},
});
expect(client.isClaudeLatest).toBe(true);
});
it('should set isClaudeLatest to true for claude-opus-4 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-opus-4-20240229',
},
});
expect(client.isClaudeLatest).toBe(true);
});
it('should set isClaudeLatest to true for claude-3.5-haiku models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-3.5-haiku-20240229',
},
});
expect(client.isClaudeLatest).toBe(true);
});
it('should set isClaudeLatest to false for claude-2 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-2',
},
});
expect(client.isClaudeLatest).toBe(false);
});
it('should set isClaudeLatest to false for claude-instant models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-instant',
},
});
expect(client.isClaudeLatest).toBe(false);
});
it('should set isClaudeLatest to false for claude-sonnet-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-sonnet-3-20240229',
},
});
expect(client.isClaudeLatest).toBe(false);
});
it('should set isClaudeLatest to false for claude-opus-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-opus-3-20240229',
},
});
expect(client.isClaudeLatest).toBe(false);
});
it('should set isClaudeLatest to false for claude-haiku-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-haiku-3-20240229',
},
});
expect(client.isClaudeLatest).toBe(false);
});
});
describe('configureReasoning', () => {
it('should enable thinking for claude-opus-4 and claude-sonnet-4 models', async () => {
const client = new AnthropicClient('test-api-key');
// Create a mock async generator function
async function* mockAsyncGenerator() {
yield { type: 'message_start', message: { usage: {} } };
yield { delta: { text: 'Test response' } };
yield { type: 'message_delta', usage: {} };
}
// Mock createResponse to return the async generator
jest.spyOn(client, 'createResponse').mockImplementation(() => {
return mockAsyncGenerator();
});
// Test claude-opus-4
client.setOptions({
modelOptions: {
model: 'claude-opus-4-20250514',
},
thinking: true,
thinkingBudget: 2000,
});
let capturedOptions = null;
jest.spyOn(client, 'getClient').mockImplementation((options) => {
capturedOptions = options;
return {};
});
const payload = [{ role: 'user', content: 'Test message' }];
await client.sendCompletion(payload, {});
expect(capturedOptions).toHaveProperty('thinking');
expect(capturedOptions.thinking).toEqual({
type: 'enabled',
budget_tokens: 2000,
});
// Test claude-sonnet-4
client.setOptions({
modelOptions: {
model: 'claude-sonnet-4-20250514',
},
thinking: true,
thinkingBudget: 2000,
});
await client.sendCompletion(payload, {});
expect(capturedOptions).toHaveProperty('thinking');
expect(capturedOptions.thinking).toEqual({
type: 'enabled',
budget_tokens: 2000,
});
});
});
});
describe('Claude Model Tests', () => {
it('should handle Claude 3 and 4 series models correctly', () => {
const client = new AnthropicClient('test-key');
// Claude 3 series models
const claude3Models = [
'claude-3-opus-20240229',
'claude-3-sonnet-20240229',
'claude-3-haiku-20240307',
'claude-3-5-sonnet-20240620',
'claude-3-5-haiku-20240620',
'claude-3.5-sonnet-20240620',
'claude-3.5-haiku-20240620',
'claude-3.7-sonnet-20240620',
'claude-3.7-haiku-20240620',
'anthropic/claude-3-opus-20240229',
'claude-3-opus-20240229/anthropic',
];
// Claude 4 series models
const claude4Models = [
'claude-sonnet-4-20250514',
'claude-opus-4-20250514',
'claude-4-sonnet-20250514',
'claude-4-opus-20250514',
'anthropic/claude-sonnet-4-20250514',
'claude-sonnet-4-20250514/anthropic',
];
// Test Claude 3 series
claude3Models.forEach((model) => {
client.setOptions({ modelOptions: { model } });
expect(
/claude-[3-9]/.test(client.modelOptions.model) ||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(client.modelOptions.model),
).toBe(true);
});
// Test Claude 4 series
claude4Models.forEach((model) => {
client.setOptions({ modelOptions: { model } });
expect(
/claude-[3-9]/.test(client.modelOptions.model) ||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(client.modelOptions.model),
).toBe(true);
});
// Test non-Claude 3/4 models
const nonClaudeModels = ['claude-2', 'claude-instant', 'gpt-4', 'gpt-3.5-turbo'];
nonClaudeModels.forEach((model) => {
client.setOptions({ modelOptions: { model } });
expect(
/claude-[3-9]/.test(client.modelOptions.model) ||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(client.modelOptions.model),
).toBe(false);
});
});
});
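// Illustrative sketch of the series check these tests exercise, built from the same two
// regexes asserted above; the helper name is hypothetical and not defined in the client.
const isClaude3OrNewer = (model) =>
  /claude-[3-9]/.test(model) || /claude-(?:sonnet|opus|haiku)-[4-9]/.test(model);
// isClaude3OrNewer('claude-sonnet-4-20250514'); // true
// isClaude3OrNewer('claude-3-opus-20240229'); // true
// isClaude3OrNewer('claude-2'); // false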

View File

@@ -52,7 +52,7 @@ const messageHistory = [
{
role: 'user',
isCreatedByUser: true,
text: 'What\'s up',
text: "What's up",
messageId: '3',
parentMessageId: '2',
},
@@ -456,7 +456,7 @@ describe('BaseClient', () => {
const chatMessages2 = await TestClient.loadHistory(conversationId, '3');
expect(TestClient.currentMessages).toHaveLength(3);
expect(chatMessages2[chatMessages2.length - 1].text).toEqual('What\'s up');
expect(chatMessages2[chatMessages2.length - 1].text).toEqual("What's up");
});
/* Most of the new sendMessage logic revolving around edited/continued AI messages

View File

@@ -462,17 +462,17 @@ describe('OpenAIClient', () => {
role: 'system',
name: 'example_user',
content:
'Let\'s circle back when we have more bandwidth to touch base on opportunities for increased leverage.',
"Let's circle back when we have more bandwidth to touch base on opportunities for increased leverage.",
},
{
role: 'system',
name: 'example_assistant',
content: 'Let\'s talk later when we\'re less busy about how to do better.',
content: "Let's talk later when we're less busy about how to do better.",
},
{
role: 'user',
content:
'This late pivot means we don\'t have time to boil the ocean for the client deliverable.',
"This late pivot means we don't have time to boil the ocean for the client deliverable.",
},
];

View File

@@ -18,7 +18,7 @@ class AzureAISearch extends Tool {
super();
this.name = 'azure-ai-search';
this.description =
'Use the \'azure-ai-search\' tool to retrieve search results relevant to your input';
"Use the 'azure-ai-search' tool to retrieve search results relevant to your input";
/* Used to initialize the Tool without necessary variables. */
this.override = fields.override ?? false;

View File

@@ -11,7 +11,7 @@ const extractBaseURL = require('~/utils/extractBaseURL');
const { logger } = require('~/config');
const displayMessage =
'DALL-E displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';
"DALL-E displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
class DALLE3 extends Tool {
constructor(fields = {}) {
super();

View File

@@ -8,7 +8,7 @@ const { FileContext, ContentTypes } = require('librechat-data-provider');
const { logger } = require('~/config');
const displayMessage =
'Flux displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';
"Flux displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
/**
* FluxAPI - A tool for generating high-quality images from text prompts using the Flux API.

View File

@@ -64,7 +64,7 @@ const DEFAULT_IMAGE_EDIT_PROMPT_DESCRIPTION = `Describe the changes, enhancement
Always base this prompt on the most recently uploaded reference images.`;
const displayMessage =
'The tool displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';
"The tool displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
/**
* Replaces unwanted characters from the input string

View File

@@ -232,7 +232,7 @@ class OpenWeather extends Tool {
if (['current_forecast', 'timestamp', 'daily_aggregation', 'overview'].includes(action)) {
if (typeof finalLat !== 'number' || typeof finalLon !== 'number') {
return 'Error: lat and lon are required and must be numbers for this action (or specify \'city\').';
return "Error: lat and lon are required and must be numbers for this action (or specify 'city').";
}
}
@@ -243,7 +243,7 @@ class OpenWeather extends Tool {
let dt;
if (action === 'timestamp') {
if (!date) {
return 'Error: For timestamp action, a \'date\' in YYYY-MM-DD format is required.';
return "Error: For timestamp action, a 'date' in YYYY-MM-DD format is required.";
}
dt = this.convertDateToUnix(date);
}

View File

@@ -11,7 +11,7 @@ const paths = require('~/config/paths');
const { logger } = require('~/config');
const displayMessage =
'Stable Diffusion displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.';
"Stable Diffusion displayed an image. All generated images are already plainly visible, so don't repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.";
class StableDiffusionAPI extends Tool {
constructor(fields) {
@@ -44,7 +44,7 @@ class StableDiffusionAPI extends Tool {
// "negative_prompt":"semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime, out of frame, low quality, ugly, mutation, deformed"
// - Generate images only once per human query unless explicitly requested by the user`;
this.description =
'You can generate images using text with \'stable-diffusion\'. This tool is exclusively for visual content.';
"You can generate images using text with 'stable-diffusion'. This tool is exclusively for visual content.";
this.schema = z.object({
prompt: z
.string()

View File

@@ -21,7 +21,7 @@ class TraversaalSearch extends Tool {
query: z
.string()
.describe(
'A properly written sentence to be interpreted by an AI to search the web according to the user\'s request.',
"A properly written sentence to be interpreted by an AI to search the web according to the user's request.",
),
});
@@ -38,7 +38,6 @@ class TraversaalSearch extends Tool {
return apiKey;
}
// eslint-disable-next-line no-unused-vars
async _call({ query }, _runManager) {
const body = {
query: [query],

View File

@@ -29,7 +29,7 @@ function parseTranscript(transcriptResponse) {
.map((entry) => entry.text.trim())
.filter((text) => text)
.join(' ')
.replaceAll('&amp;#39;', '\'');
.replaceAll('&amp;#39;', "'");
}
function createYouTubeTools(fields = {}) {

View File

@@ -135,7 +135,7 @@ const createFileSearchTool = async ({ req, files, entity_id }) => {
query: z
.string()
.describe(
'A natural language query to search for relevant information in the files. Be specific and use keywords related to the information you\'re looking for. The query will be used for semantic similarity matching against the file contents.',
"A natural language query to search for relevant information in the files. Be specific and use keywords related to the information you're looking for. The query will be used for semantic similarity matching against the file contents.",
),
}),
},

View File

@@ -1,7 +1,13 @@
const { SerpAPI } = require('@langchain/community/tools/serpapi');
const { Calculator } = require('@langchain/community/tools/calculator');
const { createCodeExecutionTool, EnvVar } = require('@librechat/agents');
const { Tools, Constants, EToolResources } = require('librechat-data-provider');
const { EnvVar, createCodeExecutionTool, createSearchTool } = require('@librechat/agents');
const {
Tools,
Constants,
EToolResources,
loadWebSearchAuth,
replaceSpecialVars,
} = require('librechat-data-provider');
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const {
availableTools,
@@ -138,7 +144,6 @@ const loadTools = async ({
agent,
model,
endpoint,
useSpecs,
tools = [],
options = {},
functions = true,
@@ -263,6 +268,33 @@ const loadTools = async ({
return createFileSearchTool({ req: options.req, files, entity_id: agent?.id });
};
continue;
} else if (tool === Tools.web_search) {
const webSearchConfig = options?.req?.app?.locals?.webSearch;
const result = await loadWebSearchAuth({
userId: user,
loadAuthValues,
webSearchConfig,
});
const { onSearchResults, onGetHighlights } = options?.[Tools.web_search] ?? {};
requestedTools[tool] = async () => {
toolContextMap[tool] = `# \`${tool}\`:
Current Date & Time: ${replaceSpecialVars({ text: '{{iso_datetime}}' })}
1. **Execute immediately without preface** when using \`${tool}\`.
2. **After the search, begin with a brief summary** that directly addresses the query without headers or explaining your process.
3. **Structure your response clearly** using Markdown formatting (Level 2 headers for sections, lists for multiple points, tables for comparisons).
4. **Cite sources properly** according to the citation anchor format, utilizing group anchors when appropriate.
5. **Tailor your approach to the query type** (academic, news, coding, etc.) while maintaining an expert, journalistic, unbiased tone.
6. **Provide comprehensive information** with specific details, examples, and as much relevant context as possible from search results.
7. **Avoid moralizing language.**
`.trim();
return createSearchTool({
...result.authResult,
onSearchResults,
onGetHighlights,
logger,
});
};
continue;
} else if (tool && appTools[tool] && mcpToolPattern.test(tool)) {
requestedTools[tool] = async () =>
createMCPTool({

View File

@@ -218,7 +218,6 @@ describe('Tool Handlers', () => {
try {
await loadTool2();
} catch (error) {
// eslint-disable-next-line jest/no-conditional-expect
expect(error).toBeDefined();
}
});

View File

@@ -61,6 +61,10 @@ const abortKeys = isRedisEnabled
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.ABORT_KEYS, ttl: Time.TEN_MINUTES });
const openIdExchangedTokensCache = isRedisEnabled
? new Keyv({ store: keyvRedis, ttl: Time.TEN_MINUTES })
: new Keyv({ namespace: CacheKeys.OPENID_EXCHANGED_TOKENS, ttl: Time.TEN_MINUTES });
const namespaces = {
[CacheKeys.ROLES]: roles,
[CacheKeys.CONFIG_STORE]: config,
@@ -98,6 +102,7 @@ const namespaces = {
[CacheKeys.AUDIO_RUNS]: audioRuns,
[CacheKeys.MESSAGES]: messages,
[CacheKeys.FLOWS]: flows,
[CacheKeys.OPENID_EXCHANGED_TOKENS]: openIdExchangedTokensCache,
};
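// A minimal usage sketch for the new cache, assuming the namespaces map above backs
// getLogStores and using the standard Keyv get/set API (key name is hypothetical):
// const cache = getLogStores(CacheKeys.OPENID_EXCHANGED_TOKENS);
// await cache.set(`exchanged:${userId}`, tokenSet); // evicted after Time.TEN_MINUTES
// const cached = await cache.get(`exchanged:${userId}`);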
/**

View File

@@ -76,10 +76,13 @@ if (REDIS_URI && isEnabled(USE_REDIS)) {
keyvRedis = new KeyvRedis(REDIS_URI, keyvOpts);
}
const pingInterval = setInterval(() => {
logger.debug('KeyvRedis ping');
keyvRedis.client.ping().catch(err => logger.error('Redis keep-alive ping failed:', err));
}, 5 * 60 * 1000);
const pingInterval = setInterval(
() => {
logger.debug('KeyvRedis ping');
keyvRedis.client.ping().catch((err) => logger.error('Redis keep-alive ping failed:', err));
},
5 * 60 * 1000,
);
keyvRedis.on('ready', () => {
logger.info('KeyvRedis connection ready');

View File

@@ -11,5 +11,8 @@ module.exports = {
moduleNameMapper: {
'~/(.*)': '<rootDir>/$1',
'~/data/auth.json': '<rootDir>/__mocks__/auth.mock.json',
'^openid-client/passport$': '<rootDir>/test/__mocks__/openid-client-passport.js', // Mock for the passport strategy part
'^openid-client$': '<rootDir>/test/__mocks__/openid-client.js',
},
transformIgnorePatterns: ['/node_modules/(?!(openid-client|oauth4webapi|jose)/).*/'],
};

View File

@@ -1,6 +1,7 @@
const mongoose = require('mongoose');
const crypto = require('node:crypto');
const { agentSchema } = require('@librechat/data-schemas');
const { SystemRoles, Tools } = require('librechat-data-provider');
const { SystemRoles, Tools, actionDelimiter } = require('librechat-data-provider');
const { GLOBAL_PROJECT_NAME, EPHEMERAL_AGENT_ID, mcp_delimiter } =
require('librechat-data-provider').Constants;
const { CONFIG_STORE, STARTUP_CONFIG } = require('librechat-data-provider').CacheKeys;
@@ -11,6 +12,8 @@ const {
removeAgentFromAllProjects,
} = require('./Project');
const getLogStores = require('~/cache/getLogStores');
const { getActions } = require('./Action');
const { logger } = require('~/config');
const Agent = mongoose.model('agent', agentSchema);
@@ -21,7 +24,19 @@ const Agent = mongoose.model('agent', agentSchema);
* @throws {Error} If the agent creation fails.
*/
const createAgent = async (agentData) => {
return (await Agent.create(agentData)).toObject();
const { author, ...versionData } = agentData;
const timestamp = new Date();
const initialAgentData = {
...agentData,
versions: [
{
...versionData,
createdAt: timestamp,
updatedAt: timestamp,
},
],
};
return (await Agent.create(initialAgentData)).toObject();
};
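// Illustrative call mirroring the seeded-version behavior above (field values hypothetical):
// const agent = await createAgent({
//   id: 'agent_abc',
//   name: 'My Agent',
//   provider: 'anthropic',
//   model: 'claude-sonnet-4-20250514',
//   author: userId,
// });
// agent.versions has a single entry that copies the creation data (minus `author`)
// with matching createdAt/updatedAt timestamps.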
/**
@@ -48,12 +63,17 @@ const loadEphemeralAgent = ({ req, agent_id, endpoint, model_parameters: _m }) =
const { model, ...model_parameters } = _m;
/** @type {Record<string, FunctionTool>} */
const availableTools = req.app.locals.availableTools;
const mcpServers = new Set(req.body.ephemeralAgent?.mcp);
/** @type {TEphemeralAgent | null} */
const ephemeralAgent = req.body.ephemeralAgent;
const mcpServers = new Set(ephemeralAgent?.mcp);
/** @type {string[]} */
const tools = [];
if (req.body.ephemeralAgent?.execute_code === true) {
if (ephemeralAgent?.execute_code === true) {
tools.push(Tools.execute_code);
}
if (ephemeralAgent?.web_search === true) {
tools.push(Tools.web_search);
}
if (mcpServers.size > 0) {
for (const toolName of Object.keys(availableTools)) {
@@ -103,6 +123,8 @@ const loadAgent = async ({ req, agent_id, endpoint, model_parameters }) => {
return null;
}
agent.version = agent.versions ? agent.versions.length : 0;
if (agent.author.toString() === req.user.id) {
return agent;
}
@@ -127,19 +149,207 @@ const loadAgent = async ({ req, agent_id, endpoint, model_parameters }) => {
}
};
/**
 * Check whether the would-be version duplicates the most recent existing version, excluding timestamp and author fields
* @param {Object} updateData - The update data to compare
* @param {Object} currentData - The current agent data
* @param {Array} versions - The existing versions array
* @param {string} [actionsHash] - Hash of current action metadata
* @returns {Object|null} - The matching version if found, null otherwise
*/
const isDuplicateVersion = (updateData, currentData, versions, actionsHash = null) => {
if (!versions || versions.length === 0) {
return null;
}
const excludeFields = [
'_id',
'id',
'createdAt',
'updatedAt',
'author',
'updatedBy',
'created_at',
'updated_at',
'__v',
'agent_ids',
'versions',
'actionsHash', // Exclude actionsHash from direct comparison
];
const { $push, $pull, $addToSet, ...directUpdates } = updateData;
if (Object.keys(directUpdates).length === 0 && !actionsHash) {
return null;
}
const wouldBeVersion = { ...currentData, ...directUpdates };
const lastVersion = versions[versions.length - 1];
if (actionsHash && lastVersion.actionsHash !== actionsHash) {
return null;
}
const allFields = new Set([...Object.keys(wouldBeVersion), ...Object.keys(lastVersion)]);
const importantFields = Array.from(allFields).filter((field) => !excludeFields.includes(field));
let isMatch = true;
for (const field of importantFields) {
if (!wouldBeVersion[field] && !lastVersion[field]) {
continue;
}
if (Array.isArray(wouldBeVersion[field]) && Array.isArray(lastVersion[field])) {
if (wouldBeVersion[field].length !== lastVersion[field].length) {
isMatch = false;
break;
}
// Special handling for projectIds (MongoDB ObjectIds)
if (field === 'projectIds') {
const wouldBeIds = wouldBeVersion[field].map((id) => id.toString()).sort();
const versionIds = lastVersion[field].map((id) => id.toString()).sort();
if (!wouldBeIds.every((id, i) => id === versionIds[i])) {
isMatch = false;
break;
}
}
// Handle arrays of objects like tool_kwargs
else if (typeof wouldBeVersion[field][0] === 'object' && wouldBeVersion[field][0] !== null) {
const sortedWouldBe = [...wouldBeVersion[field]].map((item) => JSON.stringify(item)).sort();
const sortedVersion = [...lastVersion[field]].map((item) => JSON.stringify(item)).sort();
if (!sortedWouldBe.every((item, i) => item === sortedVersion[i])) {
isMatch = false;
break;
}
} else {
const sortedWouldBe = [...wouldBeVersion[field]].sort();
const sortedVersion = [...lastVersion[field]].sort();
if (!sortedWouldBe.every((item, i) => item === sortedVersion[i])) {
isMatch = false;
break;
}
}
} else if (field === 'model_parameters') {
const wouldBeParams = wouldBeVersion[field] || {};
const lastVersionParams = lastVersion[field] || {};
if (JSON.stringify(wouldBeParams) !== JSON.stringify(lastVersionParams)) {
isMatch = false;
break;
}
} else if (wouldBeVersion[field] !== lastVersion[field]) {
isMatch = false;
break;
}
}
return isMatch ? lastVersion : null;
};
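// A small sketch of the comparison above (shapes are hypothetical): the check only looks
// at the most recent version, and order-insensitive array equality counts as a match.
// const versions = [{ name: 'Agent', tools: ['a', 'b'], model_parameters: { temperature: 0.7 } }];
// const current = { ...versions[0] };
// isDuplicateVersion({ name: 'Agent' }, current, versions); // → the matching last version
// isDuplicateVersion({ name: 'Renamed' }, current, versions); // → null, a new version is allowed
// isDuplicateVersion({ tools: ['b', 'a'] }, current, versions); // → still a match (sorted compare)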
/**
 * Update an agent with new data without overwriting existing properties.
* When an agent is updated, a copy of the current state will be saved to the versions array.
*
* @param {Object} searchParameter - The search parameters to find the agent to update.
* @param {string} searchParameter.id - The ID of the agent to update.
* @param {string} [searchParameter.author] - The user ID of the agent's author.
* @param {Object} updateData - An object containing the properties to update.
* @param {Object} [options] - Optional configuration object.
* @param {string} [options.updatingUserId] - The ID of the user performing the update (used for tracking non-author updates).
* @param {boolean} [options.forceVersion] - Force creation of a new version even if no fields changed.
 * @returns {Promise<Agent>} The updated agent document as a plain object, or null if no agent matches.
* @throws {Error} If the update would create a duplicate version
*/
const updateAgent = async (searchParameter, updateData) => {
const options = { new: true, upsert: false };
return Agent.findOneAndUpdate(searchParameter, updateData, options).lean();
const updateAgent = async (searchParameter, updateData, options = {}) => {
const { updatingUserId = null, forceVersion = false } = options;
const mongoOptions = { new: true, upsert: false };
const currentAgent = await Agent.findOne(searchParameter);
if (currentAgent) {
const { __v, _id, id, versions, author, ...versionData } = currentAgent.toObject();
const { $push, $pull, $addToSet, ...directUpdates } = updateData;
let actionsHash = null;
// Generate actions hash if agent has actions
if (currentAgent.actions && currentAgent.actions.length > 0) {
// Extract action IDs from the format "domain_action_id"
const actionIds = currentAgent.actions
.map((action) => {
const parts = action.split(actionDelimiter);
return parts[1]; // Get just the action ID part
})
.filter(Boolean);
if (actionIds.length > 0) {
try {
const actions = await getActions(
{
action_id: { $in: actionIds },
},
true,
); // Include sensitive data for hash
actionsHash = await generateActionMetadataHash(currentAgent.actions, actions);
} catch (error) {
logger.error('Error fetching actions for hash generation:', error);
}
}
}
const shouldCreateVersion =
forceVersion ||
(versions &&
versions.length > 0 &&
(Object.keys(directUpdates).length > 0 || $push || $pull || $addToSet));
if (shouldCreateVersion) {
const duplicateVersion = isDuplicateVersion(updateData, versionData, versions, actionsHash);
if (duplicateVersion && !forceVersion) {
const error = new Error(
'Duplicate version: This would create a version identical to an existing one',
);
error.statusCode = 409;
error.details = {
duplicateVersion,
versionIndex: versions.findIndex(
(v) => JSON.stringify(duplicateVersion) === JSON.stringify(v),
),
};
throw error;
}
}
const versionEntry = {
...versionData,
...directUpdates,
updatedAt: new Date(),
};
// Include actions hash in version if available
if (actionsHash) {
versionEntry.actionsHash = actionsHash;
}
// Always store updatedBy field to track who made the change
if (updatingUserId) {
versionEntry.updatedBy = new mongoose.Types.ObjectId(updatingUserId);
}
if (shouldCreateVersion || forceVersion) {
updateData.$push = {
...($push || {}),
versions: versionEntry,
};
}
}
return Agent.findOneAndUpdate(searchParameter, updateData, mongoOptions).lean();
};
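// Illustrative call shapes for the new options parameter (the agent id is hypothetical;
// see the version-history tests further down for the asserted behavior):
// await updateAgent({ id: 'agent_abc' }, { name: 'Renamed' }, { updatingUserId: req.user.id });
// await updateAgent(
//   { id: 'agent_abc' },
//   { tools: ['listEvents_action_test.com'] },
//   { updatingUserId: req.user.id, forceVersion: true }, // new version even if nothing changed
// );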
/**
@@ -151,7 +361,7 @@ const updateAgent = async (searchParameter, updateData) => {
* @param {string} params.file_id
* @returns {Promise<Agent>} The updated agent.
*/
const addAgentResourceFile = async ({ agent_id, tool_resource, file_id }) => {
const addAgentResourceFile = async ({ req, agent_id, tool_resource, file_id }) => {
const searchParameter = { id: agent_id };
let agent = await getAgent(searchParameter);
if (!agent) {
@@ -177,7 +387,9 @@ const addAgentResourceFile = async ({ agent_id, tool_resource, file_id }) => {
},
};
const updatedAgent = await updateAgent(searchParameter, updateData);
const updatedAgent = await updateAgent(searchParameter, updateData, {
updatingUserId: req?.user?.id,
});
if (updatedAgent) {
return updatedAgent;
} else {
@@ -341,7 +553,7 @@ const updateAgentProjects = async ({ user, agentId, projectIds, removeProjectIds
delete updateQuery.author;
}
const updatedAgent = await updateAgent(updateQuery, updateOps);
const updatedAgent = await updateAgent(updateQuery, updateOps, { updatingUserId: user.id });
if (updatedAgent) {
return updatedAgent;
}
@@ -358,6 +570,97 @@ const updateAgentProjects = async ({ user, agentId, projectIds, removeProjectIds
return await getAgent({ id: agentId });
};
/**
* Reverts an agent to a specific version in its version history.
* @param {Object} searchParameter - The search parameters to find the agent to revert.
* @param {string} searchParameter.id - The ID of the agent to revert.
* @param {string} [searchParameter.author] - The user ID of the agent's author.
* @param {number} versionIndex - The index of the version to revert to in the versions array.
* @returns {Promise<MongoAgent>} The updated agent document after reverting.
* @throws {Error} If the agent is not found or the specified version does not exist.
*/
const revertAgentVersion = async (searchParameter, versionIndex) => {
const agent = await Agent.findOne(searchParameter);
if (!agent) {
throw new Error('Agent not found');
}
if (!agent.versions || !agent.versions[versionIndex]) {
throw new Error(`Version ${versionIndex} not found`);
}
const revertToVersion = agent.versions[versionIndex];
const updateData = {
...revertToVersion,
};
delete updateData._id;
delete updateData.id;
delete updateData.versions;
delete updateData.author;
delete updateData.updatedBy;
return Agent.findOneAndUpdate(searchParameter, updateData, { new: true }).lean();
};
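// Illustrative revert (index 0 is the creation snapshot; the agent id is hypothetical):
// const restored = await revertAgentVersion({ id: 'agent_abc' }, 0);
// The selected version's fields are applied while `author`, `versions`, and `updatedBy`
// are stripped from the update so history and ownership stay intact.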
/**
* Generates a hash of action metadata for version comparison
* @param {string[]} actionIds - Array of action IDs in format "domain_action_id"
* @param {Action[]} actions - Array of action documents
* @returns {Promise<string>} - SHA256 hash of the action metadata
*/
const generateActionMetadataHash = async (actionIds, actions) => {
if (!actionIds || actionIds.length === 0) {
return '';
}
// Create a map of action_id to metadata for quick lookup
const actionMap = new Map();
actions.forEach((action) => {
actionMap.set(action.action_id, action.metadata);
});
// Sort action IDs for consistent hashing
const sortedActionIds = [...actionIds].sort();
// Build a deterministic string representation of all action metadata
const metadataString = sortedActionIds
.map((actionFullId) => {
// Extract just the action_id part (after the delimiter)
const parts = actionFullId.split(actionDelimiter);
const actionId = parts[1];
const metadata = actionMap.get(actionId);
if (!metadata) {
return `${actionId}:null`;
}
// Sort metadata keys for deterministic output
const sortedKeys = Object.keys(metadata).sort();
const metadataStr = sortedKeys
.map((key) => `${key}:${JSON.stringify(metadata[key])}`)
.join(',');
return `${actionId}:{${metadataStr}}`;
})
.join(';');
// Use Web Crypto API to generate hash
const encoder = new TextEncoder();
const data = encoder.encode(metadataString);
const hashBuffer = await crypto.webcrypto.subtle.digest('SHA-256', data);
const hashArray = Array.from(new Uint8Array(hashBuffer));
const hashHex = hashArray.map((b) => b.toString(16).padStart(2, '0')).join('');
return hashHex;
};
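// Sketch of the property the hash provides (action id and metadata are hypothetical,
// assuming actionDelimiter splits the "domain_action_id" format used in the tests below):
// const a = await generateActionMetadataHash(
//   ['test.com_action_abc123'],
//   [{ action_id: 'abc123', metadata: { domain: 'test.com' } }],
// );
// const b = await generateActionMetadataHash(
//   ['test.com_action_abc123'],
//   [{ action_id: 'abc123', metadata: { domain: 'test.com' } }],
// );
// a === b: unchanged metadata yields the same digest, so it does not by itself force a new version.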
/**
* Load a default agent based on the endpoint
* @param {string} endpoint
* @returns {Agent | null}
*/
module.exports = {
Agent,
getAgent,
@@ -366,7 +669,9 @@ module.exports = {
updateAgent,
deleteAgent,
getListAgents,
revertAgentVersion,
updateAgentProjects,
addAgentResourceFile,
removeAgentResourceFiles,
generateActionMetadataHash,
};

View File

@@ -1,7 +1,25 @@
const originalEnv = {
CREDS_KEY: process.env.CREDS_KEY,
CREDS_IV: process.env.CREDS_IV,
};
process.env.CREDS_KEY = '0123456789abcdef0123456789abcdef';
process.env.CREDS_IV = '0123456789abcdef';
const mongoose = require('mongoose');
const { v4: uuidv4 } = require('uuid');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { Agent, addAgentResourceFile, removeAgentResourceFiles } = require('./Agent');
const {
Agent,
addAgentResourceFile,
removeAgentResourceFiles,
createAgent,
updateAgent,
getAgent,
deleteAgent,
getListAgents,
updateAgentProjects,
} = require('./Agent');
describe('Agent Resource File Operations', () => {
let mongoServer;
@@ -15,6 +33,8 @@ describe('Agent Resource File Operations', () => {
afterAll(async () => {
await mongoose.disconnect();
await mongoServer.stop();
process.env.CREDS_KEY = originalEnv.CREDS_KEY;
process.env.CREDS_IV = originalEnv.CREDS_IV;
});
beforeEach(async () => {
@@ -332,3 +352,725 @@ describe('Agent Resource File Operations', () => {
expect(finalFileIds).toHaveLength(0);
});
});
describe('Agent CRUD Operations', () => {
let mongoServer;
beforeAll(async () => {
mongoServer = await MongoMemoryServer.create();
const mongoUri = mongoServer.getUri();
await mongoose.connect(mongoUri);
});
afterAll(async () => {
await mongoose.disconnect();
await mongoServer.stop();
});
beforeEach(async () => {
await Agent.deleteMany({});
});
test('should create and get an agent', async () => {
const agentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
const newAgent = await createAgent({
id: agentId,
name: 'Test Agent',
provider: 'test',
model: 'test-model',
author: authorId,
description: 'Test description',
});
expect(newAgent).toBeDefined();
expect(newAgent.id).toBe(agentId);
expect(newAgent.name).toBe('Test Agent');
const retrievedAgent = await getAgent({ id: agentId });
expect(retrievedAgent).toBeDefined();
expect(retrievedAgent.id).toBe(agentId);
expect(retrievedAgent.name).toBe('Test Agent');
expect(retrievedAgent.description).toBe('Test description');
});
test('should delete an agent', async () => {
const agentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'Agent To Delete',
provider: 'test',
model: 'test-model',
author: authorId,
});
const agentBeforeDelete = await getAgent({ id: agentId });
expect(agentBeforeDelete).toBeDefined();
await deleteAgent({ id: agentId });
const agentAfterDelete = await getAgent({ id: agentId });
expect(agentAfterDelete).toBeNull();
});
test('should list agents by author', async () => {
const authorId = new mongoose.Types.ObjectId();
const otherAuthorId = new mongoose.Types.ObjectId();
const agentIds = [];
for (let i = 0; i < 5; i++) {
const id = `agent_${uuidv4()}`;
agentIds.push(id);
await createAgent({
id,
name: `Agent ${i}`,
provider: 'test',
model: 'test-model',
author: authorId,
});
}
for (let i = 0; i < 3; i++) {
await createAgent({
id: `other_agent_${uuidv4()}`,
name: `Other Agent ${i}`,
provider: 'test',
model: 'test-model',
author: otherAuthorId,
});
}
const result = await getListAgents({ author: authorId.toString() });
expect(result).toBeDefined();
expect(result.data).toBeDefined();
expect(result.data).toHaveLength(5);
expect(result.has_more).toBe(true);
for (const agent of result.data) {
expect(agent.author).toBe(authorId.toString());
}
});
test('should update agent projects', async () => {
const agentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
const projectId1 = new mongoose.Types.ObjectId();
const projectId2 = new mongoose.Types.ObjectId();
const projectId3 = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'Project Test Agent',
provider: 'test',
model: 'test-model',
author: authorId,
projectIds: [projectId1],
});
await updateAgent(
{ id: agentId },
{ $addToSet: { projectIds: { $each: [projectId2, projectId3] } } },
);
await updateAgent({ id: agentId }, { $pull: { projectIds: projectId1 } });
await updateAgent({ id: agentId }, { projectIds: [projectId2, projectId3] });
const updatedAgent = await getAgent({ id: agentId });
expect(updatedAgent.projectIds).toHaveLength(2);
expect(updatedAgent.projectIds.map((id) => id.toString())).toContain(projectId2.toString());
expect(updatedAgent.projectIds.map((id) => id.toString())).toContain(projectId3.toString());
expect(updatedAgent.projectIds.map((id) => id.toString())).not.toContain(projectId1.toString());
await updateAgent({ id: agentId }, { projectIds: [] });
const emptyProjectsAgent = await getAgent({ id: agentId });
expect(emptyProjectsAgent.projectIds).toHaveLength(0);
const nonExistentId = `agent_${uuidv4()}`;
await expect(
updateAgentProjects({
id: nonExistentId,
projectIds: [projectId1],
}),
).rejects.toThrow();
});
test('should handle ephemeral agent loading', async () => {
const agentId = 'ephemeral_test';
const endpoint = 'openai';
const originalModule = jest.requireActual('librechat-data-provider');
const mockDataProvider = {
...originalModule,
Constants: {
...originalModule.Constants,
EPHEMERAL_AGENT_ID: 'ephemeral_test',
},
};
jest.doMock('librechat-data-provider', () => mockDataProvider);
const mockReq = {
user: { id: 'user123' },
body: {
promptPrefix: 'This is a test instruction',
ephemeralAgent: {
execute_code: true,
mcp: ['server1', 'server2'],
},
},
app: {
locals: {
availableTools: {
tool__server1: {},
tool__server2: {},
another_tool: {},
},
},
},
};
const params = {
req: mockReq,
agent_id: agentId,
endpoint,
model_parameters: {
model: 'gpt-4',
temperature: 0.7,
},
};
expect(agentId).toBeDefined();
expect(endpoint).toBeDefined();
jest.dontMock('librechat-data-provider');
});
test('should handle loadAgent functionality and errors', async () => {
const agentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'Test Load Agent',
provider: 'test',
model: 'test-model',
author: authorId,
tools: ['tool1', 'tool2'],
});
const agent = await getAgent({ id: agentId });
expect(agent).toBeDefined();
expect(agent.id).toBe(agentId);
expect(agent.name).toBe('Test Load Agent');
expect(agent.tools).toEqual(expect.arrayContaining(['tool1', 'tool2']));
const mockLoadAgent = jest.fn().mockResolvedValue(agent);
const loadedAgent = await mockLoadAgent();
expect(loadedAgent).toBeDefined();
expect(loadedAgent.id).toBe(agentId);
const nonExistentId = `agent_${uuidv4()}`;
const nonExistentAgent = await getAgent({ id: nonExistentId });
expect(nonExistentAgent).toBeNull();
const mockLoadAgentError = jest.fn().mockRejectedValue(new Error('No agent found with ID'));
await expect(mockLoadAgentError()).rejects.toThrow('No agent found with ID');
});
});
describe('Agent Version History', () => {
let mongoServer;
beforeAll(async () => {
mongoServer = await MongoMemoryServer.create();
const mongoUri = mongoServer.getUri();
await mongoose.connect(mongoUri);
});
afterAll(async () => {
await mongoose.disconnect();
await mongoServer.stop();
});
beforeEach(async () => {
await Agent.deleteMany({});
});
test('should create an agent with a single entry in versions array', async () => {
const agentId = `agent_${uuidv4()}`;
const agent = await createAgent({
id: agentId,
name: 'Test Agent',
provider: 'test',
model: 'test-model',
author: new mongoose.Types.ObjectId(),
});
expect(agent.versions).toBeDefined();
expect(Array.isArray(agent.versions)).toBe(true);
expect(agent.versions).toHaveLength(1);
expect(agent.versions[0].name).toBe('Test Agent');
expect(agent.versions[0].provider).toBe('test');
expect(agent.versions[0].model).toBe('test-model');
});
test('should accumulate version history across multiple updates', async () => {
const agentId = `agent_${uuidv4()}`;
const author = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'First Name',
provider: 'test',
model: 'test-model',
author,
description: 'First description',
});
await updateAgent({ id: agentId }, { name: 'Second Name', description: 'Second description' });
await updateAgent({ id: agentId }, { name: 'Third Name', model: 'new-model' });
const finalAgent = await updateAgent({ id: agentId }, { description: 'Final description' });
expect(finalAgent.versions).toBeDefined();
expect(Array.isArray(finalAgent.versions)).toBe(true);
expect(finalAgent.versions).toHaveLength(4);
expect(finalAgent.versions[0].name).toBe('First Name');
expect(finalAgent.versions[0].description).toBe('First description');
expect(finalAgent.versions[0].model).toBe('test-model');
expect(finalAgent.versions[1].name).toBe('Second Name');
expect(finalAgent.versions[1].description).toBe('Second description');
expect(finalAgent.versions[1].model).toBe('test-model');
expect(finalAgent.versions[2].name).toBe('Third Name');
expect(finalAgent.versions[2].description).toBe('Second description');
expect(finalAgent.versions[2].model).toBe('new-model');
expect(finalAgent.versions[3].name).toBe('Third Name');
expect(finalAgent.versions[3].description).toBe('Final description');
expect(finalAgent.versions[3].model).toBe('new-model');
expect(finalAgent.name).toBe('Third Name');
expect(finalAgent.description).toBe('Final description');
expect(finalAgent.model).toBe('new-model');
});
test('should not include metadata fields in version history', async () => {
const agentId = `agent_${uuidv4()}`;
await createAgent({
id: agentId,
name: 'Test Agent',
provider: 'test',
model: 'test-model',
author: new mongoose.Types.ObjectId(),
});
const updatedAgent = await updateAgent({ id: agentId }, { description: 'New description' });
expect(updatedAgent.versions).toHaveLength(2);
expect(updatedAgent.versions[0]._id).toBeUndefined();
expect(updatedAgent.versions[0].__v).toBeUndefined();
expect(updatedAgent.versions[0].name).toBe('Test Agent');
expect(updatedAgent.versions[0].author).toBeUndefined();
expect(updatedAgent.versions[1]._id).toBeUndefined();
expect(updatedAgent.versions[1].__v).toBeUndefined();
});
test('should not recursively include previous versions', async () => {
const agentId = `agent_${uuidv4()}`;
await createAgent({
id: agentId,
name: 'Test Agent',
provider: 'test',
model: 'test-model',
author: new mongoose.Types.ObjectId(),
});
await updateAgent({ id: agentId }, { name: 'Updated Name 1' });
await updateAgent({ id: agentId }, { name: 'Updated Name 2' });
const finalAgent = await updateAgent({ id: agentId }, { name: 'Updated Name 3' });
expect(finalAgent.versions).toHaveLength(4);
finalAgent.versions.forEach((version) => {
expect(version.versions).toBeUndefined();
});
});
test('should handle MongoDB operators and field updates correctly', async () => {
const agentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
const projectId = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'MongoDB Operator Test',
provider: 'test',
model: 'test-model',
author: authorId,
tools: ['tool1'],
});
await updateAgent(
{ id: agentId },
{
description: 'Updated description',
$push: { tools: 'tool2' },
$addToSet: { projectIds: projectId },
},
);
const firstUpdate = await getAgent({ id: agentId });
expect(firstUpdate.description).toBe('Updated description');
expect(firstUpdate.tools).toContain('tool1');
expect(firstUpdate.tools).toContain('tool2');
expect(firstUpdate.projectIds.map((id) => id.toString())).toContain(projectId.toString());
expect(firstUpdate.versions).toHaveLength(2);
await updateAgent(
{ id: agentId },
{
tools: ['tool2', 'tool3'],
},
);
const secondUpdate = await getAgent({ id: agentId });
expect(secondUpdate.tools).toHaveLength(2);
expect(secondUpdate.tools).toContain('tool2');
expect(secondUpdate.tools).toContain('tool3');
expect(secondUpdate.tools).not.toContain('tool1');
expect(secondUpdate.versions).toHaveLength(3);
await updateAgent(
{ id: agentId },
{
$push: { tools: 'tool3' },
},
);
const thirdUpdate = await getAgent({ id: agentId });
const toolCount = thirdUpdate.tools.filter((t) => t === 'tool3').length;
expect(toolCount).toBe(2);
expect(thirdUpdate.versions).toHaveLength(4);
});
test('should handle parameter objects correctly', async () => {
const agentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'Parameters Test',
provider: 'test',
model: 'test-model',
author: authorId,
model_parameters: { temperature: 0.7 },
});
const updatedAgent = await updateAgent(
{ id: agentId },
{ model_parameters: { temperature: 0.8 } },
);
expect(updatedAgent.versions).toHaveLength(2);
expect(updatedAgent.model_parameters.temperature).toBe(0.8);
await updateAgent(
{ id: agentId },
{
model_parameters: {
temperature: 0.8,
max_tokens: 1000,
},
},
);
const complexAgent = await getAgent({ id: agentId });
expect(complexAgent.versions).toHaveLength(3);
expect(complexAgent.model_parameters.temperature).toBe(0.8);
expect(complexAgent.model_parameters.max_tokens).toBe(1000);
await updateAgent({ id: agentId }, { model_parameters: {} });
const emptyParamsAgent = await getAgent({ id: agentId });
expect(emptyParamsAgent.versions).toHaveLength(4);
expect(emptyParamsAgent.model_parameters).toEqual({});
});
test('should detect duplicate versions and reject updates', async () => {
const originalConsoleError = console.error;
console.error = jest.fn();
try {
const agentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
const projectId1 = new mongoose.Types.ObjectId();
const projectId2 = new mongoose.Types.ObjectId();
const testCases = [
{
name: 'simple field update',
initial: {
name: 'Test Agent',
description: 'Initial description',
},
update: { name: 'Updated Name' },
duplicate: { name: 'Updated Name' },
},
{
name: 'object field update',
initial: {
model_parameters: { temperature: 0.7 },
},
update: { model_parameters: { temperature: 0.8 } },
duplicate: { model_parameters: { temperature: 0.8 } },
},
{
name: 'array field update',
initial: {
tools: ['tool1', 'tool2'],
},
update: { tools: ['tool2', 'tool3'] },
duplicate: { tools: ['tool2', 'tool3'] },
},
{
name: 'projectIds update',
initial: {
projectIds: [projectId1],
},
update: { projectIds: [projectId1, projectId2] },
duplicate: { projectIds: [projectId2, projectId1] },
},
];
for (const testCase of testCases) {
const testAgentId = `agent_${uuidv4()}`;
await createAgent({
id: testAgentId,
provider: 'test',
model: 'test-model',
author: authorId,
...testCase.initial,
});
await updateAgent({ id: testAgentId }, testCase.update);
let error;
try {
await updateAgent({ id: testAgentId }, testCase.duplicate);
} catch (e) {
error = e;
}
expect(error).toBeDefined();
expect(error.message).toContain('Duplicate version');
expect(error.statusCode).toBe(409);
expect(error.details).toBeDefined();
expect(error.details.duplicateVersion).toBeDefined();
const agent = await getAgent({ id: testAgentId });
expect(agent.versions).toHaveLength(2);
}
} finally {
console.error = originalConsoleError;
}
});
test('should track updatedBy when a different user updates an agent', async () => {
const agentId = `agent_${uuidv4()}`;
const originalAuthor = new mongoose.Types.ObjectId();
const updatingUser = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'Original Agent',
provider: 'test',
model: 'test-model',
author: originalAuthor,
description: 'Original description',
});
const updatedAgent = await updateAgent(
{ id: agentId },
{ name: 'Updated Agent', description: 'Updated description' },
{ updatingUserId: updatingUser.toString() },
);
expect(updatedAgent.versions).toHaveLength(2);
expect(updatedAgent.versions[1].updatedBy.toString()).toBe(updatingUser.toString());
expect(updatedAgent.author.toString()).toBe(originalAuthor.toString());
});
test('should include updatedBy even when the original author updates the agent', async () => {
const agentId = `agent_${uuidv4()}`;
const originalAuthor = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'Original Agent',
provider: 'test',
model: 'test-model',
author: originalAuthor,
description: 'Original description',
});
const updatedAgent = await updateAgent(
{ id: agentId },
{ name: 'Updated Agent', description: 'Updated description' },
{ updatingUserId: originalAuthor.toString() },
);
expect(updatedAgent.versions).toHaveLength(2);
expect(updatedAgent.versions[1].updatedBy.toString()).toBe(originalAuthor.toString());
expect(updatedAgent.author.toString()).toBe(originalAuthor.toString());
});
test('should track multiple different users updating the same agent', async () => {
const agentId = `agent_${uuidv4()}`;
const originalAuthor = new mongoose.Types.ObjectId();
const user1 = new mongoose.Types.ObjectId();
const user2 = new mongoose.Types.ObjectId();
const user3 = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'Original Agent',
provider: 'test',
model: 'test-model',
author: originalAuthor,
description: 'Original description',
});
// User 1 makes an update
await updateAgent(
{ id: agentId },
{ name: 'Updated by User 1', description: 'First update' },
{ updatingUserId: user1.toString() },
);
// Original author makes an update
await updateAgent(
{ id: agentId },
{ description: 'Updated by original author' },
{ updatingUserId: originalAuthor.toString() },
);
// User 2 makes an update
await updateAgent(
{ id: agentId },
{ name: 'Updated by User 2', model: 'new-model' },
{ updatingUserId: user2.toString() },
);
// User 3 makes an update
const finalAgent = await updateAgent(
{ id: agentId },
{ description: 'Final update by User 3' },
{ updatingUserId: user3.toString() },
);
expect(finalAgent.versions).toHaveLength(5);
expect(finalAgent.author.toString()).toBe(originalAuthor.toString());
// Check that each version has the correct updatedBy
expect(finalAgent.versions[0].updatedBy).toBeUndefined(); // Initial creation has no updatedBy
expect(finalAgent.versions[1].updatedBy.toString()).toBe(user1.toString());
expect(finalAgent.versions[2].updatedBy.toString()).toBe(originalAuthor.toString());
expect(finalAgent.versions[3].updatedBy.toString()).toBe(user2.toString());
expect(finalAgent.versions[4].updatedBy.toString()).toBe(user3.toString());
// Verify the final state
expect(finalAgent.name).toBe('Updated by User 2');
expect(finalAgent.description).toBe('Final update by User 3');
expect(finalAgent.model).toBe('new-model');
});
test('should preserve original author during agent restoration', async () => {
const agentId = `agent_${uuidv4()}`;
const originalAuthor = new mongoose.Types.ObjectId();
const updatingUser = new mongoose.Types.ObjectId();
await createAgent({
id: agentId,
name: 'Original Agent',
provider: 'test',
model: 'test-model',
author: originalAuthor,
description: 'Original description',
});
await updateAgent(
{ id: agentId },
{ name: 'Updated Agent', description: 'Updated description' },
{ updatingUserId: updatingUser.toString() },
);
const { revertAgentVersion } = require('./Agent');
const revertedAgent = await revertAgentVersion({ id: agentId }, 0);
expect(revertedAgent.author.toString()).toBe(originalAuthor.toString());
expect(revertedAgent.name).toBe('Original Agent');
expect(revertedAgent.description).toBe('Original description');
});
test('should detect action metadata changes and force version update', async () => {
const agentId = `agent_${uuidv4()}`;
const authorId = new mongoose.Types.ObjectId();
const actionId = 'testActionId123';
// Create agent with actions
await createAgent({
id: agentId,
name: 'Agent with Actions',
provider: 'test',
model: 'test-model',
author: authorId,
actions: [`test.com_action_${actionId}`],
tools: ['listEvents_action_test.com', 'createEvent_action_test.com'],
});
// First update with forceVersion should create a version
const firstUpdate = await updateAgent(
{ id: agentId },
{ tools: ['listEvents_action_test.com', 'createEvent_action_test.com'] },
{ updatingUserId: authorId.toString(), forceVersion: true },
);
expect(firstUpdate.versions).toHaveLength(2);
// Second update with same data but forceVersion should still create a version
const secondUpdate = await updateAgent(
{ id: agentId },
{ tools: ['listEvents_action_test.com', 'createEvent_action_test.com'] },
{ updatingUserId: authorId.toString(), forceVersion: true },
);
expect(secondUpdate.versions).toHaveLength(3);
// Update without forceVersion and no changes should be rejected as a duplicate version
let error;
try {
await updateAgent(
{ id: agentId },
{ tools: ['listEvents_action_test.com', 'createEvent_action_test.com'] },
{ updatingUserId: authorId.toString(), forceVersion: false },
);
} catch (e) {
error = e;
}
expect(error).toBeDefined();
expect(error.message).toContain('Duplicate version');
expect(error.statusCode).toBe(409);
});
});

View File

@@ -140,13 +140,13 @@ const adjustPositions = async (user, oldPosition, newPosition) => {
const position =
oldPosition < newPosition
? {
$gt: Math.min(oldPosition, newPosition),
$lte: Math.max(oldPosition, newPosition),
}
$gt: Math.min(oldPosition, newPosition),
$lte: Math.max(oldPosition, newPosition),
}
: {
$gte: Math.min(oldPosition, newPosition),
$lt: Math.max(oldPosition, newPosition),
};
$gte: Math.min(oldPosition, newPosition),
$lt: Math.max(oldPosition, newPosition),
};
await ConversationTag.updateMany(
{

View File

@@ -153,7 +153,7 @@ describe('Message Operations', () => {
});
describe('Conversation Hijacking Prevention', () => {
it('should not allow editing a message in another user\'s conversation', async () => {
it("should not allow editing a message in another user's conversation", async () => {
const attackerReq = { user: { id: 'attacker123' } };
const victimConversationId = 'victim-convo-123';
const victimMessageId = 'victim-msg-123';
@@ -175,7 +175,7 @@ describe('Message Operations', () => {
);
});
it('should not allow deleting messages from another user\'s conversation', async () => {
it("should not allow deleting messages from another user's conversation", async () => {
const attackerReq = { user: { id: 'attacker123' } };
const victimConversationId = 'victim-convo-123';
const victimMessageId = 'victim-msg-123';
@@ -193,7 +193,7 @@ describe('Message Operations', () => {
});
});
it('should not allow inserting a new message into another user\'s conversation', async () => {
it("should not allow inserting a new message into another user's conversation", async () => {
const attackerReq = { user: { id: 'attacker123' } };
const victimConversationId = uuidv4(); // Use a valid UUID

View File

@@ -100,6 +100,8 @@ const tokenValues = Object.assign(
'claude-3-5-haiku': { prompt: 0.8, completion: 4 },
'claude-3.5-haiku': { prompt: 0.8, completion: 4 },
'claude-3-haiku': { prompt: 0.25, completion: 1.25 },
'claude-sonnet-4': { prompt: 3, completion: 15 },
'claude-opus-4': { prompt: 15, completion: 75 },
'claude-2.1': { prompt: 8, completion: 24 },
'claude-2': { prompt: 8, completion: 24 },
'claude-instant': { prompt: 0.8, completion: 2.4 },
@@ -162,6 +164,8 @@ const cacheTokenValues = {
'claude-3.5-haiku': { write: 1, read: 0.08 },
'claude-3-5-haiku': { write: 1, read: 0.08 },
'claude-3-haiku': { write: 0.3, read: 0.03 },
'claude-sonnet-4': { write: 3.75, read: 0.3 },
'claude-opus-4': { write: 18.75, read: 1.5 },
};
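// Reading the new entries (a sketch; units follow the surrounding rows, which appear to be
// USD per million tokens): claude-sonnet-4 is 3 prompt / 15 completion and claude-opus-4 is
// 15 / 75, with cache writes at 1.25x the prompt rate (3.75, 18.75) and reads at 0.1x (0.3, 1.5).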
/**

View File

@@ -664,3 +664,97 @@ describe('Grok Model Tests - Pricing', () => {
});
});
});
describe('Claude Model Tests', () => {
it('should return correct prompt and completion rates for Claude 4 models', () => {
expect(getMultiplier({ model: 'claude-sonnet-4', tokenType: 'prompt' })).toBe(
tokenValues['claude-sonnet-4'].prompt,
);
expect(getMultiplier({ model: 'claude-sonnet-4', tokenType: 'completion' })).toBe(
tokenValues['claude-sonnet-4'].completion,
);
expect(getMultiplier({ model: 'claude-opus-4', tokenType: 'prompt' })).toBe(
tokenValues['claude-opus-4'].prompt,
);
expect(getMultiplier({ model: 'claude-opus-4', tokenType: 'completion' })).toBe(
tokenValues['claude-opus-4'].completion,
);
});
it('should handle Claude 4 model name variations with different prefixes and suffixes', () => {
const modelVariations = [
'claude-sonnet-4',
'claude-sonnet-4-20240229',
'claude-sonnet-4-latest',
'anthropic/claude-sonnet-4',
'claude-sonnet-4/anthropic',
'claude-sonnet-4-preview',
'claude-sonnet-4-20240229-preview',
'claude-opus-4',
'claude-opus-4-20240229',
'claude-opus-4-latest',
'anthropic/claude-opus-4',
'claude-opus-4/anthropic',
'claude-opus-4-preview',
'claude-opus-4-20240229-preview',
];
modelVariations.forEach((model) => {
const valueKey = getValueKey(model);
const isSonnet = model.includes('sonnet');
const expectedKey = isSonnet ? 'claude-sonnet-4' : 'claude-opus-4';
expect(valueKey).toBe(expectedKey);
expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(tokenValues[expectedKey].prompt);
expect(getMultiplier({ model, tokenType: 'completion' })).toBe(
tokenValues[expectedKey].completion,
);
});
});
it('should return correct cache rates for Claude 4 models', () => {
expect(getCacheMultiplier({ model: 'claude-sonnet-4', cacheType: 'write' })).toBe(
cacheTokenValues['claude-sonnet-4'].write,
);
expect(getCacheMultiplier({ model: 'claude-sonnet-4', cacheType: 'read' })).toBe(
cacheTokenValues['claude-sonnet-4'].read,
);
expect(getCacheMultiplier({ model: 'claude-opus-4', cacheType: 'write' })).toBe(
cacheTokenValues['claude-opus-4'].write,
);
expect(getCacheMultiplier({ model: 'claude-opus-4', cacheType: 'read' })).toBe(
cacheTokenValues['claude-opus-4'].read,
);
});
it('should handle Claude 4 model cache rates with different prefixes and suffixes', () => {
const modelVariations = [
'claude-sonnet-4',
'claude-sonnet-4-20240229',
'claude-sonnet-4-latest',
'anthropic/claude-sonnet-4',
'claude-sonnet-4/anthropic',
'claude-sonnet-4-preview',
'claude-sonnet-4-20240229-preview',
'claude-opus-4',
'claude-opus-4-20240229',
'claude-opus-4-latest',
'anthropic/claude-opus-4',
'claude-opus-4/anthropic',
'claude-opus-4-preview',
'claude-opus-4-20240229-preview',
];
modelVariations.forEach((model) => {
const isSonnet = model.includes('sonnet');
const expectedKey = isSonnet ? 'claude-sonnet-4' : 'claude-opus-4';
expect(getCacheMultiplier({ model, cacheType: 'write' })).toBe(
cacheTokenValues[expectedKey].write,
);
expect(getCacheMultiplier({ model, cacheType: 'read' })).toBe(
cacheTokenValues[expectedKey].read,
);
});
});
});

View File

@@ -43,13 +43,14 @@
"@google/generative-ai": "^0.23.0",
"@googleapis/youtube": "^20.0.0",
"@keyv/redis": "^4.3.3",
"@langchain/community": "^0.3.42",
"@langchain/core": "^0.3.55",
"@langchain/google-genai": "^0.2.8",
"@langchain/google-vertexai": "^0.2.8",
"@langchain/community": "^0.3.44",
"@langchain/core": "^0.3.57",
"@langchain/google-genai": "^0.2.9",
"@langchain/google-vertexai": "^0.2.9",
"@langchain/textsplitters": "^0.1.0",
"@librechat/agents": "^2.4.317",
"@librechat/agents": "^2.4.37",
"@librechat/data-schemas": "*",
"@node-saml/passport-saml": "^5.0.0",
"@waylaidwanderer/fetch-event-source": "^3.0.1",
"axios": "^1.8.2",
"bcryptjs": "^2.4.3",
@@ -75,6 +76,7 @@
"ioredis": "^5.3.2",
"js-yaml": "^4.1.0",
"jsonwebtoken": "^9.0.0",
"jwks-rsa": "^3.2.0",
"keyv": "^5.3.2",
"keyv-file": "^5.1.2",
"klona": "^2.0.6",
@@ -92,7 +94,7 @@
"ollama": "^0.5.0",
"openai": "^4.96.2",
"openai-chat-tokens": "^0.2.8",
"openid-client": "^5.4.2",
"openid-client": "^6.5.0",
"passport": "^0.6.0",
"passport-apple": "^2.0.2",
"passport-discord": "^0.1.4",

View File

@@ -16,17 +16,17 @@ const FinalizationRegistry = global.FinalizationRegistry || null;
*/
const clientRegistry = FinalizationRegistry
? new FinalizationRegistry((heldValue) => {
try {
// This will run when the client is garbage collected
if (heldValue && heldValue.userId) {
logger.debug(`[FinalizationRegistry] Cleaning up client for user ${heldValue.userId}`);
} else {
logger.debug('[FinalizationRegistry] Cleaning up client');
try {
// This will run when the client is garbage collected
if (heldValue && heldValue.userId) {
logger.debug(`[FinalizationRegistry] Cleaning up client for user ${heldValue.userId}`);
} else {
logger.debug('[FinalizationRegistry] Cleaning up client');
}
} catch (e) {
// Ignore errors
}
} catch (e) {
// Ignore errors
}
})
})
: null;
/**
@@ -134,15 +134,12 @@ function disposeClient(client) {
if (client.message_delta) {
client.message_delta = null;
}
if (client.isClaude3 !== undefined) {
client.isClaude3 = null;
if (client.isClaudeLatest !== undefined) {
client.isClaudeLatest = null;
}
if (client.useMessages !== undefined) {
client.useMessages = null;
}
if (client.isLegacyOutput !== undefined) {
client.isLegacyOutput = null;
}
if (client.supportsCacheControl !== undefined) {
client.supportsCacheControl = null;
}

View File

@@ -1,3 +1,4 @@
const openIdClient = require('openid-client');
const cookies = require('cookie');
const jwt = require('jsonwebtoken');
const {
@@ -5,9 +6,12 @@ const {
resetPassword,
setAuthTokens,
requestPasswordReset,
setOpenIDAuthTokens,
} = require('~/server/services/AuthService');
const { findSession, getUserById, deleteAllUserSessions } = require('~/models');
const { findSession, getUserById, deleteAllUserSessions, findUser } = require('~/models');
const { getOpenIdConfig } = require('~/strategies');
const { logger } = require('~/config');
const { isEnabled } = require('~/server/utils');
const registrationController = async (req, res) => {
try {
@@ -55,10 +59,28 @@ const resetPasswordController = async (req, res) => {
const refreshController = async (req, res) => {
const refreshToken = req.headers.cookie ? cookies.parse(req.headers.cookie).refreshToken : null;
const token_provider = req.headers.cookie
? cookies.parse(req.headers.cookie).token_provider
: null;
if (!refreshToken) {
return res.status(200).send('Refresh token not provided');
}
if (token_provider === 'openid' && isEnabled(process.env.OPENID_REUSE_TOKENS) === true) {
try {
const openIdConfig = getOpenIdConfig();
const tokenset = await openIdClient.refreshTokenGrant(openIdConfig, refreshToken);
const claims = tokenset.claims();
const user = await findUser({ email: claims.email });
if (!user) {
return res.status(401).redirect('/login');
}
const token = setOpenIDAuthTokens(tokenset, res);
return res.status(200).send({ token, user });
} catch (error) {
logger.error('[refreshController] OpenID token refresh error', error);
return res.status(403).send('Invalid OpenID refresh token');
}
}
try {
const payload = jwt.verify(refreshToken, process.env.JWT_REFRESH_SECRET);
const user = await getUserById(payload.id, '-password -__v -totpSecret');

View File

@@ -1,9 +1,24 @@
const Balance = require('~/models/Balance');
async function balanceController(req, res) {
const { tokenCredits: balance = '' } =
(await Balance.findOne({ user: req.user.id }, 'tokenCredits').lean()) ?? {};
res.status(200).send('' + balance);
const balanceData = await Balance.findOne(
{ user: req.user.id },
'-_id tokenCredits autoRefillEnabled refillIntervalValue refillIntervalUnit lastRefill refillAmount',
).lean();
if (!balanceData) {
return res.status(404).json({ error: 'Balance not found' });
}
// If auto-refill is not enabled, remove auto-refill related fields from the response
if (!balanceData.autoRefillEnabled) {
delete balanceData.refillIntervalValue;
delete balanceData.refillIntervalUnit;
delete balanceData.lastRefill;
delete balanceData.refillAmount;
}
res.status(200).json(balanceData);
}
module.exports = balanceController;
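For illustration, a minimal client-side sketch of consuming the new balance response; the /api/balance path is an assumption, and only the response shape comes from the controller above.
async function getBalance() {
  const res = await fetch('/api/balance', { credentials: 'include' });
  if (res.status === 404) {
    throw new Error('Balance not found'); // no Balance document exists for this user
  }
  // The auto-refill fields (refillIntervalValue, refillIntervalUnit, lastRefill,
  // refillAmount) are included only when autoRefillEnabled is true.
  return res.json();
}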

View File

@@ -24,7 +24,6 @@ const handleValidationError = (err, res) => {
}
};
// eslint-disable-next-line no-unused-vars
module.exports = (err, req, res, next) => {
try {
if (err.name === 'ValidationError') {

View File

@@ -1,4 +1,10 @@
const { FileSources } = require('librechat-data-provider');
const {
Tools,
Constants,
FileSources,
webSearchKeys,
extractWebSearchEnvVars,
} = require('librechat-data-provider');
const {
Balance,
getFiles,
@@ -83,7 +89,6 @@ const deleteUserFiles = async (req) => {
const updateUserPluginsController = async (req, res) => {
const { user } = req;
const { pluginKey, action, auth, isEntityTool } = req.body;
let authService;
try {
if (!isEntityTool) {
const userPluginsService = await updateUserPluginsService(user, pluginKey, action);
@@ -95,32 +100,55 @@ const updateUserPluginsController = async (req, res) => {
}
}
if (auth) {
const keys = Object.keys(auth);
const values = Object.values(auth);
if (action === 'install' && keys.length > 0) {
for (let i = 0; i < keys.length; i++) {
authService = await updateUserPluginAuth(user.id, keys[i], pluginKey, values[i]);
if (authService instanceof Error) {
logger.error('[authService]', authService);
const { status, message } = authService;
res.status(status).send({ message });
}
if (auth == null) {
return res.status(200).send();
}
let keys = Object.keys(auth);
if (keys.length === 0 && pluginKey !== Tools.web_search) {
return res.status(200).send();
}
const values = Object.values(auth);
/** @type {number} */
let status = 200;
/** @type {string} */
let message;
/** @type {IPluginAuth | Error} */
let authService;
if (pluginKey === Tools.web_search) {
/** @type {TCustomConfig['webSearch']} */
const webSearchConfig = req.app.locals?.webSearch;
keys = extractWebSearchEnvVars({
keys: action === 'install' ? keys : webSearchKeys,
config: webSearchConfig,
});
}
if (action === 'install') {
for (let i = 0; i < keys.length; i++) {
authService = await updateUserPluginAuth(user.id, keys[i], pluginKey, values[i]);
if (authService instanceof Error) {
logger.error('[authService]', authService);
({ status, message } = authService);
}
}
if (action === 'uninstall' && keys.length > 0) {
for (let i = 0; i < keys.length; i++) {
authService = await deleteUserPluginAuth(user.id, keys[i]);
if (authService instanceof Error) {
logger.error('[authService]', authService);
const { status, message } = authService;
res.status(status).send({ message });
}
} else if (action === 'uninstall') {
for (let i = 0; i < keys.length; i++) {
authService = await deleteUserPluginAuth(user.id, keys[i]);
if (authService instanceof Error) {
logger.error('[authService]', authService);
({ status, message } = authService);
}
}
}
res.status(200).send();
if (status === 200) {
return res.status(status).send();
}
res.status(status).send({ message });
} catch (err) {
logger.error('[updateUserPluginsController]', err);
return res.status(500).json({ message: 'Something went wrong.' });
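As a sketch of the new web_search branch, here is an illustrative request body a client might send; the SERPER_API_KEY field name and the endpoint path are assumptions for illustration, since the accepted keys are resolved at runtime by extractWebSearchEnvVars from the webSearch config.
const body = {
  pluginKey: 'web_search',
  action: 'install', // 'uninstall' deletes the stored values instead
  auth: { SERPER_API_KEY: 'user-provided-key' }, // example field name only
};
// Sent as JSON to the user plugins endpoint handled by the controller above, e.g.
// fetch('/api/user/plugins', { method: 'POST', body: JSON.stringify(body), ... })
// with the usual auth cookies/headers (path assumed for illustration).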

View File

@@ -237,6 +237,30 @@ function createToolEndCallback({ req, res, artifactPromises }) {
return;
}
if (output.artifact[Tools.web_search]) {
artifactPromises.push(
(async () => {
const name = `${output.name}_${output.tool_call_id}_${nanoid()}`;
const attachment = {
name,
type: Tools.web_search,
messageId: metadata.run_id,
toolCallId: output.tool_call_id,
conversationId: metadata.thread_id,
[Tools.web_search]: { ...output.artifact[Tools.web_search] },
};
if (!res.headersSent) {
return attachment;
}
res.write(`event: attachment\ndata: ${JSON.stringify(attachment)}\n\n`);
return attachment;
})().catch((error) => {
logger.error('Error processing artifact content:', error);
return null;
}),
);
}
if (output.artifact.content) {
/** @type {FormattedContent[]} */
const content = output.artifact.content;

View File

@@ -39,9 +39,6 @@ const BaseClient = require('~/app/clients/BaseClient');
const { logger, sendEvent } = require('~/config');
const { createRun } = require('./run');
/** @typedef {import('@librechat/agents').MessageContentComplex} MessageContentComplex */
/** @typedef {import('@langchain/core/runnables').RunnableConfig} RunnableConfig */
/**
* @param {ServerRequest} req
* @param {Agent} agent
@@ -543,7 +540,7 @@ class AgentClient extends BaseClient {
}
async chatCompletion({ payload, abortController = null }) {
/** @type {Partial<RunnableConfig> & { version: 'v1' | 'v2'; run_id?: string; streamMode: string }} */
/** @type {Partial<GraphRunnableConfig>} */
let config;
/** @type {ReturnType<createRun>} */
let run;

View File

@@ -75,7 +75,7 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
} else if (/Files.*are invalid/.test(error.message)) {
const errorMessage = `Files are invalid, or may not have uploaded yet.${
endpoint === 'azureAssistants'
? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.'
? " If using Azure OpenAI, files are only available in the region of the assistant's model at the time of upload."
: ''
}`;
return sendResponse(req, res, messageData, errorMessage);

View File

@@ -228,7 +228,7 @@ const AgentController = async (req, res, next, initializeClient, addTitle) => {
// Save user message if needed
if (!client.skipSaveUserMessage) {
await saveMessage(req, userMessage, {
context: 'api/server/controllers/agents/request.js - don\'t skip saving user message',
context: "api/server/controllers/agents/request.js - don't skip saving user message",
});
}

View File

@@ -23,6 +23,7 @@ const { updateAction, getActions } = require('~/models/Action');
const { updateAgentProjects } = require('~/models/Agent');
const { getProjectByName } = require('~/models/Project');
const { deleteFileByFilter } = require('~/models/File');
const { revertAgentVersion } = require('~/models/Agent');
const { logger } = require('~/config');
const systemTools = {
@@ -104,11 +105,13 @@ const getAgentHandler = async (req, res) => {
return res.status(404).json({ error: 'Agent not found' });
}
agent.version = agent.versions ? agent.versions.length : 0;
if (agent.avatar && agent.avatar?.source === FileSources.s3) {
const originalUrl = agent.avatar.filepath;
agent.avatar.filepath = await refreshS3Url(agent.avatar);
if (originalUrl !== agent.avatar.filepath) {
await updateAgent({ id }, { avatar: agent.avatar });
await updateAgent({ id }, { avatar: agent.avatar }, { updatingUserId: req.user.id });
}
}
@@ -127,6 +130,7 @@ const getAgentHandler = async (req, res) => {
author: agent.author,
projectIds: agent.projectIds,
isCollaborative: agent.isCollaborative,
version: agent.version,
});
}
return res.status(200).json(agent);
@@ -165,7 +169,9 @@ const updateAgentHandler = async (req, res) => {
}
let updatedAgent =
Object.keys(updateData).length > 0 ? await updateAgent({ id }, updateData) : existingAgent;
Object.keys(updateData).length > 0
? await updateAgent({ id }, updateData, { updatingUserId: req.user.id })
: existingAgent;
if (projectIds || removeProjectIds) {
updatedAgent = await updateAgentProjects({
@@ -187,6 +193,14 @@ const updateAgentHandler = async (req, res) => {
return res.json(updatedAgent);
} catch (error) {
logger.error('[/Agents/:id] Error updating Agent', error);
if (error.statusCode === 409) {
return res.status(409).json({
error: error.message,
details: error.details,
});
}
res.status(500).json({ error: error.message });
}
};
@@ -393,7 +407,11 @@ const uploadAgentAvatarHandler = async (req, res) => {
},
};
promises.push(await updateAgent({ id: agent_id, author: req.user.id }, data));
promises.push(
await updateAgent({ id: agent_id, author: req.user.id }, data, {
updatingUserId: req.user.id,
}),
);
const resolved = await Promise.all(promises);
res.status(201).json(resolved[0]);
@@ -411,6 +429,66 @@ const uploadAgentAvatarHandler = async (req, res) => {
}
};
/**
* Reverts an agent to a previous version from its version history.
* @route POST /agents/:id/revert
* @param {object} req - Express Request object
* @param {object} req.params - Request parameters
* @param {string} req.params.id - The ID of the agent to revert
* @param {object} req.body - Request body
* @param {number} req.body.version_index - The index of the version to revert to
* @param {object} req.user - Authenticated user information
* @param {string} req.user.id - User ID
* @param {string} req.user.role - User role
* @param {ServerResponse} res - Express Response object
* @returns {Promise<Agent>} 200 - The updated agent after reverting to the specified version
* @throws {Error} 400 - If version_index is missing
* @throws {Error} 403 - If user doesn't have permission to modify the agent
* @throws {Error} 404 - If agent not found
* @throws {Error} 500 - If there's an internal server error during the reversion process
*/
const revertAgentVersionHandler = async (req, res) => {
try {
const { id } = req.params;
const { version_index } = req.body;
if (version_index === undefined) {
return res.status(400).json({ error: 'version_index is required' });
}
const isAdmin = req.user.role === SystemRoles.ADMIN;
const existingAgent = await getAgent({ id });
if (!existingAgent) {
return res.status(404).json({ error: 'Agent not found' });
}
const isAuthor = existingAgent.author.toString() === req.user.id;
const hasEditPermission = existingAgent.isCollaborative || isAdmin || isAuthor;
if (!hasEditPermission) {
return res.status(403).json({
error: 'You do not have permission to modify this non-collaborative agent',
});
}
const updatedAgent = await revertAgentVersion({ id }, version_index);
if (updatedAgent.author) {
updatedAgent.author = updatedAgent.author.toString();
}
if (updatedAgent.author !== req.user.id) {
delete updatedAgent.author;
}
return res.json(updatedAgent);
} catch (error) {
logger.error('[/agents/:id/revert] Error reverting Agent version', error);
res.status(500).json({ error: error.message });
}
};
module.exports = {
createAgent: createAgentHandler,
getAgent: getAgentHandler,
@@ -419,4 +497,5 @@ module.exports = {
deleteAgent: deleteAgentHandler,
getListAgents: getListAgentsHandler,
uploadAgentAvatar: uploadAgentAvatarHandler,
revertAgentVersion: revertAgentVersionHandler,
};
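A minimal usage sketch for the new revert handler; the /api/agents prefix is assumed, while the :id/revert route and the version_index body field appear elsewhere in this diff.
async function revertAgent(agentId, versionIndex) {
  const res = await fetch(`/api/agents/${agentId}/revert`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    credentials: 'include',
    body: JSON.stringify({ version_index: versionIndex }),
  });
  if (!res.ok) {
    // 400: missing version_index, 403: no edit permission, 404: agent not found
    throw new Error(`Revert failed with status ${res.status}`);
  }
  return res.json(); // the agent document after reverting to the chosen version
}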

View File

@@ -78,7 +78,7 @@ const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/ch
} else if (/Files.*are invalid/.test(error.message)) {
const errorMessage = `Files are invalid, or may not have uploaded yet.${
endpoint === 'azureAssistants'
? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.'
? " If using Azure OpenAI, files are only available in the region of the assistant's model at the time of upload."
: ''
}`;
return sendResponse(req, res, messageData, errorMessage);

View File

@@ -39,12 +39,10 @@ const createAssistant = async (req, res) => {
const toolDefinitions = req.app.locals.availableTools;
const toolDef = toolDefinitions[tool];
if (!toolDef && manifestToolMap[tool] && manifestToolMap[tool].toolkit === true) {
return (
Object.entries(toolDefinitions)
.filter(([key]) => key.startsWith(`${tool}_`))
// eslint-disable-next-line no-unused-vars
.map(([_, val]) => val)
);
return Object.entries(toolDefinitions)
.filter(([key]) => key.startsWith(`${tool}_`))
.map(([_, val]) => val);
}
return toolDef;
@@ -144,12 +142,10 @@ const patchAssistant = async (req, res) => {
const toolDefinitions = req.app.locals.availableTools;
const toolDef = toolDefinitions[tool];
if (!toolDef && manifestToolMap[tool] && manifestToolMap[tool].toolkit === true) {
return (
Object.entries(toolDefinitions)
.filter(([key]) => key.startsWith(`${tool}_`))
// eslint-disable-next-line no-unused-vars
.map(([_, val]) => val)
);
return Object.entries(toolDefinitions)
.filter(([key]) => key.startsWith(`${tool}_`))
.map(([_, val]) => val);
}
return toolDef;

View File

@@ -36,12 +36,10 @@ const createAssistant = async (req, res) => {
const toolDefinitions = req.app.locals.availableTools;
const toolDef = toolDefinitions[tool];
if (!toolDef && manifestToolMap[tool] && manifestToolMap[tool].toolkit === true) {
return (
Object.entries(toolDefinitions)
.filter(([key]) => key.startsWith(`${tool}_`))
// eslint-disable-next-line no-unused-vars
.map(([_, val]) => val)
);
return Object.entries(toolDefinitions)
.filter(([key]) => key.startsWith(`${tool}_`))
.map(([_, val]) => val);
}
return toolDef;
@@ -131,7 +129,7 @@ const updateAssistant = async ({ req, openai, assistant_id, updateData }) => {
if (!actualTool && manifestToolMap[tool] && manifestToolMap[tool].toolkit === true) {
actualTool = Object.entries(toolDefinitions)
.filter(([key]) => key.startsWith(`${tool}_`))
// eslint-disable-next-line no-unused-vars
.map(([_, val]) => val);
} else if (!actualTool) {
continue;

View File

@@ -1,5 +1,5 @@
const cookies = require('cookie');
const { Issuer } = require('openid-client');
const { getOpenIdConfig } = require('~/strategies');
const { logoutUser } = require('~/server/services/AuthService');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');
@@ -10,20 +10,29 @@ const logoutController = async (req, res) => {
const logout = await logoutUser(req, refreshToken);
const { status, message } = logout;
res.clearCookie('refreshToken');
res.clearCookie('token_provider');
const response = { message };
if (
req.user.openidId != null &&
isEnabled(process.env.OPENID_USE_END_SESSION_ENDPOINT) &&
process.env.OPENID_ISSUER
) {
const issuer = await Issuer.discover(process.env.OPENID_ISSUER);
const redirect = issuer.metadata.end_session_endpoint;
if (!redirect) {
const openIdConfig = getOpenIdConfig();
if (!openIdConfig) {
logger.warn(
'[logoutController] end_session_endpoint not found in OpenID issuer metadata. Please verify that the issuer is correct.',
'[logoutController] OpenID config not found. Please verify that the open id configuration and initialization are correct.',
);
} else {
response.redirect = redirect;
const endSessionEndpoint = openIdConfig
? openIdConfig.serverMetadata().end_session_endpoint
: null;
if (endSessionEndpoint) {
response.redirect = endSessionEndpoint;
} else {
logger.warn(
'[logoutController] end_session_endpoint not found in OpenID issuer metadata. Please verify that the issuer is correct.',
);
}
}
}
return res.status(status).send(response);

View File

@@ -6,6 +6,7 @@ const {
Permissions,
ToolCallTypes,
PermissionTypes,
loadWebSearchAuth,
} = require('librechat-data-provider');
const { processFileURL, uploadImageBuffer } = require('~/server/services/Files/process');
const { processCodeOutput } = require('~/server/services/Files/Code/process');
@@ -24,6 +25,36 @@ const toolAccessPermType = {
[Tools.execute_code]: PermissionTypes.RUN_CODE,
};
/**
* Verifies web search authentication, ensuring each category has at least
* one fully authenticated service.
*
* @param {ServerRequest} req - The request object
* @param {ServerResponse} res - The response object
* @returns {Promise<void>} A promise that resolves when the function has completed
*/
const verifyWebSearchAuth = async (req, res) => {
try {
const userId = req.user.id;
/** @type {TCustomConfig['webSearch']} */
const webSearchConfig = req.app.locals?.webSearch || {};
const result = await loadWebSearchAuth({
userId,
loadAuthValues,
webSearchConfig,
throwError: false,
});
return res.status(200).json({
authenticated: result.authenticated,
authTypes: result.authTypes,
});
} catch (error) {
console.error('Error in verifyWebSearchAuth:', error);
return res.status(500).json({ message: error.message });
}
};
/**
* @param {ServerRequest} req - The request object, containing information about the HTTP request.
* @param {ServerResponse} res - The response object, used to send back the desired HTTP response.
@@ -32,6 +63,9 @@ const toolAccessPermType = {
const verifyToolAuth = async (req, res) => {
try {
const { toolId } = req.params;
if (toolId === Tools.web_search) {
return await verifyWebSearchAuth(req, res);
}
const authFields = fieldsMap[toolId];
if (!authFields) {
res.status(404).json({ message: 'Tool not found' });
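A sketch of how a client could use the new web_search branch before enabling the tool; the route path is assumed, and the response fields (authenticated, authTypes) are those returned by verifyWebSearchAuth above.
async function hasWebSearchAuth() {
  // Path assumed for illustration; only the handler logic is shown in this diff.
  const res = await fetch('/api/agents/tools/web_search/auth', { credentials: 'include' });
  const { authenticated } = await res.json();
  return authenticated === true;
}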

View File

@@ -75,7 +75,7 @@ const startServer = async () => {
/* OAUTH */
app.use(passport.initialize());
passport.use(await jwtLogin());
passport.use(jwtLogin());
passport.use(passportLogin());
/* LDAP Auth */
@@ -84,7 +84,7 @@ const startServer = async () => {
}
if (isEnabled(ALLOW_SOCIAL_LOGIN)) {
configureSocialLogins(app);
await configureSocialLogins(app);
}
app.use('/oauth', routes.oauth);

View File

@@ -4,6 +4,10 @@ const request = require('supertest');
const { MongoMemoryServer } = require('mongodb-memory-server');
const mongoose = require('mongoose');
jest.mock('~/server/services/Config/loadCustomConfig', () => {
return jest.fn(() => Promise.resolve({}));
});
describe('Server Configuration', () => {
// Increase the default timeout to allow for Mongo cleanup
jest.setTimeout(30_000);

View File

@@ -327,7 +327,7 @@ const handleAbortError = async (res, req, error, data) => {
errorText = `{"type":"${ErrorTypes.INVALID_REQUEST}"}`;
}
if (error?.message?.includes('does not support \'system\'')) {
if (error?.message?.includes("does not support 'system'")) {
errorText = `{"type":"${ErrorTypes.NO_SYSTEM_MESSAGES}"}`;
}

View File

@@ -34,7 +34,7 @@ async function abortRun(req, res) {
const [thread_id, run_id] = runValues.split(':');
if (!run_id) {
logger.warn('[abortRun] Couldn\'t find run for cancel request', { thread_id });
logger.warn("[abortRun] Couldn't find run for cancel request", { thread_id });
return res.status(204).send({ message: 'Run not found' });
} else if (run_id === 'cancelled') {
logger.warn('[abortRun] Run already cancelled', { thread_id });

View File

@@ -1,9 +1,13 @@
const cookies = require('cookie');
const { isEnabled } = require('~/server/utils');
const passport = require('passport');
// This middleware does not require authentication,
// but if the user is authenticated, it will set the user object.
const optionalJwtAuth = (req, res, next) => {
passport.authenticate('jwt', { session: false }, (err, user) => {
const cookieHeader = req.headers.cookie;
const tokenProvider = cookieHeader ? cookies.parse(cookieHeader).token_provider : null;
const callback = (err, user) => {
if (err) {
return next(err);
}
@@ -11,7 +15,11 @@ const optionalJwtAuth = (req, res, next) => {
req.user = user;
}
next();
})(req, res, next);
};
if (tokenProvider === 'openid' && isEnabled(process.env.OPENID_REUSE_TOKENS)) {
return passport.authenticate('openidJwt', { session: false }, callback)(req, res, next);
}
passport.authenticate('jwt', { session: false }, callback)(req, res, next);
};
module.exports = optionalJwtAuth;

View File

@@ -1,5 +1,23 @@
const passport = require('passport');
const cookies = require('cookie');
const { isEnabled } = require('~/server/utils');
const requireJwtAuth = passport.authenticate('jwt', { session: false });
/**
* Custom Middleware to handle JWT authentication, with support for OpenID token reuse
* Switches between JWT and OpenID authentication based on cookies and environment settings
*/
const requireJwtAuth = (req, res, next) => {
// Check if token provider is specified in cookies
const cookieHeader = req.headers.cookie;
const tokenProvider = cookieHeader ? cookies.parse(cookieHeader).token_provider : null;
// Use OpenID authentication if token provider is OpenID and OPENID_REUSE_TOKENS is enabled
if (tokenProvider === 'openid' && isEnabled(process.env.OPENID_REUSE_TOKENS)) {
return passport.authenticate('openidJwt', { session: false })(req, res, next);
}
// Default to standard JWT authentication
return passport.authenticate('jwt', { session: false })(req, res, next);
};
module.exports = requireJwtAuth;
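A minimal sketch of wiring the middleware above into a protected route; the require path and the route itself are illustrative.
const express = require('express');
const requireJwtAuth = require('~/server/middleware/requireJwtAuth'); // path assumed

const router = express.Router();
router.get('/me', requireJwtAuth, (req, res) => {
  // With cookie token_provider=openid and OPENID_REUSE_TOKENS enabled, the
  // 'openidJwt' strategy validated the provider-issued token; otherwise the
  // standard 'jwt' strategy did. Either way req.user is populated here.
  res.json({ id: req.user.id });
});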

View File

@@ -1,11 +1,11 @@
jest.mock('~/cache/getLogStores');
const request = require('supertest');
const express = require('express');
const routes = require('../');
const configRoute = require('../config');
// file deepcode ignore UseCsurfForExpress/test: test
const app = express();
app.disable('x-powered-by');
app.use('/api/config', routes.config);
app.use('/api/config', configRoute);
afterEach(() => {
delete process.env.APP_TITLE;
@@ -24,6 +24,12 @@ afterEach(() => {
delete process.env.GITHUB_CLIENT_SECRET;
delete process.env.DISCORD_CLIENT_ID;
delete process.env.DISCORD_CLIENT_SECRET;
delete process.env.SAML_ENTRY_POINT;
delete process.env.SAML_ISSUER;
delete process.env.SAML_CERT;
delete process.env.SAML_SESSION_SECRET;
delete process.env.SAML_BUTTON_LABEL;
delete process.env.SAML_IMAGE_URL;
delete process.env.DOMAIN_SERVER;
delete process.env.ALLOW_REGISTRATION;
delete process.env.ALLOW_SOCIAL_LOGIN;
@@ -37,7 +43,6 @@ afterEach(() => {
//TODO: This works/passes locally but http request tests fail with 404 in CI. Need to figure out why.
// eslint-disable-next-line jest/no-disabled-tests
describe.skip('GET /', () => {
it('should return 200 and the correct body', async () => {
process.env.APP_TITLE = 'Test Title';
@@ -55,6 +60,12 @@ describe.skip('GET /', () => {
process.env.GITHUB_CLIENT_SECRET = 'Test Github client Secret';
process.env.DISCORD_CLIENT_ID = 'Test Discord client Id';
process.env.DISCORD_CLIENT_SECRET = 'Test Discord client Secret';
process.env.SAML_ENTRY_POINT = 'http://test-server.com';
process.env.SAML_ISSUER = 'Test SAML Issuer';
process.env.SAML_CERT = 'saml.pem';
process.env.SAML_SESSION_SECRET = 'Test Secret';
process.env.SAML_BUTTON_LABEL = 'Test SAML';
process.env.SAML_IMAGE_URL = 'http://test-server.com';
process.env.DOMAIN_SERVER = 'http://test-server.com';
process.env.ALLOW_REGISTRATION = 'true';
process.env.ALLOW_SOCIAL_LOGIN = 'true';
@@ -70,7 +81,7 @@ describe.skip('GET /', () => {
expect(response.statusCode).toBe(200);
expect(response.body).toEqual({
appTitle: 'Test Title',
socialLogins: ['google', 'facebook', 'openid', 'github', 'discord'],
socialLogins: ['google', 'facebook', 'openid', 'github', 'discord', 'saml'],
discordLoginEnabled: true,
facebookLoginEnabled: true,
githubLoginEnabled: true,
@@ -78,6 +89,9 @@ describe.skip('GET /', () => {
openidLoginEnabled: true,
openidLabel: 'Test OpenID',
openidImageUrl: 'http://test-server.com',
samlLoginEnabled: true,
samlLabel: 'Test SAML',
samlImageUrl: 'http://test-server.com',
ldap: {
enabled: true,
},

View File

@@ -107,7 +107,15 @@ router.post('/:agent_id', async (req, res) => {
.filter((tool) => !(tool && (tool.includes(domain) || tool.includes(action_id))))
.concat(functions.map((tool) => `${tool.function.name}${actionDelimiter}${domain}`));
const updatedAgent = await updateAgent(agentQuery, { tools, actions });
// Force version update since actions are changing
const updatedAgent = await updateAgent(
agentQuery,
{ tools, actions },
{
updatingUserId: req.user.id,
forceVersion: true,
},
);
// Only update user field for new actions
const actionUpdateData = { metadata, agent_id };
@@ -172,7 +180,12 @@ router.delete('/:agent_id/:action_id', async (req, res) => {
const updatedTools = tools.filter((tool) => !(tool && tool.includes(domain)));
await updateAgent(agentQuery, { tools: updatedTools, actions: updatedActions });
// Force version update since actions are being removed
await updateAgent(
agentQuery,
{ tools: updatedTools, actions: updatedActions },
{ updatingUserId: req.user.id, forceVersion: true },
);
// If admin, can delete any action, otherwise only user's actions
const actionQuery = admin ? { action_id } : { action_id, user: req.user.id };
await deleteAction(actionQuery);

View File

@@ -78,6 +78,15 @@ router.post('/:id/duplicate', checkAgentCreate, v1.duplicateAgent);
*/
router.delete('/:id', checkAgentCreate, v1.deleteAgent);
/**
* Reverts an agent to a previous version.
* @route POST /agents/:id/revert
* @param {string} req.params.id - Agent identifier.
* @param {number} req.body.version_index - Index of the version to revert to.
* @returns {Agent} 200 - success response - application/json
*/
router.post('/:id/revert', checkGlobalAgentShare, v1.revertAgentVersion);
/**
* Returns a list of agents.
* @route GET /agents

View File

@@ -37,6 +37,18 @@ router.get('/', async function (req, res) {
const ldap = getLdapConfig();
try {
const isOpenIdEnabled =
!!process.env.OPENID_CLIENT_ID &&
!!process.env.OPENID_CLIENT_SECRET &&
!!process.env.OPENID_ISSUER &&
!!process.env.OPENID_SESSION_SECRET;
const isSamlEnabled =
!!process.env.SAML_ENTRY_POINT &&
!!process.env.SAML_ISSUER &&
!!process.env.SAML_CERT &&
!!process.env.SAML_SESSION_SECRET;
/** @type {TStartupConfig} */
const payload = {
appTitle: process.env.APP_TITLE || 'LibreChat',
@@ -51,14 +63,13 @@ router.get('/', async function (req, res) {
!!process.env.APPLE_TEAM_ID &&
!!process.env.APPLE_KEY_ID &&
!!process.env.APPLE_PRIVATE_KEY_PATH,
openidLoginEnabled:
!!process.env.OPENID_CLIENT_ID &&
!!process.env.OPENID_CLIENT_SECRET &&
!!process.env.OPENID_ISSUER &&
!!process.env.OPENID_SESSION_SECRET,
openidLoginEnabled: isOpenIdEnabled,
openidLabel: process.env.OPENID_BUTTON_LABEL || 'Continue with OpenID',
openidImageUrl: process.env.OPENID_IMAGE_URL,
openidAutoRedirect: isEnabled(process.env.OPENID_AUTO_REDIRECT),
samlLoginEnabled: !isOpenIdEnabled && isSamlEnabled,
samlLabel: process.env.SAML_BUTTON_LABEL,
samlImageUrl: process.env.SAML_IMAGE_URL,
serverDomain: process.env.DOMAIN_SERVER || 'http://localhost:3080',
emailLoginEnabled,
registrationEnabled: !ldap?.enabled && isEnabled(process.env.ALLOW_REGISTRATION),
@@ -85,6 +96,26 @@ router.get('/', async function (req, res) {
bundlerURL: process.env.SANDPACK_BUNDLER_URL,
staticBundlerURL: process.env.SANDPACK_STATIC_BUNDLER_URL,
};
/** @type {TCustomConfig['webSearch']} */
const webSearchConfig = req.app.locals.webSearch;
if (
webSearchConfig != null &&
(webSearchConfig.searchProvider ||
webSearchConfig.scraperType ||
webSearchConfig.rerankerType)
) {
payload.webSearch = {};
}
if (webSearchConfig?.searchProvider) {
payload.webSearch.searchProvider = webSearchConfig.searchProvider;
}
if (webSearchConfig?.scraperType) {
payload.webSearch.scraperType = webSearchConfig.scraperType;
}
if (webSearchConfig?.rerankerType) {
payload.webSearch.rerankerType = webSearchConfig.rerankerType;
}
if (ldap) {
payload.ldap = ldap;

View File

@@ -74,7 +74,7 @@ router.post('/gen_title', async (req, res) => {
res.status(200).json({ title });
} else {
res.status(404).json({
message: 'Title not found or method not implemented for the conversation\'s endpoint',
message: "Title not found or method not implemented for the conversation's endpoint",
});
}
});

View File

@@ -283,6 +283,10 @@ router.post('/', async (req, res) => {
message += ': ' + error.message;
}
if (error.message?.includes('Invalid file format')) {
message = error.message;
}
// TODO: delete remote file if it exists
try {
await fs.unlink(req.file.path);

View File

@@ -1,6 +1,7 @@
// file deepcode ignore NoRateLimitingForLogin: Rate limiting is handled by the `loginLimiter` middleware
const express = require('express');
const passport = require('passport');
const { randomState } = require('openid-client');
const {
checkBan,
logHeaders,
@@ -8,7 +9,8 @@ const {
setBalanceConfig,
checkDomainAllowed,
} = require('~/server/middleware');
const { setAuthTokens } = require('~/server/services/AuthService');
const { setAuthTokens, setOpenIDAuthTokens } = require('~/server/services/AuthService');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');
const router = express.Router();
@@ -28,7 +30,15 @@ const oauthHandler = async (req, res) => {
if (req.banned) {
return;
}
await setAuthTokens(req.user._id, res);
if (
req.user &&
req.user.provider == 'openid' &&
isEnabled(process.env.OPENID_REUSE_TOKENS) === true
) {
setOpenIDAuthTokens(req.user.tokenset, res);
} else {
await setAuthTokens(req.user._id, res);
}
res.redirect(domains.client);
} catch (err) {
logger.error('Error in setting authentication tokens:', err);
@@ -94,12 +104,12 @@ router.get(
/**
* OpenID Routes
*/
router.get(
'/openid',
passport.authenticate('openid', {
router.get('/openid', (req, res, next) => {
return passport.authenticate('openid', {
session: false,
}),
);
state: randomState(),
})(req, res, next);
});
router.get(
'/openid/callback',
@@ -179,4 +189,24 @@ router.post(
oauthHandler,
);
/**
* SAML Routes
*/
router.get(
'/saml',
passport.authenticate('saml', {
session: false,
}),
);
router.post(
'/saml/callback',
passport.authenticate('saml', {
failureRedirect: `${domains.client}/oauth/error`,
failureMessage: true,
session: false,
}),
oauthHandler,
);
module.exports = router;

View File

@@ -54,9 +54,7 @@ router.get('/', requireJwtAuth, async (req, res) => {
sortDirection: ['asc', 'desc'].includes(req.query.sortDirection)
? req.query.sortDirection
: 'desc',
search: req.query.search
? decodeURIComponent(req.query.search.trim())
: undefined,
search: req.query.search ? decodeURIComponent(req.query.search.trim()) : undefined,
};
const result = await getSharedLinks(

View File

@@ -3,7 +3,7 @@
* @readonly
* @enum {string}
*/
// eslint-disable-next-line no-unused-vars
const Tools = {
code_interpreter: 'code_interpreter',
retrieval: 'retrieval',

View File

@@ -207,7 +207,7 @@ async function createActionTool({
state: stateToken,
userId: userId,
client_url: metadata.auth.client_url,
redirect_uri: `${process.env.DOMAIN_CLIENT}/api/actions/${action_id}/oauth/callback`,
redirect_uri: `${process.env.DOMAIN_SERVER}/api/actions/${action_id}/oauth/callback`,
/** Encrypted values */
encrypted_oauth_client_id: encrypted.oauth_client_id,
encrypted_oauth_client_secret: encrypted.oauth_client_secret,

View File

@@ -25,6 +25,7 @@ jest.mock('./start/checks', () => ({
checkHealth: jest.fn(),
checkConfig: jest.fn(),
checkAzureVariables: jest.fn(),
checkWebSearchConfig: jest.fn(),
}));
const AppService = require('./AppService');

View File

@@ -1,11 +1,18 @@
const {
FileSources,
EModelEndpoint,
loadOCRConfig,
processMCPEnv,
EModelEndpoint,
getConfigDefaults,
loadWebSearchConfig,
} = require('librechat-data-provider');
const { checkVariables, checkHealth, checkConfig, checkAzureVariables } = require('./start/checks');
const {
checkHealth,
checkConfig,
checkVariables,
checkAzureVariables,
checkWebSearchConfig,
} = require('./start/checks');
const { azureAssistantsDefaults, assistantsConfigSetup } = require('./start/assistants');
const { initializeAzureBlobService } = require('./Files/Azure/initialize');
const { initializeFirebase } = require('./Files/Firebase/initialize');
@@ -35,6 +42,8 @@ const AppService = async (app) => {
const configDefaults = getConfigDefaults();
const ocr = loadOCRConfig(config.ocr);
const webSearch = loadWebSearchConfig(config.webSearch);
checkWebSearchConfig(webSearch);
const filteredTools = config.filteredTools;
const includedTools = config.includedTools;
const fileStrategy = config.fileStrategy ?? configDefaults.fileStrategy;
@@ -79,6 +88,7 @@ const AppService = async (app) => {
const defaultLocals = {
ocr,
paths,
webSearch,
fileStrategy,
socialLogins,
filteredTools,

View File

@@ -141,6 +141,14 @@ describe('AppService', () => {
balance: { enabled: true },
filteredTools: undefined,
includedTools: undefined,
webSearch: {
cohereApiKey: '${COHERE_API_KEY}',
firecrawlApiKey: '${FIRECRAWL_API_KEY}',
firecrawlApiUrl: '${FIRECRAWL_API_URL}',
jinaApiKey: '${JINA_API_KEY}',
safeSearch: 1,
serperApiKey: '${SERPER_API_KEY}',
},
});
});
@@ -537,7 +545,7 @@ describe('AppService updating app.locals and issuing warnings', () => {
const { logger } = require('~/config');
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining(
'The \'assistants\' endpoint has both \'supportedIds\' and \'excludedIds\' defined.',
"The 'assistants' endpoint has both 'supportedIds' and 'excludedIds' defined.",
),
);
});
@@ -559,7 +567,7 @@ describe('AppService updating app.locals and issuing warnings', () => {
const { logger } = require('~/config');
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining(
'The \'assistants\' endpoint has both \'privateAssistants\' and \'supportedIds\' or \'excludedIds\' defined.',
"The 'assistants' endpoint has both 'privateAssistants' and 'supportedIds' or 'excludedIds' defined.",
),
);
});

View File

@@ -89,9 +89,9 @@ describe('replaceArtifactContent', () => {
};
test('should replace content within artifact boundaries', () => {
const original = 'console.log(\'hello\')';
const original = "console.log('hello')";
const artifact = createTestArtifact(original);
const updated = 'console.log(\'updated\')';
const updated = "console.log('updated')";
const result = replaceArtifactContent(artifact.text, artifact, original, updated);
expect(result).toContain(updated);

View File

@@ -377,13 +377,62 @@ const setAuthTokens = async (userId, res, sessionId = null) => {
secure: isProduction,
sameSite: 'strict',
});
res.cookie('token_provider', 'librechat', {
expires: new Date(refreshTokenExpires),
httpOnly: true,
secure: isProduction,
sameSite: 'strict',
});
return token;
} catch (error) {
logger.error('[setAuthTokens] Error in setting authentication tokens:', error);
throw error;
}
};
/**
* @function setOpenIDAuthTokens
* Set OpenID Authentication Tokens
* //type tokenset from openid-client
* @param {import('openid-client').TokenEndpointResponse & import('openid-client').TokenEndpointResponseHelpers} tokenset
* - The tokenset object containing access and refresh tokens
* @param {Object} res - response object
* @returns {String} - access token
*/
const setOpenIDAuthTokens = (tokenset, res) => {
try {
if (!tokenset) {
logger.error('[setOpenIDAuthTokens] No tokenset found in request');
return;
}
const { REFRESH_TOKEN_EXPIRY } = process.env ?? {};
const expiryInMilliseconds = eval(REFRESH_TOKEN_EXPIRY) ?? 1000 * 60 * 60 * 24 * 7; // 7 days default
const expirationDate = new Date(Date.now() + expiryInMilliseconds);
if (tokenset == null) {
logger.error('[setOpenIDAuthTokens] No tokenset found in request');
return;
}
if (!tokenset.access_token || !tokenset.refresh_token) {
logger.error('[setOpenIDAuthTokens] No access or refresh token found in tokenset');
return;
}
res.cookie('refreshToken', tokenset.refresh_token, {
expires: expirationDate,
httpOnly: true,
secure: isProduction,
sameSite: 'strict',
});
res.cookie('token_provider', 'openid', {
expires: expirationDate,
httpOnly: true,
secure: isProduction,
sameSite: 'strict',
});
return tokenset.access_token;
} catch (error) {
logger.error('[setOpenIDAuthTokens] Error in setting authentication tokens:', error);
throw error;
}
};
/**
* Resend Verification Email
@@ -452,4 +501,5 @@ module.exports = {
resetPassword,
requestPasswordReset,
resendVerificationEmail,
setOpenIDAuthTokens,
};
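A minimal sketch of the new OpenID token reuse flow, assuming an Express handler and a tokenset obtained from the openid-client login callback.
function handleOpenIdLogin(tokenset, res) {
  // Sets two httpOnly cookies: refreshToken (the provider's refresh token)
  // and token_provider='openid', then returns the provider access token.
  const accessToken = setOpenIDAuthTokens(tokenset, res);
  return accessToken;
}
// On later refreshes, refreshController reads the token_provider cookie and,
// when it is 'openid' with OPENID_REUSE_TOKENS enabled, exchanges the cookie's
// refresh token via openIdClient.refreshTokenGrant rather than verifying a
// LibreChat-signed JWT; password logins set token_provider='librechat' via
// setAuthTokens, as shown earlier in this diff.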

View File

@@ -32,14 +32,14 @@ async function loadAsyncEndpoints(req) {
const gptPlugins =
useAzure || openAIApiKey || azureOpenAIApiKey
? {
availableAgents: ['classic', 'functions'],
userProvide: useAzure ? false : userProvidedOpenAI,
userProvideURL: useAzure
? false
: config[EModelEndpoint.openAI]?.userProvideURL ||
availableAgents: ['classic', 'functions'],
userProvide: useAzure ? false : userProvidedOpenAI,
userProvideURL: useAzure
? false
: config[EModelEndpoint.openAI]?.userProvideURL ||
config[EModelEndpoint.azureOpenAI]?.userProvideURL,
azure: useAzurePlugins || useAzure,
}
azure: useAzurePlugins || useAzure,
}
: false;
return { google, gptPlugins };

View File

@@ -15,20 +15,14 @@ function checkPromptCacheSupport(modelName) {
return false;
}
if (
modelMatch === 'claude-3-7-sonnet' ||
modelMatch === 'claude-3-5-sonnet' ||
modelMatch === 'claude-3-5-haiku' ||
modelMatch === 'claude-3-haiku' ||
modelMatch === 'claude-3-opus' ||
modelMatch === 'claude-3.7-sonnet' ||
modelMatch === 'claude-3.5-sonnet' ||
modelMatch === 'claude-3.5-haiku'
) {
return true;
}
return false;
return (
/claude-3[-.]7/.test(modelMatch) ||
/claude-3[-.]5-(?:sonnet|haiku)/.test(modelMatch) ||
/claude-3-(?:sonnet|haiku|opus)?/.test(modelMatch) ||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(modelMatch) ||
/claude-[4-9]-(?:sonnet|opus|haiku)?/.test(modelMatch) ||
/claude-4(?:-(?:sonnet|opus|haiku))?/.test(modelMatch)
);
}
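A quick illustration of what the rewritten check accepts; the model ids are examples only, and modelMatch normalization happens upstream of this function.
const supportsCache = (modelMatch) =>
  /claude-3[-.]7/.test(modelMatch) ||
  /claude-3[-.]5-(?:sonnet|haiku)/.test(modelMatch) ||
  /claude-3-(?:sonnet|haiku|opus)?/.test(modelMatch) ||
  /claude-(?:sonnet|opus|haiku)-[4-9]/.test(modelMatch) ||
  /claude-[4-9]-(?:sonnet|opus|haiku)?/.test(modelMatch) ||
  /claude-4(?:-(?:sonnet|opus|haiku))?/.test(modelMatch);

console.log(supportsCache('claude-sonnet-4'));  // true: new Claude 4 naming
console.log(supportsCache('claude-3-5-haiku')); // true
console.log(supportsCache('claude-2.1'));       // false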
/**
@@ -51,6 +45,14 @@ function getClaudeHeaders(model, supportsCacheControl) {
'anthropic-beta':
'token-efficient-tools-2025-02-19,output-128k-2025-02-19,prompt-caching-2024-07-31',
};
} else if (
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(model) ||
/claude-[4-9]-(?:sonnet|opus|haiku)?/.test(model) ||
/claude-4(?:-(?:sonnet|opus|haiku))?/.test(model)
) {
return {
'anthropic-beta': 'prompt-caching-2024-07-31',
};
} else {
return {
'anthropic-beta': 'prompt-caching-2024-07-31',
@@ -72,7 +74,8 @@ function configureReasoning(anthropicInput, extendedOptions = {}) {
if (
extendedOptions.thinking &&
updatedOptions?.model &&
/claude-3[-.]7/.test(updatedOptions.model)
(/claude-3[-.]7/.test(updatedOptions.model) ||
/claude-(?:sonnet|opus|haiku)-[4-9]/.test(updatedOptions.model))
) {
updatedOptions.thinking = {
type: 'enabled',

View File

@@ -3,7 +3,6 @@ const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts');
const { getAssistant } = require('~/models/Assistant');
const buildOptions = async (endpoint, parsedBody) => {
const { promptPrefix, assistant_id, iconURL, greeting, spec, artifacts, ...modelOptions } =
parsedBody;
const endpointOption = removeNullishValues({

View File

@@ -25,10 +25,10 @@ const getOptions = async ({ req, overrideModel, endpointOption }) => {
let credentials = isUserProvided
? await getUserKey({ userId: req.user.id, name: EModelEndpoint.bedrock })
: {
accessKeyId: BEDROCK_AWS_ACCESS_KEY_ID,
secretAccessKey: BEDROCK_AWS_SECRET_ACCESS_KEY,
...(BEDROCK_AWS_SESSION_TOKEN && { sessionToken: BEDROCK_AWS_SESSION_TOKEN }),
};
accessKeyId: BEDROCK_AWS_ACCESS_KEY_ID,
secretAccessKey: BEDROCK_AWS_SECRET_ACCESS_KEY,
...(BEDROCK_AWS_SESSION_TOKEN && { sessionToken: BEDROCK_AWS_SESSION_TOKEN }),
};
if (!credentials) {
throw new Error('Bedrock credentials not provided. Please provide them again.');

View File

@@ -25,9 +25,9 @@ const initializeClient = async ({ req, res, endpointOption, overrideModel, optio
const credentials = isUserProvided
? userKey
: {
[AuthKeys.GOOGLE_SERVICE_KEY]: serviceKey,
[AuthKeys.GOOGLE_API_KEY]: GOOGLE_KEY,
};
[AuthKeys.GOOGLE_SERVICE_KEY]: serviceKey,
[AuthKeys.GOOGLE_API_KEY]: GOOGLE_KEY,
};
let clientOptions = {};

View File

@@ -94,7 +94,7 @@ function getLLMConfig(credentials, options = {}) {
// Extract from credentials
const serviceKeyRaw = creds[AuthKeys.GOOGLE_SERVICE_KEY] ?? {};
const serviceKey =
typeof serviceKeyRaw === 'string' ? JSON.parse(serviceKeyRaw) : serviceKeyRaw ?? {};
typeof serviceKeyRaw === 'string' ? JSON.parse(serviceKeyRaw) : (serviceKeyRaw ?? {});
const project_id = serviceKey?.project_id ?? null;
const apiKey = creds[AuthKeys.GOOGLE_API_KEY] ?? null;

View File

@@ -114,11 +114,11 @@ describe('gptPlugins/initializeClient', () => {
test('should initialize PluginsClient with Azure credentials when PLUGINS_USE_AZURE is true', async () => {
process.env.AZURE_API_KEY = 'test-azure-api-key';
(process.env.AZURE_OPENAI_API_INSTANCE_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_VERSION = 'some-value'),
(process.env.AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME = 'some-value'),
(process.env.PLUGINS_USE_AZURE = 'true');
(process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_VERSION = 'some-value'),
(process.env.AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME = 'some-value'),
(process.env.PLUGINS_USE_AZURE = 'true');
process.env.DEBUG_PLUGINS = 'false';
process.env.OPENAI_SUMMARIZE = 'false';

View File

@@ -112,11 +112,11 @@ describe('initializeClient', () => {
test('should initialize client with Azure credentials when endpoint is azureOpenAI', async () => {
process.env.AZURE_API_KEY = 'test-azure-api-key';
(process.env.AZURE_OPENAI_API_INSTANCE_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_VERSION = 'some-value'),
(process.env.AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME = 'some-value'),
(process.env.OPENAI_API_KEY = 'test-openai-api-key');
(process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_VERSION = 'some-value'),
(process.env.AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME = 'some-value'),
(process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME = 'some-value'),
(process.env.OPENAI_API_KEY = 'test-openai-api-key');
process.env.DEBUG_OPENAI = 'false';
process.env.OPENAI_SUMMARIZE = 'false';

View File

@@ -2,7 +2,12 @@
const fs = require('fs');
const path = require('path');
const FormData = require('form-data');
const { FileSources, envVarRegex, extractEnvVariable } = require('librechat-data-provider');
const {
FileSources,
envVarRegex,
extractEnvVariable,
extractVariableName,
} = require('librechat-data-provider');
const { loadAuthValues } = require('~/server/services/Tools/credentials');
const { logger, createAxiosInstance } = require('~/config');
const { logAxiosError } = require('~/utils/axios');
@@ -42,7 +47,6 @@ async function uploadDocumentToMistral({
})
.then((res) => res.data)
.catch((error) => {
logger.error('Error uploading document to Mistral:', error.message);
throw error;
});
}
@@ -108,11 +112,6 @@ async function performOCR({
});
}
function extractVariableName(str) {
const match = str.match(envVarRegex);
return match ? match[1] : null;
}
/**
* Uploads a file to the Mistral OCR API and processes the OCR result.
*
@@ -217,8 +216,16 @@ const uploadMistralOCR = async ({ req, file, file_id, entity_id }) => {
images,
};
} catch (error) {
const message = 'Error uploading document to Mistral OCR API';
throw new Error(logAxiosError({ error, message }));
let message = 'Error uploading document to Mistral OCR API';
const detail = error?.response?.data?.detail;
if (detail && detail !== '') {
message = detail;
}
const responseMessage = error?.response?.data?.message;
throw new Error(
`${logAxiosError({ error, message })}${responseMessage && responseMessage !== '' ? ` - ${responseMessage}` : ''}`,
);
}
};

View File

@@ -124,13 +124,7 @@ describe('MistralOCR Service', () => {
fileName: 'test.pdf',
apiKey: 'test-api-key',
}),
).rejects.toThrow();
const { logger } = require('~/config');
expect(logger.error).toHaveBeenCalledWith(
expect.stringContaining('Error uploading document to Mistral:'),
expect.any(String),
);
).rejects.toThrow(errorMessage);
});
});

View File

@@ -66,16 +66,26 @@ const getUserPluginAuthValue = async (userId, authField, throwError = true) => {
// }
// };
/**
*
* @async
* @param {string} userId
* @param {string} authField
* @param {string} pluginKey
* @param {string} value
* @returns {Promise<IPluginAuth>}
* @throws {Error}
*/
const updateUserPluginAuth = async (userId, authField, pluginKey, value) => {
try {
const encryptedValue = await encrypt(value);
const pluginAuth = await PluginAuth.findOne({ userId, authField }).lean();
if (pluginAuth) {
const pluginAuth = await PluginAuth.updateOne(
return await PluginAuth.findOneAndUpdate(
{ userId, authField },
{ $set: { value: encryptedValue } },
);
return pluginAuth;
{ new: true, upsert: true },
).lean();
} else {
const newPluginAuth = await new PluginAuth({
userId,
@@ -84,7 +94,7 @@ const updateUserPluginAuth = async (userId, authField, pluginKey, value) => {
pluginKey,
});
await newPluginAuth.save();
return newPluginAuth;
return newPluginAuth.toObject();
}
} catch (err) {
logger.error('[updateUserPluginAuth]', err);
@@ -92,6 +102,14 @@ const updateUserPluginAuth = async (userId, authField, pluginKey, value) => {
}
};
/**
* @async
* @param {string} userId
* @param {string} authField
* @param {boolean} [all]
* @returns {Promise<import('mongoose').DeleteResult>}
* @throws {Error}
*/
const deleteUserPluginAuth = async (userId, authField, all = false) => {
if (all) {
try {

View File

@@ -302,7 +302,7 @@ class StreamRunManager {
for (const d of delta[key]) {
if (typeof d === 'object' && !Object.prototype.hasOwnProperty.call(d, 'index')) {
logger.warn('Expected an object with an \'index\' for array updates but got:', d);
logger.warn("Expected an object with an 'index' for array updates but got:", d);
continue;
}

View File

@@ -255,7 +255,7 @@ describe('processMessages', () => {
type: 'text',
text: {
value:
'The text you have uploaded is from the book "Harry Potter and the Philosopher\'s Stone" by J.K. Rowling. It follows the story of a young boy named Harry Potter who discovers that he is a wizard on his eleventh birthday. Here are some key points of the narrative:\n\n1. **Discovery and Invitation to Hogwarts**: Harry learns that he is a wizard and receives an invitation to attend Hogwarts School of Witchcraft and Wizardry【11:2†source】【11:4†source】.\n\n2. **Shopping for Supplies**: Hagrid takes Harry to Diagon Alley to buy his school supplies, including his wand from Ollivander\'s【11:9†source】【11:14†source】.\n\n3. **Introduction to Hogwarts**: Harry is introduced to Hogwarts, the magical school where he will learn about magic and discover more about his own background【11:12†source】【11:18†source】.\n\n4. **Meeting Friends and Enemies**: At Hogwarts, Harry makes friends like Ron Weasley and Hermione Granger, and enemies like Draco Malfoy【11:16†source】.\n\n5. **Uncovering the Mystery**: Harry, along with Ron and Hermione, uncovers the mystery of the Philosopher\'s Stone and its connection to the dark wizard Voldemort【11:1†source】【11:10†source】【11:7†source】.\n\nThese points highlight Harry\'s initial experiences in the magical world and set the stage for his adventures at Hogwarts.',
"The text you have uploaded is from the book \"Harry Potter and the Philosopher's Stone\" by J.K. Rowling. It follows the story of a young boy named Harry Potter who discovers that he is a wizard on his eleventh birthday. Here are some key points of the narrative:\n\n1. **Discovery and Invitation to Hogwarts**: Harry learns that he is a wizard and receives an invitation to attend Hogwarts School of Witchcraft and Wizardry【11:2†source】【11:4†source】.\n\n2. **Shopping for Supplies**: Hagrid takes Harry to Diagon Alley to buy his school supplies, including his wand from Ollivander's【11:9†source】【11:14†source】.\n\n3. **Introduction to Hogwarts**: Harry is introduced to Hogwarts, the magical school where he will learn about magic and discover more about his own background【11:12†source】【11:18†source】.\n\n4. **Meeting Friends and Enemies**: At Hogwarts, Harry makes friends like Ron Weasley and Hermione Granger, and enemies like Draco Malfoy【11:16†source】.\n\n5. **Uncovering the Mystery**: Harry, along with Ron and Hermione, uncovers the mystery of the Philosopher's Stone and its connection to the dark wizard Voldemort【11:1†source】【11:10†source】【11:7†source】.\n\nThese points highlight Harry's initial experiences in the magical world and set the stage for his adventures at Hogwarts.",
annotations: [
{
type: 'file_citation',
@@ -424,7 +424,7 @@ These points highlight Harry's initial experiences in the magical world and set
type: 'text',
text: {
value:
'The text you have uploaded is from the book "Harry Potter and the Philosopher\'s Stone" by J.K. Rowling. It follows the story of a young boy named Harry Potter who discovers that he is a wizard on his eleventh birthday. Here are some key points of the narrative:\n\n1. **Discovery and Invitation to Hogwarts**: Harry learns that he is a wizard and receives an invitation to attend Hogwarts School of Witchcraft and Wizardry【11:2†source】【11:4†source】.\n\n2. **Shopping for Supplies**: Hagrid takes Harry to Diagon Alley to buy his school supplies, including his wand from Ollivander\'s【11:9†source】【11:14†source】.\n\n3. **Introduction to Hogwarts**: Harry is introduced to Hogwarts, the magical school where he will learn about magic and discover more about his own background【11:12†source】【11:18†source】.\n\n4. **Meeting Friends and Enemies**: At Hogwarts, Harry makes friends like Ron Weasley and Hermione Granger, and enemies like Draco Malfoy【11:16†source】.\n\n5. **Uncovering the Mystery**: Harry, along with Ron and Hermione, uncovers the mystery of the Philosopher\'s Stone and its connection to the dark wizard Voldemort【11:1†source】【11:10†source】【11:7†source】.\n\nThese points highlight Harry\'s initial experiences in the magical world and set the stage for his adventures at Hogwarts.',
"The text you have uploaded is from the book \"Harry Potter and the Philosopher's Stone\" by J.K. Rowling. It follows the story of a young boy named Harry Potter who discovers that he is a wizard on his eleventh birthday. Here are some key points of the narrative:\n\n1. **Discovery and Invitation to Hogwarts**: Harry learns that he is a wizard and receives an invitation to attend Hogwarts School of Witchcraft and Wizardry【11:2†source】【11:4†source】.\n\n2. **Shopping for Supplies**: Hagrid takes Harry to Diagon Alley to buy his school supplies, including his wand from Ollivander's【11:9†source】【11:14†source】.\n\n3. **Introduction to Hogwarts**: Harry is introduced to Hogwarts, the magical school where he will learn about magic and discover more about his own background【11:12†source】【11:18†source】.\n\n4. **Meeting Friends and Enemies**: At Hogwarts, Harry makes friends like Ron Weasley and Hermione Granger, and enemies like Draco Malfoy【11:16†source】.\n\n5. **Uncovering the Mystery**: Harry, along with Ron and Hermione, uncovers the mystery of the Philosopher's Stone and its connection to the dark wizard Voldemort【11:1†source】【11:10†source】【11:7†source】.\n\nThese points highlight Harry's initial experiences in the magical world and set the stage for his adventures at Hogwarts.",
annotations: [
{
type: 'file_citation',
@@ -582,7 +582,7 @@ These points highlight Harry's initial experiences in the magical world and set
type: 'text',
text: {
value:
'This is a test ^1^ with pre-existing citation-like text. Here\'s a real citation【11:2†source】.',
"This is a test ^1^ with pre-existing citation-like text. Here's a real citation【11:2†source】.",
annotations: [
{
type: 'file_citation',
@@ -610,7 +610,7 @@ These points highlight Harry's initial experiences in the magical world and set
});
const expectedText =
'This is a test ^1^ with pre-existing citation-like text. Here\'s a real citation^1^.\n\n^1.^ test.txt';
"This is a test ^1^ with pre-existing citation-like text. Here's a real citation^1^.\n\n^1.^ test.txt";
expect(result.text).toBe(expectedText);
expect(result.edited).toBe(true);

View File

@@ -1,10 +1,11 @@
const fs = require('fs');
const path = require('path');
const { zodToJsonSchema } = require('zod-to-json-schema');
const { tool: toolFn, Tool, DynamicStructuredTool } = require('@langchain/core/tools');
const { Calculator } = require('@langchain/community/tools/calculator');
const { tool: toolFn, Tool, DynamicStructuredTool } = require('@langchain/core/tools');
const {
Tools,
Constants,
ErrorTypes,
ContentTypes,
imageGenTools,
@@ -14,6 +15,7 @@ const {
ImageVisionTool,
openapiToFunction,
AgentCapabilities,
defaultAgentCapabilities,
validateAndParseOpenAPISpec,
} = require('librechat-data-provider');
const {
@@ -29,6 +31,7 @@ const {
toolkits,
} = require('~/app/clients/tools');
const { processFileURL, uploadImageBuffer } = require('~/server/services/Files/process');
const { createOnSearchResults } = require('~/server/services/Tools/search');
const { isActionDomainAllowed } = require('~/server/services/domains');
const { getEndpointsConfig } = require('~/server/services/Config');
const { recordUsage } = require('~/server/services/Threads');
@@ -500,15 +503,33 @@ async function loadAgentTools({ req, res, agent, tool_resources, openAIApiKey })
}
const endpointsConfig = await getEndpointsConfig(req);
const enabledCapabilities = new Set(endpointsConfig?.[EModelEndpoint.agents]?.capabilities ?? []);
const checkCapability = (capability) => enabledCapabilities.has(capability);
let enabledCapabilities = new Set(endpointsConfig?.[EModelEndpoint.agents]?.capabilities ?? []);
/** Edge case: use defined/fallback capabilities when the "agents" endpoint is not enabled */
if (enabledCapabilities.size === 0 && agent.id === Constants.EPHEMERAL_AGENT_ID) {
enabledCapabilities = new Set(
req.app?.locals?.[EModelEndpoint.agents]?.capabilities ?? defaultAgentCapabilities,
);
}
const checkCapability = (capability) => {
const enabled = enabledCapabilities.has(capability);
if (!enabled) {
logger.warn(
`Capability "${capability}" disabled${capability === AgentCapabilities.tools ? '.' : ' despite configured tool.'} User: ${req.user.id} | Agent: ${agent.id}`,
);
}
return enabled;
};
const areToolsEnabled = checkCapability(AgentCapabilities.tools);
let includesWebSearch = false;
const _agentTools = agent.tools?.filter((tool) => {
if (tool === Tools.file_search) {
return checkCapability(AgentCapabilities.file_search);
} else if (tool === Tools.execute_code) {
return checkCapability(AgentCapabilities.execute_code);
} else if (tool === Tools.web_search) {
includesWebSearch = checkCapability(AgentCapabilities.web_search);
return includesWebSearch;
} else if (!areToolsEnabled && !tool.includes(actionDelimiter)) {
return false;
}
@@ -518,7 +539,11 @@ async function loadAgentTools({ req, res, agent, tool_resources, openAIApiKey })
if (!_agentTools || _agentTools.length === 0) {
return {};
}
/** @type {ReturnType<createOnSearchResults>} */
let webSearchCallbacks;
if (includesWebSearch) {
webSearchCallbacks = createOnSearchResults(res);
}
const { loadedTools, toolContextMap } = await loadTools({
agent,
functions: true,
@@ -532,6 +557,7 @@ async function loadAgentTools({ req, res, agent, tool_resources, openAIApiKey })
uploadImageBuffer,
returnMetadata: true,
fileStrategy: req.app.locals.fileStrategy,
[Tools.web_search]: webSearchCallbacks,
},
});
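
For orientation, a minimal standalone sketch (not part of the diff) of the capability-gating pattern the hunk above introduces: capabilities normally come from the agents endpoint config, with a fallback to default capabilities for the ephemeral agent when that endpoint is not enabled, and a warning is logged whenever a configured tool is filtered out. The capability list and console logger here are stand-ins for the project's values and logger.

// Minimal sketch of the capability-gating pattern from the hunk above.
// The capability list and logger are stand-ins; the real code reads the
// endpoints config from the request and uses the project logger.
const defaultAgentCapabilities = ['execute_code', 'file_search', 'web_search', 'tools'];

function buildCapabilityCheck({ configuredCapabilities, isEphemeralAgent }) {
  // Edge case from the diff: fall back to defaults when no capabilities are
  // configured but the agent is the ephemeral one.
  let enabled = new Set(configuredCapabilities ?? []);
  if (enabled.size === 0 && isEphemeralAgent) {
    enabled = new Set(defaultAgentCapabilities);
  }
  return (capability) => {
    const ok = enabled.has(capability);
    if (!ok) {
      console.warn(`Capability "${capability}" disabled`);
    }
    return ok;
  };
}

// Example: no endpoint config, ephemeral agent -> defaults apply.
const checkCapability = buildCapabilityCheck({
  configuredCapabilities: [],
  isEphemeralAgent: true,
});
console.log(checkCapability('web_search')); // true
console.log(checkCapability('nonexistent')); // warns, then returns false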

View File

@@ -0,0 +1,122 @@
const { nanoid } = require('nanoid');
const { Tools } = require('librechat-data-provider');
const { logger } = require('~/config');
/**
* Creates a function to handle search results and stream them as attachments
* @param {import('http').ServerResponse} res - The HTTP server response object
 * @returns {{ onSearchResults: function(SearchResult, GraphRunnableConfig): void; onGetHighlights: function(string): void}} - Handlers that stream search results and source highlights as attachments
*/
function createOnSearchResults(res) {
const context = {
sourceMap: new Map(),
searchResultData: undefined,
toolCallId: undefined,
attachmentName: undefined,
messageId: undefined,
conversationId: undefined,
};
/**
* @param {SearchResult} results
* @param {GraphRunnableConfig} runnableConfig
*/
function onSearchResults(results, runnableConfig) {
logger.info(
`[onSearchResults] user: ${runnableConfig.metadata.user_id} | thread_id: ${runnableConfig.metadata.thread_id} | run_id: ${runnableConfig.metadata.run_id}`,
results,
);
if (!results.success) {
logger.error(
`[onSearchResults] user: ${runnableConfig.metadata.user_id} | thread_id: ${runnableConfig.metadata.thread_id} | run_id: ${runnableConfig.metadata.run_id} | error: ${results.error}`,
);
return;
}
const turn = runnableConfig.toolCall?.turn ?? 0;
const data = { turn, ...structuredClone(results.data ?? {}) };
context.searchResultData = data;
// Map sources to links
for (let i = 0; i < data.organic.length; i++) {
const source = data.organic[i];
if (source.link) {
context.sourceMap.set(source.link, {
type: 'organic',
index: i,
turn,
});
}
}
for (let i = 0; i < data.topStories.length; i++) {
const source = data.topStories[i];
if (source.link) {
context.sourceMap.set(source.link, {
type: 'topStories',
index: i,
turn,
});
}
}
context.toolCallId = runnableConfig.toolCall.id;
context.messageId = runnableConfig.metadata.run_id;
context.conversationId = runnableConfig.metadata.thread_id;
context.attachmentName = `${runnableConfig.toolCall.name}_${context.toolCallId}_${nanoid()}`;
const attachment = buildAttachment(context);
if (!res.headersSent) {
return attachment;
}
res.write(`event: attachment\ndata: ${JSON.stringify(attachment)}\n\n`);
}
/**
* @param {string} link
* @returns {void}
*/
function onGetHighlights(link) {
const source = context.sourceMap.get(link);
if (!source) {
return;
}
const { type, index } = source;
const data = context.searchResultData;
if (!data) {
return;
}
if (data[type][index] != null) {
data[type][index].processed = true;
}
const attachment = buildAttachment(context);
res.write(`event: attachment\ndata: ${JSON.stringify(attachment)}\n\n`);
}
return {
onSearchResults,
onGetHighlights,
};
}
/**
* Helper function to build an attachment object
* @param {object} context - The context containing attachment data
* @returns {object} - The attachment object
*/
function buildAttachment(context) {
return {
messageId: context.messageId,
toolCallId: context.toolCallId,
conversationId: context.conversationId,
name: context.attachmentName,
type: Tools.web_search,
[Tools.web_search]: context.searchResultData,
};
}
module.exports = {
createOnSearchResults,
};
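
A hedged usage sketch (not part of the diff): exercising the two handlers above against a mock response object to see the SSE attachment frames they write. The import path follows the project's '~' alias and the sample payload is an assumption; its organic/topStories arrays with `link` fields mirror what the source-mapping loops above iterate over.

const { createOnSearchResults } = require('~/server/services/Tools/search'); // path assumed

const mockRes = {
  headersSent: true, // force streaming instead of returning the attachment
  write: (chunk) => process.stdout.write(chunk),
};

const { onSearchResults, onGetHighlights } = createOnSearchResults(mockRes);

// One simulated search turn with a single organic source.
onSearchResults(
  { success: true, data: { organic: [{ link: 'https://example.com' }], topStories: [] } },
  {
    toolCall: { id: 'call_1', name: 'web_search', turn: 0 },
    metadata: { user_id: 'u1', thread_id: 't1', run_id: 'msg_1' },
  },
);

// Marks the example.com source as processed and re-emits the attachment.
onGetHighlights('https://example.com');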

View File

@@ -1,7 +1,9 @@
const {
Constants,
webSearchKeys,
deprecatedAzureVariables,
conflictingAzureVariables,
extractVariableName,
} = require('librechat-data-provider');
const { isEnabled, checkEmailConfig } = require('~/server/utils');
const { logger } = require('~/config');
@@ -141,4 +143,56 @@ function checkPasswordReset() {
}
}
module.exports = { checkVariables, checkHealth, checkConfig, checkAzureVariables };
/**
* Checks web search configuration values to ensure they are environment variable references.
* Warns if actual API keys or URLs are used instead of environment variable references.
* Logs debug information for properly configured environment variable references.
* @param {Object} webSearchConfig - The loaded web search configuration object.
*/
function checkWebSearchConfig(webSearchConfig) {
if (!webSearchConfig) {
return;
}
webSearchKeys.forEach((key) => {
const value = webSearchConfig[key];
if (typeof value === 'string') {
const varName = extractVariableName(value);
if (varName) {
// This is a proper environment variable reference
const actualValue = process.env[varName];
if (actualValue) {
logger.debug(`Web search ${key}: Using environment variable ${varName} with value set`);
} else {
logger.debug(
`Web search ${key}: Using environment variable ${varName} (not set in environment, user provided value)`,
);
}
} else {
// This is not an environment variable reference - warn user
logger.warn(
`❗ Web search configuration error: ${key} contains an actual value instead of an environment variable reference.
Current value: "${value.substring(0, 10)}..."
This is incorrect! You should use environment variable references in your librechat.yaml file, such as:
${key}: "\${YOUR_ENV_VAR_NAME}"
Then set the actual API key in your .env file or environment variables.
More info: https://www.librechat.ai/docs/configuration/librechat_yaml/web_search`,
);
}
}
});
}
module.exports = {
checkHealth,
checkConfig,
checkVariables,
checkAzureVariables,
checkWebSearchConfig,
};
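
A hedged sketch (not part of the diff) of the configuration contract the warning above describes: librechat.yaml holds an environment variable reference and the real key lives in .env. The import path, the webSearch YAML layout, and the SERPER_API_KEY name are illustrative assumptions.

// librechat.yaml (illustrative):  webSearch:
//                                   serperApiKey: "${SERPER_API_KEY}"
// .env (illustrative):            SERPER_API_KEY=actual-key-here
const { checkWebSearchConfig } = require('~/server/services/start/checks'); // path assumed

process.env.SERPER_API_KEY = 'actual-key-here';

// Correct: the value is an env var reference, so only a debug line is logged.
checkWebSearchConfig({ serperApiKey: '${SERPER_API_KEY}' });

// Incorrect: the value is the key itself, so the warning above is logged.
checkWebSearchConfig({ serperApiKey: 'sk-1234567890abcdef' });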

View File

@@ -0,0 +1,203 @@
// Mock librechat-data-provider
jest.mock('librechat-data-provider', () => ({
...jest.requireActual('librechat-data-provider'),
extractVariableName: jest.fn(),
}));
// Mock the config logger
jest.mock('~/config', () => ({
logger: {
debug: jest.fn(),
warn: jest.fn(),
},
}));
const { checkWebSearchConfig } = require('./checks');
const { logger } = require('~/config');
const { extractVariableName } = require('librechat-data-provider');
describe('checkWebSearchConfig', () => {
let originalEnv;
beforeEach(() => {
// Clear all mocks
jest.clearAllMocks();
// Store original environment
originalEnv = process.env;
// Reset process.env
process.env = { ...originalEnv };
});
afterEach(() => {
// Restore original environment
process.env = originalEnv;
});
describe('when webSearchConfig is undefined or null', () => {
it('should return early without logging when config is undefined', () => {
checkWebSearchConfig(undefined);
expect(logger.debug).not.toHaveBeenCalled();
expect(logger.warn).not.toHaveBeenCalled();
});
it('should return early without logging when config is null', () => {
checkWebSearchConfig(null);
expect(logger.debug).not.toHaveBeenCalled();
expect(logger.warn).not.toHaveBeenCalled();
});
});
describe('when config values are proper environment variable references', () => {
it('should log debug message for each valid environment variable with value set', () => {
const config = {
serperApiKey: '${SERPER_API_KEY}',
jinaApiKey: '${JINA_API_KEY}',
};
extractVariableName.mockReturnValueOnce('SERPER_API_KEY').mockReturnValueOnce('JINA_API_KEY');
process.env.SERPER_API_KEY = 'test-serper-key';
process.env.JINA_API_KEY = 'test-jina-key';
checkWebSearchConfig(config);
expect(extractVariableName).toHaveBeenCalledWith('${SERPER_API_KEY}');
expect(extractVariableName).toHaveBeenCalledWith('${JINA_API_KEY}');
expect(logger.debug).toHaveBeenCalledWith(
'Web search serperApiKey: Using environment variable SERPER_API_KEY with value set',
);
expect(logger.debug).toHaveBeenCalledWith(
'Web search jinaApiKey: Using environment variable JINA_API_KEY with value set',
);
expect(logger.warn).not.toHaveBeenCalled();
});
it('should log debug message for environment variables not set in environment', () => {
const config = {
cohereApiKey: '${COHERE_API_KEY}',
};
extractVariableName.mockReturnValue('COHERE_API_KEY');
delete process.env.COHERE_API_KEY;
checkWebSearchConfig(config);
expect(logger.debug).toHaveBeenCalledWith(
'Web search cohereApiKey: Using environment variable COHERE_API_KEY (not set in environment, user provided value)',
);
expect(logger.warn).not.toHaveBeenCalled();
});
});
describe('when config values are actual values instead of environment variable references', () => {
it('should warn when serperApiKey contains actual API key', () => {
const config = {
serperApiKey: 'sk-1234567890abcdef',
};
extractVariableName.mockReturnValue(null);
checkWebSearchConfig(config);
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining(
'❗ Web search configuration error: serperApiKey contains an actual value',
),
);
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining('Current value: "sk-1234567..."'),
);
expect(logger.debug).not.toHaveBeenCalled();
});
it('should warn when firecrawlApiUrl contains actual URL', () => {
const config = {
firecrawlApiUrl: 'https://api.firecrawl.dev',
};
extractVariableName.mockReturnValue(null);
checkWebSearchConfig(config);
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining(
'❗ Web search configuration error: firecrawlApiUrl contains an actual value',
),
);
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining('Current value: "https://ap..."'),
);
});
it('should include documentation link in warning message', () => {
const config = {
firecrawlApiKey: 'fc-actual-key',
};
extractVariableName.mockReturnValue(null);
checkWebSearchConfig(config);
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining(
'More info: https://www.librechat.ai/docs/configuration/librechat_yaml/web_search',
),
);
});
});
describe('when config contains mixed value types', () => {
it('should only process string values and ignore non-string values', () => {
const config = {
serperApiKey: '${SERPER_API_KEY}',
safeSearch: 1,
scraperTimeout: 7500,
jinaApiKey: 'actual-key',
};
extractVariableName.mockReturnValueOnce('SERPER_API_KEY').mockReturnValueOnce(null);
process.env.SERPER_API_KEY = 'test-key';
checkWebSearchConfig(config);
expect(extractVariableName).toHaveBeenCalledTimes(2);
expect(logger.debug).toHaveBeenCalledTimes(1);
expect(logger.warn).toHaveBeenCalledTimes(1);
});
});
describe('edge cases', () => {
it('should handle config with no web search keys', () => {
const config = {
someOtherKey: 'value',
anotherKey: '${SOME_VAR}',
};
checkWebSearchConfig(config);
expect(extractVariableName).not.toHaveBeenCalled();
expect(logger.debug).not.toHaveBeenCalled();
expect(logger.warn).not.toHaveBeenCalled();
});
it('should truncate long values in warning messages', () => {
const config = {
serperApiKey: 'this-is-a-very-long-api-key-that-should-be-truncated-in-the-warning-message',
};
extractVariableName.mockReturnValue(null);
checkWebSearchConfig(config);
expect(logger.warn).toHaveBeenCalledWith(
expect.stringContaining('Current value: "this-is-a-..."'),
);
});
});
});

View File

@@ -38,6 +38,7 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
agents: interfaceConfig?.agents ?? defaults.agents,
temporaryChat: interfaceConfig?.temporaryChat ?? defaults.temporaryChat,
runCode: interfaceConfig?.runCode ?? defaults.runCode,
webSearch: interfaceConfig?.webSearch ?? defaults.webSearch,
customWelcome: interfaceConfig?.customWelcome ?? defaults.customWelcome,
});
@@ -48,6 +49,7 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
[PermissionTypes.AGENTS]: { [Permissions.USE]: loadedInterface.agents },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: loadedInterface.temporaryChat },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: loadedInterface.runCode },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: loadedInterface.webSearch },
});
await updateAccessPermissions(SystemRoles.ADMIN, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: loadedInterface.prompts },
@@ -56,6 +58,7 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
[PermissionTypes.AGENTS]: { [Permissions.USE]: loadedInterface.agents },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: loadedInterface.temporaryChat },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: loadedInterface.runCode },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: loadedInterface.webSearch },
});
let i = 0;
@@ -74,7 +77,7 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
// warn about config.modelSpecs.prioritize if true and presets are enabled, that default presets will conflict with prioritizing model specs.
if (config?.modelSpecs?.prioritize && loadedInterface.presets) {
logger.warn(
'Note: Prioritizing model specs can conflict with default presets if a default preset is set. It\'s recommended to disable presets from the interface or disable use of a default preset.',
"Note: Prioritizing model specs can conflict with default presets if a default preset is set. It's recommended to disable presets from the interface or disable use of a default preset.",
);
i === 0 && i++;
}
@@ -88,14 +91,14 @@ async function loadDefaultInterface(config, configDefaults, roleName = SystemRol
loadedInterface.parameters)
) {
logger.warn(
'Note: Enforcing model specs can conflict with the interface options: endpointsMenu, modelSelect, presets, and parameters. It\'s recommended to disable these options from the interface or disable enforcing model specs.',
"Note: Enforcing model specs can conflict with the interface options: endpointsMenu, modelSelect, presets, and parameters. It's recommended to disable these options from the interface or disable enforcing model specs.",
);
i === 0 && i++;
}
// warn if enforce is true and prioritize is not, that enforcing model specs without prioritizing them can lead to unexpected behavior.
if (config?.modelSpecs?.enforce && !config?.modelSpecs?.prioritize) {
logger.warn(
'Note: Enforcing model specs without prioritizing them can lead to unexpected behavior. It\'s recommended to enable prioritizing model specs if enforcing them.',
"Note: Enforcing model specs without prioritizing them can lead to unexpected behavior. It's recommended to enable prioritizing model specs if enforcing them.",
);
i === 0 && i++;
}

View File

@@ -16,6 +16,7 @@ describe('loadDefaultInterface', () => {
agents: true,
temporaryChat: true,
runCode: true,
webSearch: true,
},
};
const configDefaults = { interface: {} };
@@ -29,6 +30,7 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: true },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: true },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: true },
});
});
@@ -41,6 +43,7 @@ describe('loadDefaultInterface', () => {
agents: false,
temporaryChat: false,
runCode: false,
webSearch: false,
},
};
const configDefaults = { interface: {} };
@@ -54,6 +57,7 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: false },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: false },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: false },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: false },
});
});
@@ -70,6 +74,7 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: undefined },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});
@@ -82,6 +87,7 @@ describe('loadDefaultInterface', () => {
agents: undefined,
temporaryChat: undefined,
runCode: undefined,
webSearch: undefined,
},
};
const configDefaults = { interface: {} };
@@ -95,6 +101,7 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: undefined },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});
@@ -107,6 +114,7 @@ describe('loadDefaultInterface', () => {
agents: true,
temporaryChat: undefined,
runCode: false,
webSearch: true,
},
};
const configDefaults = { interface: {} };
@@ -120,6 +128,7 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: true },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: false },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: true },
});
});
@@ -133,6 +142,7 @@ describe('loadDefaultInterface', () => {
agents: true,
temporaryChat: true,
runCode: true,
webSearch: true,
},
};
@@ -145,6 +155,7 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: true },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: true },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: true },
});
});
@@ -161,6 +172,7 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: undefined },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});
@@ -177,6 +189,7 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: undefined },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});
@@ -193,6 +206,7 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: undefined },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});
@@ -218,6 +232,7 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: false },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: false },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});
@@ -231,6 +246,7 @@ describe('loadDefaultInterface', () => {
agents: undefined,
temporaryChat: undefined,
runCode: undefined,
webSearch: undefined,
},
};
@@ -243,6 +259,33 @@ describe('loadDefaultInterface', () => {
[PermissionTypes.AGENTS]: { [Permissions.USE]: undefined },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: undefined },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: undefined },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});
it('should call updateAccessPermissions with the correct parameters when WEB_SEARCH is undefined', async () => {
const config = {
interface: {
prompts: true,
bookmarks: false,
multiConvo: true,
agents: false,
temporaryChat: true,
runCode: false,
},
};
const configDefaults = { interface: {} };
await loadDefaultInterface(config, configDefaults);
expect(updateAccessPermissions).toHaveBeenCalledWith(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { [Permissions.USE]: true },
[PermissionTypes.BOOKMARKS]: { [Permissions.USE]: false },
[PermissionTypes.MULTI_CONVO]: { [Permissions.USE]: true },
[PermissionTypes.AGENTS]: { [Permissions.USE]: false },
[PermissionTypes.TEMPORARY_CHAT]: { [Permissions.USE]: true },
[PermissionTypes.RUN_CODE]: { [Permissions.USE]: false },
[PermissionTypes.WEB_SEARCH]: { [Permissions.USE]: undefined },
});
});
});

View File

@@ -10,6 +10,8 @@ const {
discordLogin,
facebookLogin,
appleLogin,
setupSaml,
openIdJwtLogin,
} = require('~/strategies');
const { isEnabled } = require('~/server/utils');
const keyvRedis = require('~/cache/keyvRedis');
@@ -19,7 +21,7 @@ const { logger } = require('~/config');
*
* @param {Express.Application} app
*/
const configureSocialLogins = (app) => {
const configureSocialLogins = async (app) => {
logger.info('Configuring social logins...');
if (process.env.GOOGLE_CLIENT_ID && process.env.GOOGLE_CLIENT_SECRET) {
@@ -62,10 +64,41 @@ const configureSocialLogins = (app) => {
}
app.use(session(sessionOptions));
app.use(passport.session());
setupOpenId();
const config = await setupOpenId();
if (isEnabled(process.env.OPENID_REUSE_TOKENS)) {
logger.info('OpenID token reuse is enabled.');
passport.use('openidJwt', openIdJwtLogin(config));
}
logger.info('OpenID Connect configured.');
}
if (
process.env.SAML_ENTRY_POINT &&
process.env.SAML_ISSUER &&
process.env.SAML_CERT &&
process.env.SAML_SESSION_SECRET
) {
logger.info('Configuring SAML Connect...');
const sessionOptions = {
secret: process.env.SAML_SESSION_SECRET,
resave: false,
saveUninitialized: false,
};
if (isEnabled(process.env.USE_REDIS)) {
logger.debug('Using Redis for session storage in SAML...');
const keyv = new Keyv({ store: keyvRedis });
const client = keyv.opts.store.client;
sessionOptions.store = new RedisStore({ client, prefix: 'saml_session' });
} else {
sessionOptions.store = new MemoryStore({
checkPeriod: 86400000, // prune expired entries every 24h
});
}
app.use(session(sessionOptions));
app.use(passport.session());
setupSaml();
logger.info('SAML Connect configured.');
}
};
module.exports = configureSocialLogins;
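
For reference, a hedged sketch (not part of the diff) of the environment gate the SAML branch above checks before wiring up passport and the session store. All values are placeholders; USE_REDIS only switches the SAML session store from memory to Redis.

// Placeholders only; set these in .env to enable the SAML branch above.
process.env.SAML_ENTRY_POINT = 'https://idp.example.com/sso/saml';
process.env.SAML_ISSUER = 'librechat-saml';
process.env.SAML_CERT = '/path/to/idp-cert.pem';
process.env.SAML_SESSION_SECRET = 'change-me';
process.env.USE_REDIS = 'false'; // 'true' stores SAML sessions in Redis

const samlEnabled = Boolean(
  process.env.SAML_ENTRY_POINT &&
    process.env.SAML_ISSUER &&
    process.env.SAML_CERT &&
    process.env.SAML_SESSION_SECRET,
);
console.log(`SAML login ${samlEnabled ? 'will' : 'will not'} be configured`);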

View File

@@ -4,10 +4,10 @@ const {
Capabilities,
EModelEndpoint,
isAgentsEndpoint,
AgentCapabilities,
isAssistantsEndpoint,
defaultRetrievalModels,
defaultAssistantsVersion,
defaultAgentCapabilities,
} = require('librechat-data-provider');
const { Providers } = require('@librechat/agents');
const partialRight = require('lodash/partialRight');
@@ -197,15 +197,7 @@ function generateConfig(key, baseURL, endpoint) {
}
if (agents) {
config.capabilities = [
AgentCapabilities.execute_code,
AgentCapabilities.file_search,
AgentCapabilities.artifacts,
AgentCapabilities.actions,
AgentCapabilities.tools,
AgentCapabilities.ocr,
AgentCapabilities.chain,
];
config.capabilities = defaultAgentCapabilities;
}
if (assistants && endpoint === EModelEndpoint.azureAssistants) {

Some files were not shown because too many files have changed in this diff.