Compare commits


166 Commits

Author SHA1 Message Date
Ruben Talstra
c2cdc869b7 🔧 chore: Comment out unused font family configurations and typography plugin in Tailwind CSS config 2025-02-27 16:36:14 +01:00
Ruben Talstra
2fa8d40d11 🔧 chore: Enhance Vite configuration with improved environment variable handling and chunking strategies 2025-02-27 16:34:16 +01:00
Ruben Talstra
b46c0ed43f 🔧 chore: Update ESLint and related dependencies in package.json and package-lock.json 2025-02-27 16:30:10 +01:00
Ruben Talstra
68c13ec610 Merge remote-tracking branch 'origin/chore/package-bumps-and-refactor' into chore/package-bumps-and-refactor
# Conflicts:
#	client/package.json
#	client/vite.config.ts
#	package-lock.json
2025-02-27 16:16:56 +01:00
Ruben Talstra
5e6b8f979c 🔧 chore: Update Tailwind CSS configuration and dependencies in package.json 2025-02-27 16:15:42 +01:00
Ruben Talstra
c1032fe819 🔧 chore: Update Tailwind CSS configuration and dependencies in package.json 2025-02-27 15:56:22 +01:00
Ruben Talstra
27df7fa7c1 🔧 chore: Update Tailwind CSS configuration and dependencies in package.json 2025-02-27 13:43:10 +01:00
Ruben Talstra
5f131c0132 🔧 chore: Update Tailwind CSS configuration and dependencies in package.json 2025-02-27 13:41:22 +01:00
Ruben Talstra
85d044b7cd 🔧 chore: Update Tailwind CSS configuration and dependencies in package.json 2025-02-27 13:40:41 +01:00
Ruben Talstra
ac58be68e7 🔧 chore: Update Tailwind CSS configuration and dependencies in package.json 2025-02-27 13:39:37 +01:00
Ruben Talstra
1e067150ac 🔧 chore: Update Tailwind CSS configuration and dependencies in package.json 2025-02-27 13:36:42 +01:00
Ruben Talstra
4c598e4b16 🔧 chore: Update Tailwind CSS configuration and dependencies in package.json 2025-02-27 13:35:38 +01:00
Ruben Talstra
b65c8ef9e2 🔧 chore: Update Tailwind CSS configuration and dependencies in package.json 2025-02-27 13:14:00 +01:00
Ruben Talstra
0261b253e1 🔧 chore: Update Tailwind CSS configuration and dependencies in package.json 2025-02-27 13:10:57 +01:00
Ruben Talstra
2e205b9186 🔧 chore: Update Tailwind CSS configuration and dependencies in package.json 2025-02-27 13:10:39 +01:00
Ruben Talstra
4b36bd088e 🔧 chore: Update Tailwind CSS configuration and dependencies in package.json 2025-02-27 13:08:22 +01:00
Ruben Talstra
67c50ff11f 🔧 chore: Update Tailwind CSS configuration and dependencies in package.json 2025-02-27 13:06:31 +01:00
Ruben Talstra
fcb1cf2eca 🔧 chore: Add Tailwind CSS configuration reference in style.css 2025-02-27 12:00:24 +01:00
Ruben Talstra
1741225f48 🔧 chore: Update dependencies in package.json and clean up ESLint config 2025-02-27 11:58:31 +01:00
github-actions[bot]
34f967eff8 🌍 i18n: Update translation.json with latest translations (#6009)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-02-26 16:23:56 -05:00
Danny Avila
be280004cf 🔧 refactor: Improve Params Handling, Remove Legacy Items, & Update Configs (#6074)
* chore: include all assets for service worker, remove unused tsconfig.node.json, eslint ignore vite config

* chore: exclude image files from service worker caching

* refactor: simplify googleSchema transformation and error handling

* fix: max output tokens cap for 3.7 models

* fix: skip index fixing in CI, development, and test environments

* ci: add maxOutputTokens handling tests for Claude models

* refactor: drop top_k and top_p parameters for claude-3.7 in AnthropicClient and add tests for new behavior

* refactor: conditionally include top_k and top_p parameters for non-claude-3.7 models

* ci: add unit tests for getLLMConfig function with various model options

* chore: remove all OPENROUTER_API_KEY legacy logic

* refactor: optimize stream chunk handling

* feat: reset model parameters button

* refactor: remove unused examples field from convoSchema and presetSchema

* chore: update librechat-data-provider version to 0.7.6993

* refactor: move excludedKeys set to data-provider for better reusability

* feat: enhance saveMessageToDatabase to handle unset fields and fetched conversation state

* feat: add 'iconURL' and 'greeting' to excludedKeys in data provider config

* fix: add optional chaining to user ID retrieval in getConvo call
2025-02-26 15:02:03 -05:00
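Note: the bullets above describe dropping top_k/top_p for claude-3.7 while keeping them for other Anthropic models. A minimal TypeScript sketch of that idea follows; the function name, regex, and option shape are illustrative assumptions, not the actual AnthropicClient code.

```ts
// Illustrative sketch (not the actual AnthropicClient code): keep top_k/top_p for
// older Claude models, but drop them for claude-3.7 as described above.
interface SamplingOptions {
  temperature?: number;
  top_k?: number;
  top_p?: number;
}

// Assumption: a simple pattern check is enough to identify claude-3.7 variants.
const isClaude37 = (model: string): boolean => /claude-3[.-]7/.test(model);

function buildSamplingOptions(model: string, opts: SamplingOptions): SamplingOptions {
  if (!isClaude37(model)) {
    return opts;
  }
  // claude-3.7: forward temperature only; top_k/top_p are omitted.
  return { temperature: opts.temperature };
}

// Older model keeps all options; 3.7 gets top_k/top_p stripped.
console.log(buildSamplingOptions('claude-3-5-sonnet', { temperature: 0.7, top_k: 40, top_p: 0.9 }));
console.log(buildSamplingOptions('claude-3-7-sonnet', { temperature: 0.7, top_k: 40, top_p: 0.9 }));
```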
Danny Avila
e14df5956a feat: Anthropic Agents Prompt Caching & UI Accessibility Enhancements (#6045)
* chore: remove auto-focus for now

* refactor: move react-hook-form Controller Logic to AgentSelect from AgentPanel

* fix: a11y focus issue with AgentSelect by never replacing it in its component tree

* fix: maintain ComboBox focus and force re-render on agent ID change in AgentPanel

* chore: `gemini-2.0-flash-lite-preview-02-05` (deprecated)

* refactor: extract cache control logic and headers configuration to helper functions in AnthropicClient

* feat: anthropic agents prompt caching

* chore: bump @librechat/agents and related dependencies

* fix: typo
2025-02-25 22:14:58 -05:00
Ruben Talstra
d3d7d11ea8 🌍 i18n: Add Georgian Language and Update Fallback Languages (#6022) 2025-02-25 20:28:58 -05:00
Danny Avila
f362f18870 🔗 fix: Shared Link with Markdown Code Error (#6016)
* refactor: Export AuthContext from AuthContextProvider

* refactor: Update useHasAccess to utilize useContext for AuthContext

* refactor: Enhance type definitions in useHasAccess for better type safety
2025-02-24 22:52:54 -05:00
Danny Avila
50e8769340 🚀 feat: Claude 3.7 Support + Reasoning (#6008)
* fix: missing console color methods for admin scripts

* feat: Anthropic Claude 3.7 Sonnet Support

* feat: update eventsource to version 3.0.2 and upgrade @modelcontextprotocol/sdk to 1.4.1

* fix: update DynamicInput to handle number type and improve initial value logic

* feat: first pass Anthropic Reasoning (Claude 3.7)

* feat: implement streaming support in AnthropicClient with reasoning UI handling

* feat: add missing xAI (grok) models
2025-02-24 20:08:55 -05:00
Danny Avila
0e719592c6 🔼 feat: "Run Code" Button Toggle (#5988)
* feat: Add 'Run Code' and 'Temporary Chat' permissions to role management

* feat: Add NextFunction typedef to api/typedefs.js

* feat: Add temporary chat and run code permissions to role schema

* refactor: Enhance access check middleware with logging for permission errors and better typing

* refactor: Set default value of USE permission to true in multiConvoPermissionsSchema

* refactor: Implement checkAccess function for separation of permission validation logic from middleware

* feat: Integrate permission checks for tool execution and enhance Markdown code block with execution capability

* fix: Convert REDIS_MAX_LISTENERS to a number, closes #5979
2025-02-23 14:01:36 -05:00
Ruben Talstra
2a74ceb630 🚀 feat: Add Custom Welcome Message in librechat.yaml (#5870)
* feat: Custom Welcome Message (#2967)

* don't think I'm on the right path?

*  feat: Implement custom welcome message configuration in interface
2025-02-22 17:43:00 -05:00
Marco Beretta
b404e372ec 🌟 feat: Enhance User Experience and SEO with Accessibility Updates and robots.txt (#5392)
* 🔈 fix: Refactor AudioRecorder to use button element for improved accessibility

* 🔈 fix: Update conversation menu button ID for improved accessibility

* 🔈 fix: Remove redundant role attribute from SidePanel for improved accessibility

* feat: Add robots.txt to manage web crawler access

* feat: Update index.html with meta description and remove legacy file

* fix: resolve merge conflicts.

* fix: resolve merge conflicts.

* fix: resolve merge conflicts.

* feat: Update index.html with meta description and remove legacy file

* 🔧 feat: Add legacy support and improve SidePanel accessibility

* 🔧 feat: Integrate express-static-gzip for improved static file serving and add new plugins for enhanced functionality

* 🔧 chore: Remove unused HTML ESLint plugin configurations and dependencies

---------

Co-authored-by: Ruben Talstra <RubenTalstra1211@outlook.com>
2025-02-22 17:42:20 -05:00
David
96c091c550 🤖 feat: 192x192 Icon for Android PWA (#5966)
* fix: Add 192x192 icon to allow the PWA to install on Android devices using Chrome.

* fix: Use less whitespace on icon-192x192.png. Re-generate maskable-icon.png and apple-touch-icon-180x180.png to be higher quality, crisper, and use fewer kilobytes.
2025-02-22 17:17:35 -05:00
Ruben Talstra
94a2c1ff10 ⚙️ ci: Trigger Restriction for Detect Unused NPM Packages (#5844)
The workflow now only runs on pull requests that modify:
- The root `package.json` or `package-lock.json` file
- Any file under the client folder
- Any file under the api folder
2025-02-22 17:15:29 -05:00
github-actions[bot]
1260551690 🌍 i18n: Update translation.json with latest translations (#5946)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-02-21 15:10:19 -05:00
Danny Avila
fc733d2b9e 👐 refactor: Agents Accessibility and Gemini Error Handling (#5972)
* style: Enhance ControlCombobox with Carat Display, ClassName, and Disabled State

* refactor(ModelPanel): replace SelectDropdown with ControlCombobox for improved accessibility

* style: Adjust padding and positioning in ModelPanel for improved layout

* style(ControlCombobox): add containerClassName and iconSide props for enhanced customization

* style(ControlCombobox): add iconClassName prop for customizable icon styling

* refactor(AgentPanel): enhance layout with new button for creating agents and adjust structure for better alignment

* refactor(AgentSelect): replace SelectDropDown with ControlCombobox for improved accessibility and layout

* feat(translation): add new translation key for improved UI clarity

* style(AgentSwitcher, AssistantSwitcher): add iconClassName prop for customizable icon styling

* refactor(AgentPanelSkeleton): improve layout of skeleton components to match new visual structure

* style(AgentPanel, AgentPanelSkeleton): add margin to flex container for improved layout consistency

* a11y(AgentSelect, ControlCombobox): add selectId prop for preventing focus going to start to page after agent selection

* fix(AgentSelect): update SELECT_ID constant for improved clarity in component identification

* fix(GoogleClient): update type annotation, add abort handling for content generation requests, catch "uncaught" abort errors and GoogleGenerativeAI errors from `@google/generative-ai`
2025-02-21 15:02:07 -05:00
Danny Avila
1e625f7557 🚀 feat: Support Redis Clusters, Trusted Proxy Setting, And Toggle Meilisearch Indexing (#5963)
* refactor: Improve MeiliSearch integration with environment-based configuration for running index sync

* chore: Remove Question issue template from GitHub repository

* feat: Enable indexing in MeiliSearch configuration and clean up error handling in indexSync

* feat: Update .env.example to include optional indexing configuration

* refactor: rename env var for disabling index sync to MEILI_NO_SYNC

* Added the option to change the default trusted proxy

* feat: Add TRUST_PROXY configuration to .env.example for reverse proxy settings

* feat: Enhance Redis support with cluster configuration and TLS options

* feat(redis): add cluster support, environment config and url mapping

- Add Redis cluster configuration with isEnabled flag
- Configure prefix and max listeners settings
- Improve code formatting and readability
- Fix URL vs host parameter handling
- Update environment variables and regex patterns

---------

Co-authored-by: Gil Assunção <gil.assuncao@parceiros.nos.pt>
Co-authored-by: Pedro Reis <pedro.malheiro@parceiros.nos.pt>
Co-authored-by: João Trigo Soares <joao.soares@parceiros.nos.pt>
2025-02-20 17:39:12 -05:00
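The Redis changes above add cluster support driven by environment configuration, plus prefix and max-listener settings. Below is a hedged sketch of what such wiring can look like, assuming ioredis as the client; the environment variable names other than REDIS_MAX_LISTENERS (mentioned in the commits above) are hypothetical.

```ts
// Hedged sketch assuming ioredis as the Redis client; not LibreChat's actual code.
import Redis, { Cluster } from 'ioredis';

const useCluster = process.env.USE_REDIS_CLUSTER === 'true';
const uris = (process.env.REDIS_URI ?? 'redis://127.0.0.1:6379').split(',');

const client = useCluster
  ? new Cluster(
      // Map each URI to a { host, port } node entry for the cluster client.
      uris.map((uri) => {
        const { hostname, port } = new URL(uri);
        return { host: hostname, port: Number(port) };
      }),
      { redisOptions: { keyPrefix: process.env.REDIS_KEY_PREFIX ?? '' } },
    )
  : new Redis(uris[0]);

// Raise the listener cap when many consumers share one connection.
client.setMaxListeners(Number(process.env.REDIS_MAX_LISTENERS ?? 40));
```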
Märt
46a96b9caa 🔢 chore: Remove Dollar Sign from Balance Display (#5948) 2025-02-20 16:49:43 -05:00
Marco Beretta
fe7013562b style: Enhance Styling & Accessibility (#5956)
*  feat: Enhance UI Components with Shadows and Accessibility Improvements

* 🔧 fix: Correct Category Labels and Values in API Model & Adjust Button Class in Prompt List
2025-02-20 16:17:43 -05:00
Danny Avila
fdb3cf3f58 🔧 fix: Resizable Panel Unmount Error & Code Env. File Re-Upload (#5947)
* 🔧 refactor: handle full path for code env. file re-upload

* fix: update react-resizable-panels to version 2.1.7 to resolve error thrown on unmount of artifacts; ref: https://github.com/bvaughn/react-resizable-panels/issues/372

* refactor: replace promptPrefix with systemMessage in GoogleClient for improved clarity, and to prevent saving LibreChat feature-specific instructions to the user's custom instructions
2025-02-19 14:53:22 -05:00
Ruben Talstra
538a2a144a 🔒 fix: 2FA Encrypt TOTP Secrets & Improve Docs (#5933)
* 🔒 fix: Integrate TOTP secret retrieval and encryption in Two-Factor Authentication

* 🔒 refactor: Simplify TOTP verification by removing commented-out code
2025-02-19 13:33:29 -05:00
Ruben Talstra
06282b584f 📜 ci: Automate CHANGELOG.md (#5838)
* feat: started with automated CHANGELOG.md

* fix: no `configuration.json` found

* refactor: `configuration.json`

* fix: missing label `configuration.json`

* fix: missing label `configuration.json`

* fix: missing label `configuration.json`

* fix: missing label `configuration.json`

* fix: missing label `configuration.json`

* ci: test new workflow action

* ci: test new workflow action

* ci: test new workflow action

* feat: working CHANGELOG.md generation

* feat: working CHANGELOG.md generation

* feat: working CHANGELOG.md generation

* feat: working CHANGELOG.md generation

* feat: working CHANGELOG.md generation

* feat: working CHANGELOG.md generation

* feat: working CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* fix: separate release and Unreleased workflows CHANGELOG.md generation

* refactor: only trigger the `unreleased-changelog` action on push to `main`

and `generate-release-changelog` only when pushing a tag with `v*.*.*`

* refactor: Runs only every Monday at 00:00 UTC
2025-02-18 08:35:43 -05:00
Danny Avila
ecddffa7b2 🐛 fix: RAG Results Sorted By Distance (#5931)
* refactor: Extract file unlinking logic into a separate function and don't throw error

* fix: RAG results are actually in distance, not score
2025-02-18 08:14:19 -05:00
Danny Avila
964a74c73b 🛠 refactor: Ensure File Deletions, File Naming, and Agent Resource Updates (#5928)
* refactor: Improve error logging for file upload and processing functions to prevent verbosity

* refactor: Add uploads directory to Docker Compose to persist file uploads

* refactor: `addAgentResourceFile` to handle edge case of non-existing `tool_resource` array

* refactor: Remove version specification from deploy-compose.yml

* refactor: Prefix filenames with file_id to ensure uniqueness in file uploads

* refactor: Enhance error handling in deleteVectors to log warnings for non-404 errors

* refactor: Limit file search results to top 5 based on relevance score

* 🌍 i18n: Update translation.json with latest translations

---------

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-02-17 19:37:03 -05:00
Ruben Talstra
f0f09138bd 🔒 feat: Two-Factor Authentication with Backup Codes & QR support (#5685)
* 🔒 feat: add Two-Factor Authentication (2FA) with backup codes & QR support (#5684)

* working version for generating TOTP and authenticate.

* better looking UI

* refactored + better TOTP logic

* fixed issue with UI

* fixed issue: remove initial setup when closing window before completion.

* added: onKeyDown for verify and disable

* refactored some code and cleaned it up a bit.

* refactored some code and cleaned it up a bit.

* refactored some code and cleaned it up a bit.

* refactored some code and cleaned it up a bit.

* fixed issue after updating to new main branch

* updated example

* refactored controllers

* removed `passport-totp` not used.

* update the generateBackupCodes function to generate 10 codes by default:

* update the backup codes to an object.

* fixed issue with backup codes not working

* be able to disable 2FA with backup codes.

* removed new env. replaced with JWT_SECRET

*  style: improved a11y and style for TwoFactorAuthentication

* 🔒 fix: small types checks

*  feat: improve 2FA UI components

* fix: remove unnecessary console log

* add option to disable 2FA with backup codes

* - add option to refresh backup codes
- (optional) maybe show the user which backup codes have already been used?

* removed text to be able to merge the main.

* removed eng tx to be able to merge

* fix: migrated lang to new format.

* feat: rewrote whole 2FA UI + refactored 2FA backend

* chore: resolving conflicts

* chore: resolving conflicts

* fix: missing packages, because of resolving conflicts.

* fix: UI issue and improved a11y

* fix: 2FA backup code not working

* fix: update localization keys for UI consistency

* fix: update button label to use localized text

* fix: refactor backup codes regeneration and update localization keys

* fix: remove outdated translation for shared links management

* fix: remove outdated 2FA code prompts from translation.json

* fix: add cursor styles for backup codes item based on usage state

* fix: resolve conflict issue

* fix: resolve conflict issue

* fix: resolve conflict issue

* fix: missing packages in package-lock.json

* fix: add disabled opacity to the verify button in TwoFactorScreen

* ⚙ fix: update 2FA logic to rely on backup codes instead of TOTP status

* ⚙️ fix: Simplify user retrieval in 2FA logic by removing unnecessary TOTP secret query

* ⚙️ test: Add unit tests for TwoFactorAuthController and twoFactorControllers

* ⚙️ fix: Ensure backup codes are validated as an array before usage in 2FA components

* ⚙️ fix: Update module path mappings in tests to use relative paths

* ⚙️ fix: Update moduleNameMapper in jest.config.js to remove the caret from path mapping

* ⚙️ refactor: Simplify import paths in TwoFactorAuthController and twoFactorControllers test files

* ⚙️ test: Mock twoFactorService methods in twoFactorControllers tests

* ⚙️ refactor: Comment out unused imports and mock setups in test files for two-factor authentication

* ⚙️ refactor: removed files

* refactor: Exclude totpSecret from user data retrieval in AuthController, LoginController, and jwtStrategy

* refactor: Consolidate backup code verification to apply DRY and remove default array in user schema

* refactor: Enhance two-factor authentication ux/flow with improved error handling and loading state management, prevent redirect to /login

---------

Co-authored-by: Marco Beretta <81851188+berry-13@users.noreply.github.com>
Co-authored-by: Danny Avila <danny@librechat.ai>
2025-02-17 19:09:36 -05:00
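The 2FA work above mentions a generateBackupCodes helper that produces 10 codes by default and stores codes as objects so used codes can be tracked. A hypothetical sketch of such a helper follows, assuming Node's crypto module; the real implementation may additionally hash the codes before storage.

```ts
// Hypothetical generateBackupCodes helper (the real implementation may also hash codes):
// produces N random codes plus a `used` flag, matching the "codes as objects" change above.
import { randomBytes } from 'crypto';

interface BackupCode {
  code: string;
  used: boolean;
}

function generateBackupCodes(count = 10): BackupCode[] {
  return Array.from({ length: count }, () => ({
    code: randomBytes(4).toString('hex'), // 8-character hex code
    used: false,
  }));
}

// Example: ten unused codes by default.
console.log(generateBackupCodes().length); // 10
```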
Marco Beretta
46ceae1a93 ⚖️ docs: Update LICENSE.md Year: 2024 -> 2025 (#5915) 2025-02-17 10:39:46 -05:00
Danny Avila
a65647a7de ⚙️ refactor: Enhance Logging, Navigation And Error Handling (#5910)
* refactor: Ensure Axios Errors are less Verbose if No Response

* refactor: Improve error handling in logAxiosError function

* fix: Prevent ModelSelect from rendering for Agent Endpoints

* refactor: Enhance logging functions with type parameter for better clarity

* refactor: Update buildDefaultConvo function to use optional endpoint parameter since we pass a default value for undefined

* refactor: Replace console logs with logger warnings and errors in useNavigateToConvo hook, and handle removed endpoint edge case

* chore: import order
2025-02-16 11:47:01 -05:00
Danny Avila
93dd365fda 🐞 fix: Add Null Checks for BaseURL in Agent Config (#5908) 2025-02-16 10:52:29 -05:00
Danny Avila
350e72dede 🧠 feat: Reasoning UI for Agents (#5904)
* chore: bump https-proxy-agent and @librechat/agents

* refactor: Improve error logging in OllamaClient for API fetch failures

* feat: Add DeepSeek provider support and enhance provider name handling

* refactor: Use Providers.OLLAMA constant for model name check in fetchModels function

* feat: Enhance formatAgentMessages to handle reasoning content type

* feat: OpenRouter Agent Reasoning

* hard work and dedication; git add .env.example :)

* fix: Handle Google social login with missing last name

Social login with Google was previously displaying 'undefined' when
a user's last name was empty or not provided.

Changes:
- Conditionally render last name only if it exists
- Prevent displaying 'undefined' when last name is missing

* fix: add missing file endings for developers yml,yaml and log

---------

Co-authored-by: Mohamed Al-Duraji <mbalduraji@college.harvard.edu>
Co-authored-by: Deepak Kendole <deepakdpk101@gmail.com>
Co-authored-by: Peter Rothlaender <peter.rothlaender@ginkgo.com>
2025-02-15 18:52:29 -05:00
Danny Avila
e3b5c59949 ⚙️ fix: File Config Handling (revisited) (#5881)
* fix: improve file handling by preventing memoization issues, providing config values at run time

* 🌍 i18n: Update translation.json with latest translations
2025-02-14 11:37:41 -05:00
Ruben Talstra
61f0480b57 🐞 i18n: Remove Debug Mode (#5879) 2025-02-14 10:52:59 -05:00
Ruben Talstra
04c2a5abe7 🌍 fix: Enhance i18n Support & Optimize Category Handling (#5866)
* fix: Missing Translations in Prompt Filters in Prompt Library

* fix: fixed issue with `zh`
feat: added `Estonian` language option

* fix: test for `i18n.ts`

* refactor: `pt` --> `pt-BR` and `pt-PT`

* feat: request access to another language. default is only one language during invite.
2025-02-14 08:30:27 -05:00
github-actions[bot]
52a6de2aa7 🌍 i18n: Update translation.json with latest translations (#5855)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-02-13 16:06:25 -05:00
Ruben Talstra
750b22d5f4 🌏 i18n: fix Traditional Chinese Language Option (#5854) 2025-02-13 14:20:30 -05:00
Danny Avila
28fe1218c5 🔧 fix: Ariakit Combobox Virtualization (#5851)
The Ariakit Combobox was not working well with several virtualization libraries, as its automated focus management conflicted with the scrolling/styling required by other virtualization methods. The entire strategy was replaced with the experimental Ariakit virtualization component `SelectRenderer`.

The component's performance also improved as a result of the latest Ariakit library changes.
2025-02-13 10:07:40 -05:00
github-actions[bot]
e402979cc5 🌍 i18n: Update translation.json with latest translations (#5849)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-02-13 09:34:14 -05:00
Ruben Talstra
4c8311b606 🛡️ chore: patch elliptic to address GHSA-vjh7-7g9h-fjfh (#5848) 2025-02-13 08:20:11 -05:00
Danny Avila
4fa13f45e3 🔃 refactor: Parent Message ID Handling on Error, Update Translations, Bump Agents (#5833)
* 📦 chore: Update @librechat/agents to version 2.0.5

* fix: Update error handling in AskController and EditController to include overrideParentMessageId when catching errors

* fix: Update parentMessageId assignment in AgentController to prioritize overrideParentMessageId

* 🌍 i18n: Update translation.json with latest translations

---------

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-02-12 16:49:32 -05:00
Ruben Talstra
7f48030452 🔄 chore: Enforce i18next Language Keys (#5803)
* chore: enforcing language keys to adhere to the new standard.

* chore: enforcing i18n forbids to write plain text in JSX markup

* chore: enforcing i18n forbids to write plain text in JSX markup

* fix: ci with checkbox for unused keys :)

* refactor: removed all the unused `i18n` keys
2025-02-12 15:48:13 -05:00
Danny Avila
2a506df443 🪄 fix: Agent Artifacts condition 2025-02-11 19:44:20 -05:00
Danny Avila
bfbaaebd2b 🪄 feat: Agent Artifacts (#5804)
* refactor: remove artifacts toggle

* refactor: allow hiding side panel while allowing artifacts view

* chore: rename SidePanelGroup to SidePanel for clarity

* Revert "refactor: remove artifacts toggle"

This reverts commit f884c2cfcd.

* feat: add artifacts capability to agent configuration

* refactor: conditionally set artifacts mode based on endpoint type

* feat: Artifacts Capability for Agents

* refactor: enhance getStreamText method to handle intermediate replies and add `stream_options` for openai/azure

* feat: localize progress text and improve UX in CodeAnalyze and ExecuteCode components for expanding analysis
2025-02-11 18:00:38 -05:00
Danny Avila
46f034250d v0.7.7-rc1 (#5801) 2025-02-11 11:45:07 -05:00
Danny Avila
4de9619bd9 🧠 fix: Handle Reasoning Chunk Edge Cases (#5800)
* refactor: better reasoning parsing

* style: better model selector mobile styling

* chore: bump vite
2025-02-11 11:28:18 -05:00
Ruben Talstra
404b27d045 📦 chore: Bump Packages (#5791)
* chore: started with updating packages to new version.
(a lot are outdated)

* fix: eslint to pass when no matching files changed.

* fix: eslint to pass when no matching files changed.

* fix: issue with strict in actions with the test

* chore: update more dependencies

* feat: scan for unused imported packages

* feat: scan for unused imported packages

* feat: scan for unused imported packages

* feat: scan for unused imported packages

* feat: scan for unused imported packages

* feat: scan for unused imported packages

* feat: scan for unused imported packages

* chore: removed Unused NPM Packages

* chore: removed Unused NPM Packages in `client/package.json`

* chore: removed Unused NPM Packages in `client/package.json`

* chore: Only comments when there are actual unused dependencies.

* chore: Only comments when there are actual unused dependencies.

* ci: test if it detects unused packages.

* ci: removed unused packages.

* ci: both static and dynamic i18n keys

* ci: revert back to no dynamic. use official nesting

* chore: remove override package: ajv
2025-02-11 09:55:13 -05:00
github-actions[bot]
936199b950 🌍 i18n: Update translation.json with latest translations (#5789)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-02-11 09:53:26 -05:00
owengo
d844e56c50 🔨 feat: Use x-strict attribute in OpenAPI Actions for Strict Function Definition (#4639)
* feat: manage an 'x-strict': true attribute in OpenAPI specs for assistants, which generates function calls with the strict attribute

* fix typo and lint errors

---------

Co-authored-by: Olivier Schiavo <olivier.schiavo@wengo.com>
2025-02-10 16:02:21 -05:00
Ruben Talstra
aea055b597 🔄 chore: Refactor Locize Workflow for Improved Translation Sync (#5781) 2025-02-10 16:01:27 -05:00
Ruben Talstra
3d0c27f525 🛠️ ci: Add Workflow to Detect Unused i18next Keys in PRs (#5782)
* created: checks for unused i18n keys in codebase.

* updated the file to test this new check on this PR.

* updated the file to test this new check on this PR.

* updated the file to test this new check on this PR.

* updated the file to test this new check on this PR.

* updated the file to test this new check on this PR.

* removed the testing option. will now only run in `client/src/**`
2025-02-10 16:00:57 -05:00
Ruben Talstra
d99a9db3f6 feat: OAuth for Actions (#5693)
* feat: OAuth for Actions

* WIP: PoC flow state manager

* refactor: Add identifier field to token model from action schema

* chore: fix potential file type issues

* ci: fix type issue with action metadata auth

* fix: ensure FlowManagerOptions has a default ttl value

* WIP: OAUTH actions

* WIP: first pass OAuth Action

* fix: standardize identifier usage in OAuth flow handling

* fix: update token retrieval to include userId in query and use correct identifier

* refactor: update token retrieval to use userId for OAuth token query

* feat: Tool Call Auth styling

* fix: streamline token creation and add type field to token schema

* refactor: cleanup OAuth flow by encrypting client credentials and ensuring oauth operations only run under condition

* refactor: use encrypted credentials in OAuth callback

* fix: update Token collection indexes to use expiresAt TTL index and not createdAt legacy index

* refactor: enhance Token index cleanup by improving logging and removing redundant index creation logic

* refactor: remove unused OAuth login route and related logic for improved clarity

* refactor: replace fetch with axios for OAuth token exchange and improve error handling

* refactor: better UX after authentication before oauth tool execution

* refactor: implement cleanup handlers for FlowStateManager intervals to enhance resource management

* refactor: encrypt OAuth tokens before storing and decrypt upon retrieval for enhanced security

* refactor: enhance authentication success page with improved styling and countdown feature

* refactor: add response_type parameter to OAuth redirect URI for improved compatibility

* chore: update translation.json new localizations

* chore: remove unused OGDialog import from OGDialogTemplate component

* refactor: Actions Auth using new Dialog styling, use same component with Agents/Assistants

* refactor: update removeNullishValues function to support removal of empty strings and adjust transform usage in schemas

* chore: bump version of librechat-data-provider to 0.7.6991

* refactor: integrate removeNullishValues function to clean metadata before encryption in agent and assistant routes

* refactor: update OAuth input fields to use 'password' type for better security

* refactor: update localization placeholders for sign-in message to use double curly braces

* refactor: add access_type parameter for offline access in createActionTool function

* refactor: implement handleOAuthToken function for token management and encryption

* feat: refresh token support

* refactor: add default expiration for access token and error handling for missing token

* feat: localizations for ActionAuth

* refactor: set refresh token expiration to null to not expire if expiry never given

* fix: prevent crash from error within async handleAbortError in AskController, EditController, and AgentController

* feat: Action Callback URL

* 🌍 i18n: Update translation.json with latest translations

* refactor: handle errors in flow state checking to prevent unhandled promise rejections

* fix: improve flow state concurrency to prevent multiple token creation calls

* refactor: RequestExecutor to use separate axios instance

* refactor: improve concurrency flows by keeping completed state until TTL expiry

* refactor: increase TTL for flow state management and adjust monitoring interval

* ci: mock axios instance creation in actions spec

* feat: add Babel and Jest configuration files; implement FlowStateManager tests with concurrency handling

* chore: add disableOAuth prop to ActionsAuth (not implemented for Assistants yet)

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-02-10 15:56:08 -05:00
Ruben Talstra
71c30a3640 🎯 ci: Update ESLint Workflow to target api/ and client/ changes (#5771) 2025-02-10 09:05:03 -05:00
Ruben Talstra
d90c9c4b77 📜 ci: Consolidate Locize Workflows for Missing Keys & PR Creation (#5769) 2025-02-10 09:03:59 -05:00
github-actions[bot]
37f6099f0a 🌍 i18n: Update translation.json with latest translations (#5765)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-02-10 09:02:56 -05:00
Marco Beretta
93415ebbd7 📝 docs: Update Language Request Template & Update README (#5766)
* Update README.md

* Update NEW-LANGUAGE-REQUEST.yml

* Updated: README.md
Removed: TRANSLATION.md

---------

Co-authored-by: Ruben Talstra <RubenTalstra1211@outlook.com>
2025-02-10 09:02:33 -05:00
github-actions[bot]
15c55d226e 🌍 i18n: Update translation.json with latest translations (#5764)
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-02-09 15:47:25 -05:00
Ruben Talstra
1f31171fca 🤖 ci: locize-pull-published-sync-pr.yml (#5763)
* fix: ci for locize-pull-published-sync-pr.yml

* fix: ci for locize-pull-published-sync-pr.yml

* fixed missing parameter: base: main

* removed running on pull_request
2025-02-09 15:18:01 -05:00
Ruben Talstra
96f1133f0d 🤖 ci: locize-pull-published-sync-pr.yml (#5762)
* fix: ci for locize-pull-published-sync-pr.yml

* fix: ci for locize-pull-published-sync-pr.yml

* fixed missing parameter: base: main
2025-02-09 14:51:28 -05:00
Ruben Talstra
86134415e9 🧹 chore: Migrate to Flat ESLint Config & Update Prettier Settings (#5737)
* chore: migrated eslint v8 to v9

* chore: migrated eslint v8 to v9

* ESLint only checks the files that have changed in the pull request.

* fix: ESLint only checks the files that have changed in the pull request.

* refactor: eslint only on changed files

* refactor: eslint only on changed files or added files

* refactor: eslint only on changed files or added files

* refactor: eslint only on changed files or added files

but only include files that are not deleted (ACMRTUXB: A, C, M, R, T, U, X, B).

* whoops missed something
2025-02-09 12:15:20 -05:00
Ruben Talstra
aae413cc71 🌎 i18n: React-i18next & i18next Integration (#5720)
* better i18n support with an internationalization framework.

* removed unused package

* auto sort for translation.json

* fixed tests with the new locales function

* added new CI actions from locize

* added a mention of Locize usage to the README.md

* added a mention of Locize usage to the README.md

* updated README.md and added TRANSLATION.md to the repo

* updated TRANSLATION.md badges

* updated README.md to go to the TRANSLATION.md when clicking on the Translation Progress badge

* updated TRANSLATION.md and added a new issue template.

* updated TRANSLATION.md and added a new issue template.

* updated issue template to add the iso code link.

* updated the new GitHub actions for `locize`

* updated label for new issue template --> i18n

* fixed type issue

* Fix eslint

* Fix eslint with key-spacing spacing

* fix: error type

* fix: handle undefined values in SortFilterHeader component

* fix: typing in Image component

* fix: handle optional promptGroup in PromptCard component

* fix: update localize function to accept string type and remove unnecessary JSX element

* fix: update localize function to enforce TranslationKeys type for better type safety

* fix: improve type safety and handle null values in Assistants component

* fix: enhance null checks for fileId in FilesListView component

* fix: localize 'Go back' button text in FilesListView component

* fix: update aria-label for menu buttons and add translation for 'Close Menu'

* docs: add Reasoning UI section for Chain-of-Thought AI models in README

* fix: enhance type safety by adding type for message in MultiMessage component

* fix: improve null checks and optional chaining in useAutoSave hook

* fix: improve handling of optional properties in cleanupPreset function

* fix: ensure isFetchingNextPage defaults to false and improve null checks for messages in Search component

* fix: enhance type safety and null checks in useBuildMessageTree hook

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2025-02-09 12:05:31 -05:00
Kay Belardinelli
2e8d969e35 🔇 a11y: Silence Unnecessary Icons for Screen Readers (#5726)
* a11y: silence miscellaneous icons that should not be read by screen reader (#5723, #5724)

* 📝 chore: Update bug report template with additional guidance and version information

* 📝 chore: Update bug report template to guide users on using Discussions for general inquiries and setup help

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2025-02-09 10:53:43 -05:00
Ruben Talstra
1519afd4b9 🧹 chore: Enhance Issue Templates with Emoji Labels (#5754)
* updated the labels in the templates.

* fixed spacing in label in the templates.
2025-02-09 14:41:57 +01:00
Stefan Siegel
d786bf263c 📱 feat: improve mobile viewport behavior with interactive-widget meta (#5675)
fixed mobile viewport behavior when keyboard appears: content now resizes properly instead of scrolling, keeping the top area visible
2025-02-08 00:15:49 +01:00
Danny Avila
8b2ffa141e 🔍 a11y: MultiSearch Clear Input (#5718)
* add accessibility features to model search

* chore: linting

* fix: Improve accessibility by adding aria-label to MultiSearch input

* refactor: MultiSearch component as button

* refactor: Update MultiSearch component styles for improved theming

* refactor: Update MultiSearch component styles for improved visual consistency

---------

Co-authored-by: Derek Jackson <derek_jackson@harvard.edu>
Co-authored-by: derek jackson <63861027+derekjackson-das@users.noreply.github.com>
Co-authored-by: Ruben Talstra <RubenTalstra1211@outlook.com>
2025-02-07 09:38:18 -05:00
5026
18339ec7bb 🌍 i18n: "Balance" Localization For ZhTraditional (#5682) 2025-02-06 20:16:22 -05:00
Marco Beretta
70e410f38b 💬 fix: Temporary Chat PR's broken components and improved UI (#5705)
* 💬 fix: Temporary Chat PR's broken components and improved UI

* 💬 fix: bring back hover effect on AudioRecorder button

* style: adjust position of Mention component popover

* refactor: PromptsCommand typing and style position

* refactor: virtualize mention UI

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2025-02-06 20:15:38 -05:00
Danny Avila
63afb317c6 🚀 fix: Resolve Google Client Issues, CDN Screenshots, Update Models (#5703)
* 🤖 refactor: streamline model selection logic for title model in GoogleClient

* refactor: add options for empty object schemas in convertJsonSchemaToZod

* refactor: add utility function to check for empty object schemas in convertJsonSchemaToZod

* fix: Google MCP Tool errors, and remove Object Unescaping as Google fixed this

* fix: google safetySettings

* feat: add safety settings exclusion via GOOGLE_EXCLUDE_SAFETY_SETTINGS environment variable

* fix: rename environment variable for console JSON string length

* fix: disable portal for dropdown in ExportModal component

* fix: screenshot functionality to use image placeholder for remote images

* feat: add visionMode property to BaseClient and initialize in GoogleClient to fix resendFiles issue

* fix: enhance formatMessages to include image URLs in message content for Vertex AI

* fix: safety settings for titleChatCompletion

* fix: remove deprecated model assignment in GoogleClient and streamline title model retrieval

* fix: remove unused image preloading logic in ScreenshotContext

* chore: update default google models to latest models shared by vertex ai and gen ai

* refactor: enhance Google error messaging

* fix: update token values and model limits for Gemini models

* ci: fix model matching

* chore: bump version of librechat-data-provider to 0.7.699
2025-02-06 18:13:18 -05:00
Andrés Restrepo
33e60c379b 📜 feat: Configure JSON Log Truncation Size (#5215) 2025-02-06 13:36:25 -05:00
Ruben Talstra
ae7814a2b3 🔧 fix: Wrong import useGetStartupConfig (#5692)
* fixed build failed error

* chore: import order

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2025-02-06 13:30:15 -05:00
Yuichi Oneda
8c404ae056 💬 feat: Temporary Chats (#5493)
* feat: add expiredAt property to Conversation and Message models

Added `expiredAt` property to both Conversation and Message schemas.
Configured `expireAfterSeconds` index in MongoDB to automatically delete documents after a specified period.

* feat(data-provider): add isTemporary and expiredAt properties to support temporary chats

Added `isTemporary` property to TPayload and TSubmission for API calls for temporary chat.
Additionally, added `expiredAt` property to `tConversationSchema` to determine if a chat is temporary.

* feat: implement isTemporary state management

Add Recoil state for tracking temporary conversations, update event handlers to respect temporary chat status

* feat: add configuration to interfaceconfig to hide the temporary chat switch

* feat: add Temporary Chat UI with switch and modify related behaviors

- Added a Temporary Chat switch button at the end of dropdown lists in each model.
- Updated the form background color to black when Temporary Chat is enabled.
- Modified Navigation to exclude Temporary Chats from the chat list.

* fix: exclude Temporary Chats from search results

Updated the getConvosQueried query to ensure that Temporary Chats are not included in the search results.

* fix: hide bookmark button for Temporary Chats

Updated the UI to ensure that the bookmark button is not displayed when a chat is a Temporary Chat.

* chore: update isTemporary state management in ChatRoute

* chore: fix to pass the tests
2025-02-06 11:11:47 -05:00
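The Temporary Chats commit above adds an expiredAt field and an expireAfterSeconds index so MongoDB removes expired documents automatically. A minimal Mongoose sketch of that pattern follows, assuming Mongoose is the ODM; field and model names mirror the commit description rather than the exact LibreChat schemas.

```ts
// Illustrative Mongoose sketch; field/model names mirror the commit description,
// not the exact LibreChat schemas.
import mongoose, { Schema } from 'mongoose';

const conversationSchema = new Schema({
  title: String,
  // Temporary chats get an expiredAt timestamp; permanent chats leave it unset.
  expiredAt: { type: Date },
});

// MongoDB TTL index: documents whose expiredAt date has passed are deleted by the
// TTL monitor; expireAfterSeconds: 0 means "expire exactly at expiredAt".
conversationSchema.index({ expiredAt: 1 }, { expireAfterSeconds: 0 });

export const Conversation = mongoose.model('Conversation', conversationSchema);
```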
Marco Beretta
5f9543f6fc 🛠️ fix: enhance UI/UX and address a11y issues in SetKeyDialog (#5672)
*  refactor: Improve UI consistency and accessibility in SetKeyDialog components

* 🎨 style: Add cursor pointer to Slider component for better UX

* 🐛 chore: Remove unnecessary console log from SetKeyDialog component
2025-02-05 16:35:07 -05:00
Marco Beretta
73fe0835cf 🎨 style: Prompt UI Refresh & A11Y Improvements (#5614)
* 🚀 feat: Add animated search input and improve filtering UI

* 🏄 refactor: Clean up category options and optimize event handlers in ChatGroupItem

* 🚀 refactor: 'Rename Prompt' option and enhance prompt filtering UI
Changed the useUpdatePromptGroup mutation in prompts.ts to replace the JSON.parse(JSON.stringify(...)) clones with structuredClone. This avoids errors when data contains non‑JSON values and improves data cloning reliability

* 🔧 refactor: Update Sharing Prompts UI; fix: Show info message only after updating switch status

* 🔧 refactor: Simplify condition checks and replace button with custom Button component in SharePrompt

* 🔧 refactor: Update DashGroupItem styles and improve accessibility with updated aria-label

* 🔧 refactor: Adjust layout styles in GroupSidePanel and enhance loading skeletons in List component

* 🔧 refactor: Improve layout and styling of AdvancedSwitch component; adjust DashBreadcrumb margin for better alignment

* 🔧 refactor: Add new surface colors for destructive actions and update localization strings for confirmation prompts

* 🔧 refactor: Update PromptForm and PromptName components for improved layout and styling; replace button with custom Button component

* 🔧 refactor: Enhance styling and layout of DashGroupItem, FilterPrompts, and Label components for improved user experience

* 🔧 refactor: Update DeleteBookmarkButton and Label components for improved layout and text handling

* 🔧 refactor: Simplify CategorySelector usage and update destructive surface colors for a11y

* 🔧 refactor: Update styling and layout of PromptName, SharePrompt, and DashGroupItem components; enhance Dropdown functionality with custom renderValue

* 🔧 refactor: Improve layout and styling of various components; update button sizes and localization strings for better accessibility and user experience

* 🔧 refactor: Add useCurrentPromptData hook and enhance RightPanel component; update CategorySelector for improved functionality and accessibility

* 🔧 refactor: Update input components and styling for Command and Description; enhance layout and accessibility in PromptVariables and PromptForm

* 🔧 refactor: Remove useCurrentPromptData hook and clean up related components; enhance PromptVersions layout

* 🔧 refactor: Enhance accessibility by adding aria-labels to buttons and inputs; improve localization for filter prompts

* 🔧 refactor: Enhance accessibility by adding aria-labels to various components; improve layout and styling in PromptForm and CategorySelector

* 🔧 refactor: Enhance accessibility by adding aria-labels to buttons and components; improve dialog roles and descriptions in SharePrompt and PromptForm

* 🔧 refactor: Improve accessibility by adding aria-labels and roles; enhance layout and styling in ChatGroupItem, ListCard, and ManagePrompts components

* 🔧 refactor: Update UI components for improved styling and accessibility; replace button elements with custom Button component and enhance layout in VariableForm, PromptDetails, and PromptVariables

* 🔧 refactor: Improve null checks for group and instanceProjectId in SharePrompt component; enhance readability and maintainability

* style: Enhance AnimatedSearchInput component with TypeScript types; improve conditional rendering for search states and accessibility

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2025-02-05 11:37:17 -05:00
heptapod
a44f5b4b6e 🌍 i18n: Fix "Balance" Localization For De (#5656) 2025-02-05 10:28:12 -05:00
RedwindA
40d9b1d2a2 🌍 i18n: Fix "Balance" Localization For Zh&ZhTraditional (#5632)
* Update translation of `balance` in Zh.ts

* Update translation of `balance` in ZhTraditional.ts
2025-02-05 15:58:23 +01:00
Danny Avila
6c33dc2eb3 🤖 refactor: Prevent Vertex AI from Setting Parameter Defaults (#5653)
* refactor: remove google defaults

* refactor: improve GoogleClient stream handling and metadata usage

* chore: update @librechat/agents to version 2.0.1

* fix: return client instance in GoogleClient configuration
2025-02-04 21:45:43 -05:00
Danny Avila
0312d4f4f4 🔧 refactor: Revamp Model and Tool Filtering Logic (#5637)
* 🔧 fix: Update regex to correctly match OpenAI model identifiers

* 🔧 fix: Enhance tool filtering logic in ToolService to handle inclusion and exclusion criteria for basic tools and toolkits

* feat: support o3-mini Azure streaming

* chore: Update model filtering logic to exclude audio and realtime models

* ci: linting error
2025-02-03 16:08:34 -05:00
Ruben Talstra
7c8a930061 feat: added Github Enterprise SSO login (#5621)
* https://github.com/danny-avila/LibreChat/issues/2812

* refactored the code to simplify it.

* removed unneeded code

* removed unneeded code
2025-02-03 15:30:02 -05:00
Ruben Talstra
93f5713c74 🛜 ci: OpenID Strategy Test Async Handling (#5613) 2025-02-03 10:57:49 -05:00
Igor
20aa0be85d 🌍 i18n: Add Missing "Balance" Localization For All Languages (#5594)
* Update AccountSettings.tsx

---------

Co-authored-by: Danny Avila <danacordially@gmail.com>
2025-02-03 10:56:44 -05:00
Sam Lewis
d7dc58dd23 🔧 fix: Fetch PWA Manifest with credentials over CORS (#5156)
When behind authentication (for eg: Cloudflare Access), browsers
won't send credentials when fetching the manifest file by default.

To fix, this change adds `crossorigin="use-credentials"` to the
manifest link tag by enabling the `useCredentials` option in
VitePWA.
2025-02-03 10:54:10 -05:00
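For reference, enabling the useCredentials option described above is a one-line change in the VitePWA plugin configuration; this is a simplified vite.config.ts sketch, not the project's full config.

```ts
// Simplified vite.config.ts fragment; all other options omitted for brevity.
import { defineConfig } from 'vite';
import { VitePWA } from 'vite-plugin-pwa';

export default defineConfig({
  plugins: [
    VitePWA({
      // Emits <link rel="manifest" crossorigin="use-credentials">, so the browser
      // sends cookies/credentials when fetching the manifest behind an auth proxy.
      useCredentials: true,
    }),
  ],
});
```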
Danny Avila
45dd2b262f 🛂 feat: OpenID Logout Redirect to end_session_endpoint (#5626)
* WIP: end session endpoint

* refactor: move useGetBannerQuery outside of package

* refactor: add queriesEnabled and move useGetEndpointsConfigQuery to data-provider (local)

* refactor: move useGetEndpointsQuery import to data-provider

* refactor: relocate useGetEndpointsQuery import to improve module organization

* refactor: move `useGetStartupConfig` from package to `~/data-provider`

* refactor: move useGetUserBalance to data-provider and update imports

* refactor: update query enabled conditions to include config check

* refactor: remove unused useConfigOverride import from useAppStartup

* refactor: integrate queriesEnabled state into file and search queries and move useGetSearchEnabledQuery to data-provider (local)

* refactor: move useGetUserQuery to data-provider and update imports

* refactor: enhance loginUser mutation with success and error handling as pass in options to hook

* refactor: update enabled condition in queries to handle undefined config

* refactor: enhance authentication mutations with queriesEnabled state management

* refactor: improve conditional rendering for error messages and feature flags in Login component

* refactor: remove unused queriesEnabled state from AuthContextProvider

* refactor: implement queriesEnabled state management in LoginLayout with timeout handling

* refactor: add conditional check for end session endpoint in OpenID strategy

* ci: fix tests after changes

* refactor: remove endSessionEndpoint from user schema and update logoutController to use OpenID issuer's end_session_endpoint

* refactor: update logoutController to use end_session_endpoint from issuer metadata
2025-02-03 10:53:04 -05:00
Danny Avila
d93f5c9061 ☁️ feat: Additional AI Gateway Provider Support; fix: Reasoning Effort for Presets/Agents (#5600)
* 🐛 fix: Prevent processing of non-artifact nodes in artifact plugin

* refactor: remove deprecated fields, add `reasoning_effort`

* refactor: move `reasoning_effort` to the second column in OpenAI settings

* feat: add support for additional AI Gateway provider in extractBaseURL function

* refactor: move `reasoning_effort` field to conversationPreset and remove from agentOptions
2025-02-02 09:04:10 -05:00
Danny Avila
352565c9a6 🎥 feat: YouTube Tool (#5582)
* adding youtube tool

* refactor: use short `url` param instead of `videoUrl`

* refactor: move API key retrieval to a separate credentials module

* refactor: remove unnecessary `isEdited` message property

* refactor: remove unnecessary `isEdited` message property pt. 2

* refactor: YouTube Tool with new `tool()` generator, handle tools already created by new `tool` generator

* fix: only reset request data for multi-convo messages

* refactor: enhance YouTube tool by adding transcript parsing and returning structured JSON responses

* refactor: update transcript parsing to handle raw response and clean up text output

* feat: support toolkits and refactor YouTube tool as a toolkit for better LLM usage

* refactor: remove unused OpenAPI specs and streamline tools transformation in loadAsyncEndpoints

* refactor: implement manifestToolMap for better tool management and streamline authentication handling

* feat: support toolkits for assistants

* refactor: rename loadedTools to toolDefinitions for clarity in PluginController and assistant controllers

* feat: complete support of toolkits for assistants

---------

Co-authored-by: Danilo Pejakovic <danilo.pejakovic@leoninestudios.com>
2025-01-31 19:11:04 -05:00
Danny Avila
33f6093775 🤖 feat: o3-mini (#5581)
* 🤖 feat: `o3-mini`

* chore: re-order vision models list to prioritize gpt-4o as a vision model over o1
2025-01-31 16:49:01 -05:00
Danny Avila
fdf0b41d08 🐛 fix: Handle content generation errors in GoogleClient (#5575) 2025-01-31 11:22:15 -05:00
Danny Avila
6920e23fb2 🤖 fix: Azure Agents after Upstream Breaking Change (#5571)
* 🤖 fix: Azure Agents after Upstream Breaking Change

* chore: bump @langchain/core & @librechat/agents

* fix: correct formatting in assistant actions update logic and use correctly filtered actions variable

* fix: linting errors
2025-01-31 09:50:49 -05:00
Ruben Talstra
e1a6268904 🍎 feat: Apple auth (#5473)
* implemented Apple Auth login.

Closes: #3438

TODO:
- write config Doc

* removed some comments

* removed comment

* Add unit tests for Apple login strategy

Introduce comprehensive tests for the Apple login strategy, covering new user creation, existing user updates, and error handling scenarios during the authentication flow. Mocks implemented for external dependencies to ensure isolated testing.

* Remove unnecessary blank line in socialLogins.js
2025-01-31 09:49:09 -05:00
Marco Beretta
1c459ed3af 🖱️ feat: Switch Scroll Button setting (#5332) 2025-01-31 07:52:52 -05:00
owengo
8a0c7d92bd 👷 feat: Allow Admin to Edit Agent/Assistant Actions (#4591)
* feat: allows admin to see and edits all actions

* feat: allows admin to see and edits all actions

* rollback: admins can edit all actions, no configuration

* fix: admins don't override the user of existing actions and they preserve the user of the assistant when creating a new action

---------

Co-authored-by: Olivier Schiavo <olivier.schiavo@wengo.com>
2025-01-31 07:45:02 -05:00
JM Addington
9373f77bb7 feat: Add Scripts for listing users and resetting passwords (#5438)
*  feat: Add user management scripts for listing users and resetting passwords

* chore: update package.json

---------

Co-authored-by: Danny Avila <danacordially@gmail.com>
2025-01-31 07:40:06 -05:00
Fuegovic
6f0ded058f 📝 docs: Update librechat.example.yaml (#5544)
Enable modelSelect and Presets by default
2025-01-31 07:35:18 -05:00
Danny Avila
19fa4d9f54 🧹 chore: Remove Deprecated BingAI Code & Address Mobile Focus (#5565)
* chore: remove all bing code

* chore: remove bing code and auto-focus effects

* chore: add back escapeRegExp helper function for regex special character handling

* chore: remove deprecated fields from settings and conversation schema

* fix: ensure default endpoint is set correctly in conversation setup

* feat: add disableFocus option to newConversation for improved search behavior
2025-01-30 17:22:29 -05:00
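The commit above re-adds an escapeRegExp helper for regex special character handling. A conventional version of such a helper looks like the sketch below; it is illustrative, and the project's exact implementation may differ.

```ts
// Conventional escapeRegExp helper (illustrative; the project's version may differ).
// Escapes characters with special meaning in regular expressions so arbitrary
// input can be embedded safely in a dynamic RegExp.
function escapeRegExp(str: string): string {
  return str.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
}

// Example: build a case-insensitive pattern from user input containing parentheses.
const pattern = new RegExp(escapeRegExp('price (USD)'), 'i');
console.log(pattern.test('Price (USD): 10')); // true
```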
James Lamine
1226f56d0c 🔧 fix: Add missing finish_reason to stream chunks (#5563) 2025-01-30 15:24:43 -05:00
James Lamine
85c6a706c3 🔧 fix: handle known OpenAI errors with empty intermediate reply (#5562) 2025-01-30 15:20:34 -05:00
Danny Avila
587d46a20b 🚀 feat: o1 Tool Calling & reasoning_effort (#5553)
* fix: Update @librechat/agents to version 1.9.98

* feat: o1 tool calling

* fix: Improve error logging in RouteErrorBoundary

* refactor: Move extractContent function to utils and clean up Artifact component

* refactor: optimize reasoning UI post-streaming and deprecate plugins rendering

* feat: reasoning_effort support

* fix: update request content type handling in openapiToFunction to remove default 'application/x-www-form-urlencoded'

* chore: bump v0.7.696 data-provider
2025-01-30 12:36:35 -05:00
Danny Avila
591a019766 🏄‍♂️ refactor: Optimize Reasoning UI & Token Streaming (#5546)
*  feat: Implement Show Thinking feature; refactor: testing thinking render optimizations

*  feat: Refactor Thinking component styles and enhance Markdown rendering

* chore: add back removed code, revert type changes

* chore: Add back resetCounter effect to Markdown component for improved code block indexing

* chore: bump @librechat/agents and google langchain packages

* WIP: reasoning type updates

* WIP: first pass, reasoning content blocks

* chore: revert code

* chore: bump @librechat/agents

* refactor: optimize reasoning tag handling

* style: ul indent padding

* feat: add Reasoning component to handle reasoning display

* feat: first pass, content reasoning part styling

* refactor: add content placeholder for endpoints using new stream handler

* refactor: only cache messages when requesting stream audio

* fix: circular dep.

* fix: add default param

* refactor: tts, only request after message stream, fix chrome autoplay

* style: update label for submitting state and add localization for 'Thinking...'

* fix: improve global audio pause logic and reset active run ID

* fix: handle artifact edge cases

* fix: remove unnecessary console log from artifact update test

* feat: add support for continued message handling with new streaming method

---------

Co-authored-by: Marco Beretta <81851188+berry-13@users.noreply.github.com>
2025-01-29 19:46:58 -05:00
James Lamine
d60a149ad9 🗨️ fix: Loading Shared Saved Prompts (#5515) 2025-01-28 10:35:17 -05:00
Evren Tan
ad4cfba710 🌱 feat(.env.example): add o1 models (#5106)
* feat(.env.example): add o1-mini and o1-preview to .env.example

* feat(.env.example): add o1 to .env.example

---------

Co-authored-by: Evren Tan <evren.tan@pointr.tech>
2025-01-28 15:56:05 +01:00
Danny Avila
4110209494 ♻️ fix: Prevent Instructions from Removal when nearing Max Context (#5516)
* refactor: getMessagesWithinTokenLimit to accept params object

* refactor: always include instructions in payload if provided

* ci: remove obsolete test

* refactor: update logoutUser to accept request object and handle session destruction

* test: enhance getMessagesWithinTokenLimit tests for instruction handling
2025-01-27 20:37:38 -05:00
Danny Avila
528ee62eb1 🤖 fix: GoogleClient Context Handling & GenAI Parameters (#5503)
* fix: remove legacy code for GoogleClient and fix model parameters for GenAI

* refactor: streamline client init logic

* refactor: remove legacy vertex clients, WIP remote vertex token count

* refactor: enhance GoogleClient with improved type definitions and streamline token count method

* refactor: remove unused methods and consolidate methods

* refactor: remove examples

* refactor: improve input handling logic in DynamicInput component

* refactor: enhance GoogleClient with token usage tracking and context handling improvements

* refactor: update GoogleClient to support 'learnlm' model and streamline model checks

* refactor: remove unused text model handling in GoogleClient

* refactor: record token usage for GoogleClient titles and handle edge cases

* chore: remove unused undici, addresses verbose version warning
2025-01-27 12:21:33 -05:00
oonishi3
47b72e8159 🉐 fix: incorrect handling for composing CJK texts in Safari (#5496) 2025-01-27 11:22:38 -05:00
Ruben Talstra
5f8fade7eb 🔧 chore: bump ``vite`` to patch CVE-2025-24010 (#5495)
Replaced an outdated Vite entry and corrected inconsistencies in dependencies.

Severity: moderate
Websites were able to send any requests to the development server and read the response in vite - https://github.com/advisories/GHSA-vg6x-rcgg-rjx6
2025-01-27 11:20:08 -05:00
Marco Beretta
e7de9c1576 🛡️ refactor: enhance email verification process (#5485) 2025-01-26 20:57:03 -05:00
Danny Avila
12a9a07eb0 🐛 fix: Update deletePromptController to include user role in query (#5488) 2025-01-26 19:03:12 -05:00
Danny Avila
8b31f255f5 🪙 fix: Deepseek Pricing 2025-01-25 10:13:46 -05:00
Danny Avila
60c846b679 🪙 fix: Deepseek Pricing & Titling (#5459) 2025-01-25 10:10:53 -05:00
Danny Avila
af430e46f4 feat: Add Google Parameters, Ollama/Openrouter Reasoning, & UI Optimizations (#5456)
* feat: Google Model Parameters

* fix: dynamic input number value, previously coerced by zod schema

* refactor: support openrouter reasoning tokens and XML for thinking directive to conform to ollama

* fix: virtualize combobox to prevent performance drop on re-renders of long model/agent/assistant lists

* refactor: simplify Fork component by removing unnecessary chat context index

* fix: prevent rendering of Thinking component when children are null

* refactor: update Markdown component to replace <think> tags and simplify remarkPlugins configuration

* refactor: reorder remarkPlugins to improve plugin configuration in Markdown component
2025-01-24 18:15:47 -05:00
Danny Avila
7818ae5c60 🐳 feat: Deepseek Reasoning UI (#5440) 2025-01-24 10:52:08 -05:00
Marco Beretta
b8b7f40e98 🌄 feat: Add RouteErrorBoundary for Improved Client Error handling (#5396)
* feat: Add RouteErrorBoundary for improved error handling and integrate react-error-boundary package

* feat: update error message

* fix: correct typo in containerClassName prop in Landing component
2025-01-24 08:34:44 -05:00
Danny Avila
ed57bb4711 🚀 feat: Artifact Editing & Downloads (#5428)
* refactor: expand container

* chore: bump @codesandbox/sandpack-react to latest

* WIP: first pass, show editor

* feat: implement ArtifactCodeEditor and ArtifactTabs components for enhanced artifact management

* refactor: fileKey

* refactor: auto scrolling code editor and add messageId to artifact

* feat: first pass, editing artifact

* feat: first pass, robust artifact replacement

* fix: robust artifact replacement & re-render when expected

* feat: Download Artifacts

* refactor: improve artifact editing UX

* fix: layout shift of new download button

* fix: enhance missing output checks and logging in StreamRunManager
2025-01-23 18:19:04 -05:00
Danny Avila
87383fec27 🔧 chore: Update Deepseek Pricing, Google Safety Settings (#5409)
* fix: google-thinking model safety settings fix

* chore: update pricing/context for deepseek models

* ci: update Deepseek model token limits to use dynamic mapping
2025-01-22 07:50:09 -05:00
Marco Beretta
2d3dd9e351 a11y: Enhance Accessibility in ToolSelectDialog, ThemeSelector and ChatGroupItem (#5395)
* feat: Add keyboard shortcut for theme switching and improve accessibility announcements

* fix: Improve accessibility of ToolSelectDialog close button

* feat: Enhance accessibility in ChatGroupItem component
2025-01-21 21:54:13 -05:00
Danny Avila
199e5e6eaf 🛠️ fix: Optionally add OpenID Sig. Algo. from Server Discovery (#5398)
* fix: Optionally add OpenID Sig. Algorithm from Server Discovery

* chore: bump vite to 5.4.14 for CVE-2025-24010

* chore: remove deprecated code

* fix: install missing undici

* fix: Add @waylaidwanderer/fetch-event-source package
2025-01-21 21:49:27 -05:00
Marco Beretta
fa9e778399 🔗 feat: Enhance Share Functionality, Optimize DataTable & Fix Critical Bugs (#5220)
* 🔄 refactor: frontend and backend share link logic; feat: qrcode for share link; feat: refresh link

* 🐛 fix: Conditionally render shared link and refactor share link creation logic

* 🐛 fix: Correct conditional check for shareId in ShareButton component

* 🔄 refactor: Update shared links API and data handling; improve query parameters and response structure

* 🔄 refactor: Update shared links pagination and response structure; replace pageNumber with cursor for improved data fetching

* 🔄 refactor: DataTable performance optimization

* fix: delete shared link cache update

* 🔄 refactor: Enhance shared links functionality; add conversationId to shared link model and update related components

* 🔄 refactor: Add delete functionality to SharedLinkButton; integrate delete mutation and confirmation dialog

* 🔄 feat: Add AnimatedSearchInput component with gradient animations and search functionality; update search handling in API and localization

* 🔄 refactor: Improve SharedLinks component; enhance delete functionality and loading states, optimize AnimatedSearchInput, and refine DataTable scrolling behavior

* fix: mutation type issues with deleted shared link mutation

* fix: MutationOptions types

* fix: Ensure only public shared links are retrieved in getSharedLink function

* fix: `qrcode.react` install location

* fix: ensure non-public shared links are not fetched when checking for existing shared links, and remove deprecated .exec() method for queries

* fix: types and import order

* refactor: cleanup share button UI logic, make more intuitive

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2025-01-21 09:31:05 -05:00
Danny Avila
460cde0c0b 🔒 chore: bump katex package to patch CVE-2025-23207 (#5383)
* chore: bump `katex` to patch `CVE-2025-23207`

* chore: prevent adding Parameters panel for agent endpoints in SideNav
2025-01-20 22:02:18 -05:00
Danny Avila
d6b4d83b68 🔥 feat: deepseek-reasoner Thought Streaming (#5379)
* 🔧 refactor: Remove unused penalties and enhance reasoning token handling in OpenAIClient

* 🔧 refactor: `addInstructions` default to adding instructions at index 0, flag for legacy behavior

* chore: remove long placeholder

* chore: update localization strings across multiple languages

* ci: adjust tests for new `addInstructions` behavior
2025-01-20 18:21:18 -05:00
Marco Beretta
79585e22d2 🔈fix: Accessible name on 'Prev' button in Prompts UI (#5369)
Fixes #5310

Add `aria-label="previous"` attribute to the 'Prev' button in the Prompts Panel.

* Modify `client/src/components/Chat/Prompts.tsx` to include `aria-label="previous"` attribute for the button.
2025-01-20 17:14:49 -05:00
Ragavendaran Puliyadi
a2305c3a7c 🐛 fix: use OpenID token signature algo as discovered from the server (#5348)
* 🐛 fix: use OpenID token signature algo as discovered from the server.

* 📜 refactor: Keeping other props that uses alg.

* 🔧 fix: handle missing property

* 📘 refactor: add comment block
2025-01-20 17:14:07 -05:00
Ragavendaran P R
d048a10b2e 📜 refactor: Log Error Messages when OAuth Fails (#5337) 2025-01-18 09:32:41 -05:00
Danny Avila
e6670cd411 🔧 chore: bump mongoose to patch CVE-2025-23061 (#5351) 2025-01-17 13:09:46 -05:00
Danny Avila
b35a8b78e2 🔧 refactor: Improve Agent Context & Minor Fixes (#5349)
* refactor: Improve Context for Agents

* 🔧 fix: Safeguard against undefined properties in OpenAIClient response handling

* refactor: log error before re-throwing for original stack trace

* refactor: remove toolResource state from useFileHandling, allow svg files

* refactor: prevent verbose logs from axios errors when using actions

* refactor: add silent method recordTokenUsage in AgentClient

* refactor: streamline token count assignment in BaseClient

* refactor: enhance safety settings handling for Gemini 2.0 model

* fix: capabilities structure in MCPConnection

* refactor: simplify civic integrity threshold handling in GoogleClient and llm

* refactor: update token count retrieval method in BaseClient tests

* ci: fix test for svg
2025-01-17 12:55:48 -05:00
Danny Avila
e309c6abef 🎯 fix: Prevent UI De-sync By Removing Redundant States (#5333)
* fix: remove local state from Dropdown causing de-sync

* refactor: cleanup STT code, avoid redundant states to prevent de-sync and side effects

* fix: reset transcript after sending final text to prevent data loss

* fix: clear timeout on component unmount to prevent memory leaks
2025-01-16 17:38:59 -05:00
Marco Beretta
b55e695541 🔧 fix: Maximize Chat Space for Agent Messages (#5330) 2025-01-16 17:28:33 -05:00
Danny Avila
24d30d7428 🏃‍♂️➡️ feat: Upgrade Meilisearch to v1.12.3 (#5327) 2025-01-16 08:25:33 -05:00
Danny Avila
aa80e4594e ♻️ refactor: Logout UX, Improved State Teardown, & Remove Unused Code (#5292)
* refactor: SearchBar and Nav components to streamline search functionality and improve state management

* refactor: remove refresh conversations

* chore: update useNewConvo calls to remove hardcoded default index

* refactor: null check for submission in useSSE hook

* refactor: remove useConversation hook and update useSearch to utilize useNewConvo

* refactor: remove conversation and banner store files; consolidate state management into misc; improve typing of families and add messagesSiblingIdxFamily

* refactor: more effectively clear all user/convo state without side effects on logout/delete user

* refactor: replace useParams with useLocation in SearchBar to correctly load conversation

* refactor: update SearchButtons to use button element and improve conversation ID handling

* refactor: use named function for `newConversation` for better call stack tracing

* refactor: enhance TermsAndConditionsModal to support array content and improve type definitions for terms of service

* refactor: add SetConvoProvider and message invalidation when navigating from search results to prevent initial route rendering edge cases

* refactor: rename getLocalStorageItems to localStorage and update imports for consistency

* refactor: move clearLocalStorage function to utils and simplify localStorage clearing logic

* refactor: migrate authentication mutations to a dedicated Auth data provider and update related tests
2025-01-12 12:57:10 -05:00
Danny Avila
24beda3d69 🐛 fix: Resolve 'Icon is Not a Function' Error in PresetItems (#5260)
* refactor: improve typing

* fix: "TypeError: Icon is not a function" with proper use of Functional Component and Improved Typing
2025-01-10 19:00:44 -05:00
Danny Avila
0855677a36 🌤️ feat: Add OpenWeather Tool for Weather Data Retrieval (#5246)
*  feat: Add OpenWeather Tool for Weather Data Retrieval 🌤️

* chore: linting

* chore: move test files

* fix: tool icon, allow user-provided keys, conform to app key assignment pattern

* chore: linting not included in #5212

---------

Co-authored-by: Jonathan Addington <jonathan.addington@jmaddington.com>
2025-01-10 08:54:08 -05:00
Danny Avila
ea1a5c8a30 🐛 fix: Handle optional endpoints in processModelSpecs function 2025-01-09 18:18:14 -05:00
Danny Avila
0f95604a67 refactor: Optimize Rendering Performance for Icons, Conversations (#5234)
* refactor: HoverButtons and Fork components to use explicit props

* refactor: improve typing for Fork Component

* fix: memoize SpecIcon to avoid unnecessary re-renders

* feat: introduce URLIcon component and update SpecIcon for improved icon handling

* WIP: optimizing icons

* refactor: simplify modelLabel assignment in Message components

* refactor: memoize ConvoOptions component to optimize rendering performance
2025-01-09 15:40:10 -05:00
Danny Avila
687ab32bd3 🔧 fix: Streamline Builder Links and Enhance UI Consistency (#5229)
* fix: Include iconURL in Bedrock client initialization

* fix: unnecessary filtering for agent file_search files

* chore: use theme bg colors

* refactor: rely on endpoint config for enabling builder links in side navigation instead of parameters

* fix: remove unnecessary keyProvided check for agent builder link
2025-01-09 12:03:35 -05:00
Lars Kiesow
dd927583a7 Provide production-ready memory store for express-session (#5212)
By default, the `express-session` library ships with a session store meant
only for testing. That is why you get a message like this when you start
up LibreChat with OIDC enabled:

    Warning: connect.session() MemoryStore is not
    designed for a production environment, as it will leak
    memory, and will not scale past a single process.

LibreChat can already use Redis as a session store, although Redis support
is still marked as experimental. Redis also makes the setup more complex,
since you need to configure and run yet another service.

This pull request provides a simple alternative: an in-memory session store
that the `express-session` maintainers list as production-ready¹. You can
still configure Redis, but this provides a simple, good default for everyone
else.

See also https://github.com/danny-avila/LibreChat/discussions/1014

¹⁾ https://github.com/expressjs/session?tab=readme-ov-file#compatible-session-stores
2025-01-09 11:23:51 -05:00
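For readers unfamiliar with the change described above, here is a minimal sketch of how an Express app can swap `express-session`'s default MemoryStore for the production-ready `memorystore` package; the secret source, prune interval, and other values below are illustrative assumptions, not the exact LibreChat wiring.

```js
// Minimal sketch (assumptions noted in comments): replace express-session's
// default MemoryStore with the production-ready `memorystore` package.
const express = require('express');
const session = require('express-session');
const MemoryStore = require('memorystore')(session);

const app = express();

app.use(
  session({
    // Assumption: the session secret is read from the environment.
    secret: process.env.SESSION_SECRET || 'change-me',
    resave: false,
    saveUninitialized: false,
    store: new MemoryStore({
      // Prune expired sessions every 24 hours so memory does not grow unbounded.
      checkPeriod: 24 * 60 * 60 * 1000,
    }),
  }),
);
```

Unlike the default store, this one evicts expired sessions on the `checkPeriod` interval, which is what makes it viable for a single-process production deployment; Redis remains the option for multi-process setups.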
Danny Avila
69a9b8b911 🐛 fix: Ensure Default ModelSpecs Are Set Correctly (#5218)
* 🐛 fix: default modelSpecs not being set

* feat: Add imageDetail parameter for OpenAI endpoints in tQueryParamsSchema

* feat: Implement processModelSpecs function to enhance model specs processing from configuration

* feat: Refactor configuration schemas and types for improved structure and clarity

* feat: Add append_current_datetime parameter to tQueryParamsSchema for enhanced endpoint functionality

* fix: Add endpointType to getSaveOptions and enhance endpoint handling in Settings component

* fix: Change endpointType to be nullable and optional in tConversationSchema for improved flexibility

* fix: allow save & submit for google endpoint
2025-01-08 21:57:00 -05:00
Danny Avila
916faf6447 🐛 fix: Correct Endpoint/Icon Handling, Update Module Resolutions (#5205)
* fix: agent modelSpec iconURLs not being recorded

* fix: prioritize message properties over conversation defaults in icon data

* fix: determine endpoint type from endpointsConfig

* chore: type issue with setting.columnSpan

* chore: remove redundant key indexing for keySchema

* chore: bump version to 0.7.691 in package.json

* chore: add stricter remark-gfm and mdast-util-gfm resolutions/overrides

* chore: remove rollup override and bump vite-plugin-pwa

* chore: reinstall remark-gfm for correct module resolution

* chore: reinstall vite-plugin-pwa
2025-01-07 11:09:18 -05:00
Danny Avila
8aa1e731ca feat: Quality-of-Life Chat/Edit-Message Enhancements (#5194)
* fix: rendering error for mermaid flowchart syntax

* feat: add submit button ref and enable submit on Ctrl+Enter in EditMessage component

* feat: add save button and keyboard shortcuts for saving and canceling in EditMessage component

* feat: collapse chat on max height

* refactor: implement scrollable detection for textarea on key down events and initial render

* feat: add regenerate button for error handling in HoverButtons, closes #3658

* feat: add functionality to edit latest user message with the up arrow key when the input is empty
2025-01-06 22:47:24 -05:00
Danny Avila
b01c744eb8 🧵 fix: Prevent Unnecessary Re-renders when Loading Chats (#5189)
* chore: typing

* chore: typing

* fix: enhance message scrolling logic to handle empty messages tree and ref checks

* fix: optimize message selection logic with useCallback for better performance

* chore: typing

* refactor: optimize icon rendering

* refactor: further optimize chat props

* fix: remove unnecessary console log in useQueryParams cleanup

* refactor: add queryClient to reset message data on new conversation initiation

* refactor: update data-testid attributes for consistency and improve code readability

* refactor: integrate queryClient to reset message data on new conversation initiation
2025-01-06 10:32:44 -05:00
Danny Avila
7987e04a2c 🔗 feat: Convo Settings via URL Query Params & Mention Models (#5184)
* feat: first pass, convo settings from query params

* feat: Enhance query parameter handling for assistants and agents endpoints

* feat: Update message formatting and localization for AI responses, bring awareness to mention command

* docs: Update translations README with detailed instructions for translation script usage and contribution guidelines

* chore: update localizations

* fix: missing agent_id assignment

* feat: add models as initial mention option

* feat: update query parameters schema to confine possible query params

* fix: normalize custom endpoints

* refactor: optimize custom endpoint type check
2025-01-04 20:36:12 -05:00
Danny Avila
766657da83 🔖 fix: Remove Local State from Bookmark Menu (#5181)
* chore: remove redundant

* fix: bookmark menu statefulness by removing local state
2025-01-04 12:01:13 -05:00
Danny Avila
7c61115a88 🐛 fix: Prevent Default Values in OpenAI/Custom Endpoint Agents (#5180)
* fix: prevent OpenAI/custom-endpoint agents from using default values

* fix: order of assigning client options

* chore: typing for runnable config
2025-01-04 09:41:59 -05:00
Danny Avila
c26b54c74d 🔄 refactor: Consolidate Tokenizer; Fix Jest Open Handles (#5175)
* refactor: consolidate tokenizer to singleton

* fix: remove legacy tokenizer code, add Tokenizer singleton tests

* ci: fix jest open handles
2025-01-03 18:11:14 -05:00
Danny Avila
bf0a84e45a ®️ feat: Support Rscript for Code Interpreter & recursionLimit for Agents (#5170)
* chore: bump @librechat/agents to v1.9.8 for rscript support

* chore: fix @langchain/google-genai dep., match agents

* chore: fix @langchain/google-vertexai to v0.1.5, match with agents

* chore: bump @librechat/agents to v1.9.9

* chore: update @librechat/agents to v1.9.91 and @langchain/google-vertexai to v0.1.6

* chore: increase MAX_FILE_SIZE to 150MB for file uploads

* chore: bump @librechat/agents to v1.9.92

* feat: support `recursionLimit` for agents

* chore: update configuration version to 1.2.1 in librechat.yaml and config.ts

* feat: add R language SVG icon to the assets and include it in ApiKeyDialog

* feat: add support for new vision model 'o1' and exclude 'o1-mini'
2025-01-03 16:50:00 -05:00
Julian Dreykorn
28966e3ddc 🧾 docs: Update Example librechat.yaml
* docs: Add mcpServers, agents and actions to the config
2025-01-03 08:35:00 -05:00
Thinger Soft
65b2d647a1 🔧 fix: Handle Concurrent File Mgmt. For Agents (#5159)
* fix: handle concurrent file upload for agents rag

Closes #4746:

* fix: handle concurrent file deletions for agents rag

Closes #5160:

* refactor: remove useless promise wrapping
2025-01-02 08:29:07 -05:00
Danny Avila
6c9a468b8e 🐛 fix: Artifacts Type Error, Tool Token Counts, and Agent Chat Import (#5142)
* fix: message import functionality to support content field

* fix: handle tool calls token counts in context window management

* fix: handle potential undefined size in FilePreview component
2024-12-30 13:01:47 -05:00
Marco Beretta
cb1921626e 🎨 feat: enhance Chat Input UI, File Mgmt. UI, Bookmarks a11y (#5112)
* 🎨 feat: improve file display and overflow handling in SidePanel components

* 🎨 feat: enhance bookmarks management UI and improve accessibility features

* 🎨 feat: enhance BookmarkTable and BookmarkTableRow components for improved layout and performance

* 🎨 feat: enhance file display and interaction in FilesView and ImagePreview components

* 🎨 feat: adjust minimum width for filename filter input in DataTable component

* 🎨 feat: enhance file upload UI with improved layout and styling adjustments

* 🎨 feat: add surface-hover-alt color and update FileContainer styling for improved UI

* 🎨 feat: update ImagePreview component styling for improved visual consistency

* 🎨 feat: add MaximizeChatSpace component and integrate chat space maximization feature

* 🎨 feat: enhance DataTable component with transition effects and update Checkbox styling for improved accessibility

* fix: enhance a11y for Bookmark buttons by adding space key support, ARIA labels, and correct html role for key presses

* fix: return focus back to trigger for BookmarkEditDialog (Edit and new bookmark buttons)

* refactor: ShareButton and ExportModal components children prop support; refactor DropdownPopup item handling

* refactor: enhance ExportAndShareMenu and ShareButton components with improved props handling and accessibility features

* refactor: add ref prop support to MenuItemProps and update ExportAndShareMenu and DropdownPopup components so focus correctly returns to menu item

* refactor: enhance ConvoOptions and DeleteButton components with improved props handling and accessibility features

* refactor: add triggerRef support to DeleteButton and update ConvoOptions for improved dialog handling

* refactor: accessible bookmarks menu

* refactor: improve styling and accessibility for bookmarks components

* refactor: add focusLoop support to DropdownPopup and update BookmarkMenu with Tooltip

* refactor: integrate TooltipAnchor into ExportAndShareMenu for enhanced accessibility

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2024-12-29 17:31:41 -05:00
Danny Avila
d9c59b08e6 🔑 feat: Implement TTL Mgmt. for In-Memory Keyv Stores (#5127)
This commit updates the cache stores in the `getLogStores.js` file to use Redis as the store if the `USE_REDIS` environment variable is enabled. It also adds a new environment variable `DEBUG_MEMORY_CACHE` to enable debugging of the memory cache.
2024-12-28 17:32:05 -05:00
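As a rough illustration of the pattern this commit describes, the sketch below selects a Keyv backend based on `USE_REDIS` and attaches a TTL so in-memory entries expire; the helper name, namespace, and TTL value are illustrative assumptions, not the actual `getLogStores.js` code.

```js
// Illustrative sketch only: choose a Keyv store from USE_REDIS and set a TTL.
const Keyv = require('keyv');
const KeyvRedis = require('@keyv/redis');

function createCache(namespace, ttl) {
  const useRedis = String(process.env.USE_REDIS).toLowerCase() === 'true';
  const options = { namespace, ttl };
  if (useRedis) {
    // With Redis enabled, entries live in Redis and honor the same ttl.
    options.store = new KeyvRedis(process.env.REDIS_URI);
  }
  // Without Redis, Keyv's default in-memory Map store is used and entries
  // expire after `ttl` milliseconds.
  return new Keyv(options);
}

// Example: a cache whose entries expire after 30 minutes.
const exampleCache = createCache('example-cache', 30 * 60 * 1000);
```

The commit description also mentions a `DEBUG_MEMORY_CACHE` variable for observing the in-memory side; that flag is not modeled in this sketch.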
Danny Avila
24cad6bbd4 🤖 feat: Support Google Agents, fix Various Provider Configurations (#5126)
* feat: Refactor ModelEndHandler to collect usage metadata only if it exists

* feat: google tool end handling, custom anthropic class for better token ux

* refactor: differentiate between client <> request options

* feat: initial support for google agents

* feat: only cache messages with non-empty text

* feat: Cache non-empty messages in chatV2 controller

* fix: anthropic llm client options llmConfig

* refactor: streamline client options handling in LLM configuration

* fix: VertexAI Agent Auth & Tool Handling

* fix: additional fields for llmConfig, however customHeaders are not supported by langchain, requires PR

* feat: set default location for vertexai LLM configuration

* fix: outdated OpenAI Client options for getLLMConfig

* chore: agent provider options typing

* chore: add note about currently unsupported customHeaders in langchain GenAI client

* fix: skip transaction creation when rawAmount is NaN
2024-12-28 17:15:03 -05:00
Danny Avila
a423eb8c7b fix: Improve Accessibility in Endpoints Menu/Navigation (#5123)
* fix: prevent mobile nav toggle from being focusable when not in mobile view, add types to <NavToggle/>

* fix: appropriate endpoint menu item role, add up/down focus mgmt, ensure set api key is focusable and accessible

* fix: localize link titles and update text color for improved accessibility in Nav component
2024-12-28 12:58:12 -05:00
Marco Beretta
d6f1ecf75c 🔒 fix: update refresh token handling to use plain token instead of hashed token (#5088)
* 🔒 fix: update refresh token handling to use plain token instead of hashed token

* 🔒 fix: simplify logoutUser by using plain refresh token for session lookup
2024-12-23 18:38:16 +01:00
Alex Torregrosa
04923dd185 🐋 refactor: Reduce Dockerfile.multi container size (#5066)
* fix: Reduce Dockerfile.multi container size

Reduced container size from 1.46 GB to 1.12 GB.

* Use `npm ci` without devDependencies for final image
* Remove unneeded `npm prune` commands

* Update Dockerfile.multi

---------

Co-authored-by: Danny Avila <danacordially@gmail.com>
2024-12-23 05:17:05 -05:00
Marco Beretta
dfe5498301 🎨 feat: enhance UI & accessibility in file handling components (#5086)
*  feat: Add localization for page display and enhance button styles

*  refactor: improve image preview component styles

*  refactor: enhance modal close behavior and prevent refocus on certain elements

*  refactor: enhance file row layout and improve image preview animation
2024-12-23 05:14:40 -05:00
Marco Beretta
bdb222d5f4 🔒 fix: resolve session persistence post password reset (#5077)
*  feat: Implement session management with CRUD operations and integrate into user workflows

*  refactor: Update session model import paths and enhance session creation logic in AuthService

*  refactor: Validate session and user ID formats in session management functions

*  style: Enhance UI components with improved styling and accessibility features

* chore: Update login form tests to use getByTestId instead of getByRole, remove console.log()

* chore: Update login form tests to use getByTestId instead of getByRole

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2024-12-23 05:12:07 -05:00
Marco Beretta
9bca2ae953 📘 docs: update readme.md (#5065) 2024-12-23 04:46:51 -05:00
821 changed files with 48000 additions and 57938 deletions

View File

@@ -20,6 +20,11 @@ DOMAIN_CLIENT=http://localhost:3080
DOMAIN_SERVER=http://localhost:3080
NO_INDEX=true
# Use the address that is at most n number of hops away from the Express application.
# req.socket.remoteAddress is the first hop, and the rest are looked for in the X-Forwarded-For header from right to left.
# A value of 0 means that the first untrusted address would be req.socket.remoteAddress, i.e. there is no reverse proxy.
# Defaulted to 1.
TRUST_PROXY=1
#===============#
# JSON Logging #
@@ -53,7 +58,7 @@ DEBUG_CONSOLE=false
# Endpoints #
#===================================================#
# ENDPOINTS=openAI,assistants,azureOpenAI,bingAI,google,gptPlugins,anthropic
# ENDPOINTS=openAI,assistants,azureOpenAI,google,gptPlugins,anthropic
PROXY=
@@ -83,7 +88,7 @@ PROXY=
#============#
ANTHROPIC_API_KEY=user_provided
# ANTHROPIC_MODELS=claude-3-5-haiku-20241022,claude-3-5-sonnet-20241022,claude-3-5-sonnet-latest,claude-3-5-sonnet-20240620,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_MODELS=claude-3-7-sonnet-latest,claude-3-7-sonnet-20250219,claude-3-5-haiku-20241022,claude-3-5-sonnet-20241022,claude-3-5-sonnet-latest,claude-3-5-sonnet-20240620,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_REVERSE_PROXY=
#============#
@@ -105,13 +110,6 @@ ANTHROPIC_API_KEY=user_provided
# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME= # Deprecated
# PLUGINS_USE_AZURE="true" # Deprecated
#============#
# BingAI #
#============#
BINGAI_TOKEN=user_provided
# BINGAI_HOST=https://cn.bing.com
#=================#
# AWS Bedrock #
#=================#
@@ -177,7 +175,7 @@ GOOGLE_KEY=user_provided
#============#
OPENAI_API_KEY=user_provided
# OPENAI_MODELS=gpt-4o,chatgpt-4o-latest,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
# OPENAI_MODELS=o1,o1-mini,o1-preview,gpt-4o,chatgpt-4o-latest,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
DEBUG_OPENAI=false
@@ -211,12 +209,6 @@ ASSISTANTS_API_KEY=user_provided
# More info, including how to enable use of Assistants with Azure here:
# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints/azure#using-assistants-with-azure
#============#
# OpenRouter #
#============#
# !!!Warning: Use the variable above instead of this one. Using this one will override the OpenAI endpoint
# OPENROUTER_API_KEY=
#============#
# Plugins #
#============#
@@ -256,11 +248,16 @@ AZURE_AI_SEARCH_SEARCH_OPTION_SELECT=
# DALLE3_AZURE_API_VERSION=
# DALLE2_AZURE_API_VERSION=
# Google
#-----------------
GOOGLE_SEARCH_API_KEY=
GOOGLE_CSE_ID=
# YOUTUBE
#-----------------
YOUTUBE_API_KEY=
# SerpAPI
#-----------------
SERPAPI_API_KEY=
@@ -294,6 +291,10 @@ MEILI_NO_ANALYTICS=true
MEILI_HOST=http://0.0.0.0:7700
MEILI_MASTER_KEY=DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFCt
# Optional: Disable indexing, useful in a multi-node setup
# where only one instance should perform an index sync.
# MEILI_NO_SYNC=true
#==================================================#
# Speech to Text & Text to Speech #
#==================================================#
@@ -391,12 +392,22 @@ FACEBOOK_CALLBACK_URL=/oauth/facebook/callback
GITHUB_CLIENT_ID=
GITHUB_CLIENT_SECRET=
GITHUB_CALLBACK_URL=/oauth/github/callback
# GitHub Enterprise
# GITHUB_ENTERPRISE_BASE_URL=
# GITHUB_ENTERPRISE_USER_AGENT=
# Google
GOOGLE_CLIENT_ID=
GOOGLE_CLIENT_SECRET=
GOOGLE_CALLBACK_URL=/oauth/google/callback
# Apple
APPLE_CLIENT_ID=
APPLE_TEAM_ID=
APPLE_KEY_ID=
APPLE_PRIVATE_KEY_PATH=
APPLE_CALLBACK_URL=/oauth/apple/callback
# OpenID
OPENID_CLIENT_ID=
OPENID_CLIENT_SECRET=
@@ -487,6 +498,16 @@ HELP_AND_FAQ_URL=https://librechat.ai
# Google tag manager id
#ANALYTICS_GTM_ID=user provided google tag manager id
#===============#
# REDIS Options #
#===============#
# REDIS_URI=10.10.10.10:6379
# USE_REDIS=true
# USE_REDIS_CLUSTER=true
# REDIS_CA=/path/to/ca.crt
#==================================================#
# Others #
#==================================================#
@@ -494,9 +515,6 @@ HELP_AND_FAQ_URL=https://librechat.ai
# NODE_ENV=
# REDIS_URI=
# USE_REDIS=
# E2E_USER_EMAIL=
# E2E_USER_PASSWORD=
@@ -514,4 +532,9 @@ HELP_AND_FAQ_URL=https://librechat.ai
# no-cache: Forces validation with server before using cached version
# no-store: Prevents storing the response entirely
# must-revalidate: Prevents using stale content when offline
# must-revalidate: Prevents using stale content when offline
#=====================================================#
# OpenWeather #
#=====================================================#
OPENWEATHER_API_KEY=

View File

@@ -1,213 +0,0 @@
module.exports = {
env: {
browser: true,
es2021: true,
node: true,
commonjs: true,
es6: true,
},
extends: [
'eslint:recommended',
'plugin:react/recommended',
'plugin:react-hooks/recommended',
'plugin:jest/recommended',
'prettier',
'plugin:jsx-a11y/recommended',
],
ignorePatterns: [
'client/dist/**/*',
'client/public/**/*',
'e2e/playwright-report/**/*',
'packages/mcp/types/**/*',
'packages/mcp/dist/**/*',
'packages/mcp/test_bundle/**/*',
'api/demo/**/*',
'packages/data-provider/types/**/*',
'packages/data-provider/dist/**/*',
'packages/data-provider/test_bundle/**/*',
'data-node/**/*',
'meili_data/**/*',
'node_modules/**/*',
],
parser: '@typescript-eslint/parser',
parserOptions: {
ecmaVersion: 'latest',
sourceType: 'module',
ecmaFeatures: {
jsx: true,
},
},
plugins: ['react', 'react-hooks', '@typescript-eslint', 'import', 'jsx-a11y'],
rules: {
'react/react-in-jsx-scope': 'off',
'@typescript-eslint/ban-ts-comment': ['error', { 'ts-ignore': 'allow' }],
indent: ['error', 2, { SwitchCase: 1 }],
'max-len': [
'error',
{
code: 120,
ignoreStrings: true,
ignoreTemplateLiterals: true,
ignoreComments: true,
},
],
'linebreak-style': 0,
curly: ['error', 'all'],
semi: ['error', 'always'],
'object-curly-spacing': ['error', 'always'],
'no-multiple-empty-lines': ['error', { max: 1 }],
'no-trailing-spaces': 'error',
'comma-dangle': ['error', 'always-multiline'],
// "arrow-parens": [2, "as-needed", { requireForBlockBody: true }],
// 'no-plusplus': ['error', { allowForLoopAfterthoughts: true }],
'no-console': 'off',
'import/no-cycle': 'error',
'import/no-self-import': 'error',
'import/extensions': 'off',
'no-promise-executor-return': 'off',
'no-param-reassign': 'off',
'no-continue': 'off',
'no-restricted-syntax': 'off',
'react/prop-types': ['off'],
'react/display-name': ['off'],
'no-nested-ternary': 'error',
'no-unused-vars': ['error', { varsIgnorePattern: '^_' }],
quotes: ['error', 'single'],
},
overrides: [
{
files: ['**/*.ts', '**/*.tsx'],
rules: {
'no-unused-vars': 'off', // off because it conflicts with '@typescript-eslint/no-unused-vars'
'react/display-name': 'off',
'@typescript-eslint/no-unused-vars': 'warn',
},
},
{
files: ['rollup.config.js', '.eslintrc.js', 'jest.config.js'],
env: {
node: true,
},
},
{
files: [
'**/*.test.js',
'**/*.test.jsx',
'**/*.test.ts',
'**/*.test.tsx',
'**/*.spec.js',
'**/*.spec.jsx',
'**/*.spec.ts',
'**/*.spec.tsx',
'setupTests.js',
],
env: {
jest: true,
node: true,
},
rules: {
'react/display-name': 'off',
'react/prop-types': 'off',
'react/no-unescaped-entities': 'off',
},
},
{
files: ['**/*.ts', '**/*.tsx'],
parser: '@typescript-eslint/parser',
parserOptions: {
project: './client/tsconfig.json',
},
plugins: ['@typescript-eslint/eslint-plugin', 'jest'],
extends: [
'plugin:@typescript-eslint/eslint-recommended',
'plugin:@typescript-eslint/recommended',
],
rules: {
'@typescript-eslint/no-explicit-any': 'error',
'@typescript-eslint/no-unnecessary-condition': 'warn',
'@typescript-eslint/strict-boolean-expressions': 'warn',
},
},
{
files: './packages/data-provider/**/*.ts',
overrides: [
{
files: '**/*.ts',
parser: '@typescript-eslint/parser',
parserOptions: {
project: './packages/data-provider/tsconfig.json',
},
},
],
},
{
files: './api/demo/**/*.ts',
overrides: [
{
files: '**/*.ts',
parser: '@typescript-eslint/parser',
parserOptions: {
project: './packages/data-provider/tsconfig.json',
},
},
],
},
{
files: './packages/mcp/**/*.ts',
overrides: [
{
files: '**/*.ts',
parser: '@typescript-eslint/parser',
parserOptions: {
project: './packages/mcp/tsconfig.json',
},
},
],
},
{
files: './config/translations/**/*.ts',
parser: '@typescript-eslint/parser',
parserOptions: {
project: './config/translations/tsconfig.json',
},
},
{
files: ['./packages/data-provider/specs/**/*.ts'],
parserOptions: {
project: './packages/data-provider/tsconfig.spec.json',
},
},
{
files: ['./api/demo/specs/**/*.ts'],
parserOptions: {
project: './packages/data-provider/tsconfig.spec.json',
},
},
{
files: ['./packages/mcp/specs/**/*.ts'],
parserOptions: {
project: './packages/mcp/tsconfig.spec.json',
},
},
],
settings: {
react: {
createClass: 'createReactClass', // Regex for Component Factory to use,
// default to "createReactClass"
pragma: 'React', // Pragma to use, default to "React"
fragment: 'Fragment', // Fragment to use (may be a property of <pragma>), default to "Fragment"
version: 'detect', // React version. "detect" automatically picks the version you have installed.
},
'import/parsers': {
'@typescript-eslint/parser': ['.ts', '.tsx'],
},
'import/resolver': {
typescript: {
project: ['./client/tsconfig.json'],
},
node: {
project: ['./client/tsconfig.json'],
},
},
},
};

View File

@@ -1,12 +1,19 @@
name: Bug Report
description: File a bug report
title: "[Bug]: "
labels: ["bug"]
labels: ["🐛 bug"]
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out this bug report!
Before submitting, please:
- Search existing [Issues and Discussions](https://github.com/danny-avila/LibreChat/discussions) to see if your bug has already been reported
- Use [Discussions](https://github.com/danny-avila/LibreChat/discussions) instead of Issues for:
- General inquiries
- Help with setup
- Questions about whether you're experiencing a bug
- type: textarea
id: what-happened
attributes:
@@ -15,6 +22,23 @@ body:
placeholder: Please give as many details as possible
validations:
required: true
- type: textarea
id: version-info
attributes:
label: Version Information
description: |
If using Docker, please run and provide the output of:
```bash
docker images | grep librechat
```
If running from source, please run and provide the output of:
```bash
git rev-parse HEAD
```
placeholder: Paste the output here
validations:
required: true
- type: textarea
id: steps-to-reproduce
attributes:
@@ -39,7 +63,21 @@ body:
id: logs
attributes:
label: Relevant log output
description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
description: |
Please paste relevant logs that were created when reproducing the error.
Log locations:
- Docker: Project root directory ./logs
- npm: ./api/logs
There are two types of logs that can help diagnose the issue:
- debug logs (debug-YYYY-MM-DD.log)
- error logs (error-YYYY-MM-DD.log)
Error logs contain exact stack traces and are especially helpful, but both can provide valuable information.
Please only include the relevant portions of logs that correspond to when you reproduced the error.
For UI-related issues, browser console logs can be very helpful. You can provide these as screenshots or paste the text here.
render: shell
- type: textarea
id: screenshots
@@ -53,4 +91,4 @@ body:
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md)
options:
- label: I agree to follow this project's Code of Conduct
required: true
required: true

View File

@@ -1,7 +1,7 @@
name: Feature Request
description: File a feature request
title: "Enhancement: "
labels: ["enhancement"]
title: "[Enhancement]: "
labels: ["enhancement"]
body:
- type: markdown
attributes:

View File

@@ -0,0 +1,42 @@
name: Locize Translation Access Request
description: Request access to an additional language in Locize for LibreChat translations.
title: "Locize Access Request: "
labels: ["🌍 i18n", "🔑 access request"]
body:
- type: markdown
attributes:
value: |
Thank you for your interest in contributing to LibreChat translations!
Please fill out the form below to request access to an additional language in **Locize**.
**🔗 Available Languages:** [View the list here](https://www.librechat.ai/docs/translation)
**📌 Note:** Ensure that the requested language is supported before submitting your request.
- type: input
id: account_name
attributes:
label: Locize Account Name
description: Please provide your Locize account name (e.g., John Doe).
placeholder: e.g., John Doe
validations:
required: true
- type: input
id: language_requested
attributes:
label: Language Code (ISO 639-1)
description: |
Enter the **ISO 639-1** language code for the language you want to translate into.
Example: `es` for Spanish, `zh-Hant` for Traditional Chinese.
**🔗 Reference:** [Available Languages](https://www.librechat.ai/docs/translation)
placeholder: e.g., es
validations:
required: true
- type: checkboxes
id: agreement
attributes:
label: Agreement
description: By submitting this request, you confirm that you will contribute responsibly and adhere to the project guidelines.
options:
- label: I agree to use my access solely for contributing to LibreChat translations.
required: true

View File

@@ -0,0 +1,33 @@
name: New Language Request
description: Request to add a new language for LibreChat translations.
title: "New Language Request: "
labels: ["✨ enhancement", "🌍 i18n"]
body:
- type: markdown
attributes:
value: |
Thank you for taking the time to submit a new language request! Please fill out the following details so we can review your request.
- type: input
id: language_name
attributes:
label: Language Name
description: Please provide the full name of the language (e.g., Spanish, Mandarin).
placeholder: e.g., Spanish
validations:
required: true
- type: input
id: iso_code
attributes:
label: ISO 639-1 Code
description: Please provide the ISO 639-1 code for the language (e.g., es for Spanish). You can refer to [this list](https://www.w3schools.com/tags/ref_language_codes.asp) for valid codes.
placeholder: e.g., es
validations:
required: true
- type: checkboxes
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md).
options:
- label: I agree to follow this project's Code of Conduct
required: true

View File

@@ -1,50 +0,0 @@
name: Question
description: Ask your question
title: "[Question]: "
labels: ["question"]
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill this!
- type: textarea
id: what-is-your-question
attributes:
label: What is your question?
description: Please give as many details as possible
placeholder: Please give as many details as possible
validations:
required: true
- type: textarea
id: more-details
attributes:
label: More Details
description: Please provide more details if needed.
placeholder: Please provide more details if needed.
validations:
required: true
- type: dropdown
id: browsers
attributes:
label: What is the main subject of your question?
multiple: true
options:
- Documentation
- Installation
- UI
- Endpoints
- User System/OAuth
- Other
- type: textarea
id: screenshots
attributes:
label: Screenshots
description: If applicable, add screenshots to help explain your problem. You can drag and drop, paste images directly here or link to them.
- type: checkboxes
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md)
options:
- label: I agree to follow this project's Code of Conduct
required: true

60
.github/configuration-release.json vendored Normal file
View File

@@ -0,0 +1,60 @@
{
"categories": [
{
"title": "### ✨ New Features",
"labels": ["feat"]
},
{
"title": "### 🌍 Internationalization",
"labels": ["i18n"]
},
{
"title": "### 👐 Accessibility",
"labels": ["a11y"]
},
{
"title": "### 🔧 Fixes",
"labels": ["Fix", "fix"]
},
{
"title": "### ⚙️ Other Changes",
"labels": ["ci", "style", "docs", "refactor", "chore"]
}
],
"ignore_labels": [
"🔁 duplicate",
"📊 analytics",
"🌱 good first issue",
"🔍 investigation",
"🙏 help wanted",
"❌ invalid",
"❓ question",
"🚫 wontfix",
"🚀 release",
"version"
],
"base_branches": ["main"],
"sort": {
"order": "ASC",
"on_property": "mergedAt"
},
"label_extractor": [
{
"pattern": "^(?:[^A-Za-z0-9]*)(feat|fix|chore|docs|refactor|ci|style|a11y|i18n)\\s*:",
"target": "$1",
"flags": "i",
"on_property": "title",
"method": "match"
},
{
"pattern": "^(?:[^A-Za-z0-9]*)(v\\d+\\.\\d+\\.\\d+(?:-rc\\d+)?).*",
"target": "version",
"flags": "i",
"on_property": "title",
"method": "match"
}
],
"template": "## [#{{TO_TAG}}] - #{{TO_TAG_DATE}}\n\nChanges from #{{FROM_TAG}} to #{{TO_TAG}}.\n\n#{{CHANGELOG}}\n\n[See full release details][release-#{{TO_TAG}}]\n\n[release-#{{TO_TAG}}]: https://github.com/#{{OWNER}}/#{{REPO}}/releases/tag/#{{TO_TAG}}\n\n---",
"pr_template": "- #{{TITLE}} by **@#{{AUTHOR}}** in [##{{NUMBER}}](#{{URL}})",
"empty_template": "- no changes"
}

68
.github/configuration-unreleased.json vendored Normal file
View File

@@ -0,0 +1,68 @@
{
"categories": [
{
"title": "### ✨ New Features",
"labels": ["feat"]
},
{
"title": "### 🌍 Internationalization",
"labels": ["i18n"]
},
{
"title": "### 👐 Accessibility",
"labels": ["a11y"]
},
{
"title": "### 🔧 Fixes",
"labels": ["Fix", "fix"]
},
{
"title": "### ⚙️ Other Changes",
"labels": ["ci", "style", "docs", "refactor", "chore"]
}
],
"ignore_labels": [
"🔁 duplicate",
"📊 analytics",
"🌱 good first issue",
"🔍 investigation",
"🙏 help wanted",
"❌ invalid",
"❓ question",
"🚫 wontfix",
"🚀 release",
"version",
"action"
],
"base_branches": ["main"],
"sort": {
"order": "ASC",
"on_property": "mergedAt"
},
"label_extractor": [
{
"pattern": "^(?:[^A-Za-z0-9]*)(feat|fix|chore|docs|refactor|ci|style|a11y|i18n)\\s*:",
"target": "$1",
"flags": "i",
"on_property": "title",
"method": "match"
},
{
"pattern": "^(?:[^A-Za-z0-9]*)(v\\d+\\.\\d+\\.\\d+(?:-rc\\d+)?).*",
"target": "version",
"flags": "i",
"on_property": "title",
"method": "match"
},
{
"pattern": "^(?:[^A-Za-z0-9]*)(action)\\b.*",
"target": "action",
"flags": "i",
"on_property": "title",
"method": "match"
}
],
"template": "## [Unreleased]\n\n#{{CHANGELOG}}\n\n---",
"pr_template": "- #{{TITLE}} by **@#{{AUTHOR}}** in [##{{NUMBER}}](#{{URL}})",
"empty_template": "- no changes"
}

View File

@@ -38,7 +38,7 @@ jobs:
- name: Install MCP Package
run: npm run build:mcp
- name: Create empty auth.json file
run: |
mkdir -p api/data
@@ -61,9 +61,4 @@ jobs:
run: cd api && npm run test:ci
- name: Run librechat-data-provider unit tests
run: cd packages/data-provider && npm run test:ci
- name: Run linters
uses: wearerequired/lint-action@v2
with:
eslint: true
run: cd packages/data-provider && npm run test:ci

73
.github/workflows/eslint-ci.yml vendored Normal file
View File

@@ -0,0 +1,73 @@
name: ESLint Code Quality Checks
on:
pull_request:
branches:
- main
- dev
- release/*
paths:
- 'api/**'
- 'client/**'
jobs:
eslint_checks:
name: Run ESLint Linting
runs-on: ubuntu-latest
permissions:
contents: read
security-events: write
actions: read
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Node.js 20.x
uses: actions/setup-node@v4
with:
node-version: 20
cache: npm
- name: Install dependencies
run: npm ci
# Run ESLint on changed files within the api/ and client/ directories.
- name: Run ESLint on changed files
env:
SARIF_ESLINT_IGNORE_SUPPRESSED: "true"
run: |
# Extract the base commit SHA from the pull_request event payload.
BASE_SHA=$(jq --raw-output .pull_request.base.sha "$GITHUB_EVENT_PATH")
echo "Base commit SHA: $BASE_SHA"
# Get changed files (only JS/TS files in api/ or client/)
CHANGED_FILES=$(git diff --name-only --diff-filter=ACMRTUXB "$BASE_SHA" HEAD | grep -E '^(api|client)/.*\.(js|jsx|ts|tsx)$' || true)
# Debug output
echo "Changed files:"
echo "$CHANGED_FILES"
# Ensure there are files to lint before running ESLint
if [[ -z "$CHANGED_FILES" ]]; then
echo "No matching files changed. Skipping ESLint."
echo "UPLOAD_SARIF=false" >> $GITHUB_ENV
exit 0
fi
# Set variable to allow SARIF upload
echo "UPLOAD_SARIF=true" >> $GITHUB_ENV
# Run ESLint
npx eslint --no-error-on-unmatched-pattern \
--config eslint.config.mjs \
--format @microsoft/eslint-formatter-sarif \
--output-file eslint-results.sarif $CHANGED_FILES || true
- name: Upload analysis results to GitHub
if: env.UPLOAD_SARIF == 'true'
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: eslint-results.sarif
wait-for-processing: true

View File

@@ -0,0 +1,94 @@
name: Generate Release Changelog PR
on:
push:
tags:
- 'v*.*.*'
jobs:
generate-release-changelog-pr:
permissions:
contents: write # Needed for pushing commits and creating branches.
pull-requests: write
runs-on: ubuntu-latest
steps:
# 1. Checkout the repository (with full history).
- name: Checkout Repository
uses: actions/checkout@v4
with:
fetch-depth: 0
# 2. Generate the release changelog using our custom configuration.
- name: Generate Release Changelog
id: generate_release
uses: mikepenz/release-changelog-builder-action@v5.1.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
configuration: ".github/configuration-release.json"
owner: ${{ github.repository_owner }}
repo: ${{ github.event.repository.name }}
outputFile: CHANGELOG-release.md
# 3. Update the main CHANGELOG.md:
# - If it doesn't exist, create it with a basic header.
# - Remove the "Unreleased" section (if present).
# - Prepend the new release changelog above previous releases.
# - Remove all temporary files before committing.
- name: Update CHANGELOG.md
run: |
# Determine the release tag, e.g. "v1.2.3"
TAG=${GITHUB_REF##*/}
echo "Using release tag: $TAG"
# Ensure CHANGELOG.md exists; if not, create a basic header.
if [ ! -f CHANGELOG.md ]; then
echo "# Changelog" > CHANGELOG.md
echo "" >> CHANGELOG.md
echo "All notable changes to this project will be documented in this file." >> CHANGELOG.md
echo "" >> CHANGELOG.md
fi
echo "Updating CHANGELOG.md…"
# Remove the "Unreleased" section (from "## [Unreleased]" until the first occurrence of '---') if it exists.
if grep -q "^## \[Unreleased\]" CHANGELOG.md; then
awk '/^## \[Unreleased\]/{flag=1} flag && /^---/{flag=0; next} !flag' CHANGELOG.md > CHANGELOG.cleaned
else
cp CHANGELOG.md CHANGELOG.cleaned
fi
# Split the cleaned file into:
# - header.md: content before the first release header ("## [v...").
# - tail.md: content from the first release header onward.
awk '/^## \[v/{exit} {print}' CHANGELOG.cleaned > header.md
awk 'f{print} /^## \[v/{f=1; print}' CHANGELOG.cleaned > tail.md
# Combine header, the new release changelog, and the tail.
echo "Combining updated changelog parts..."
cat header.md CHANGELOG-release.md > CHANGELOG.md.new
echo "" >> CHANGELOG.md.new
cat tail.md >> CHANGELOG.md.new
mv CHANGELOG.md.new CHANGELOG.md
# Remove temporary files.
rm -f CHANGELOG.cleaned header.md tail.md CHANGELOG-release.md
echo "Final CHANGELOG.md content:"
cat CHANGELOG.md
# 4. Create (or update) the Pull Request with the updated CHANGELOG.md.
- name: Create Pull Request
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.GITHUB_TOKEN }}
sign-commits: true
commit-message: "chore: update CHANGELOG for release ${GITHUB_REF##*/}"
base: main
branch: "changelog/${GITHUB_REF##*/}"
reviewers: danny-avila
title: "chore: update CHANGELOG for release ${GITHUB_REF##*/}"
body: |
**Description**:
- This PR updates the CHANGELOG.md by removing the "Unreleased" section and adding new release notes for release ${GITHUB_REF##*/} above previous releases.

View File

@@ -0,0 +1,106 @@
name: Generate Unreleased Changelog PR
on:
schedule:
- cron: "0 0 * * 1" # Runs every Monday at 00:00 UTC
jobs:
generate-unreleased-changelog-pr:
permissions:
contents: write # Needed for pushing commits and creating branches.
pull-requests: write
runs-on: ubuntu-latest
steps:
# 1. Checkout the repository on main.
- name: Checkout Repository on Main
uses: actions/checkout@v4
with:
ref: main
fetch-depth: 0
# 4. Get the latest version tag.
- name: Get Latest Tag
id: get_latest_tag
run: |
LATEST_TAG=$(git describe --tags $(git rev-list --tags --max-count=1) || echo "none")
echo "Latest tag: $LATEST_TAG"
echo "tag=$LATEST_TAG" >> $GITHUB_OUTPUT
# 5. Generate the Unreleased changelog.
- name: Generate Unreleased Changelog
id: generate_unreleased
uses: mikepenz/release-changelog-builder-action@v5.1.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
configuration: ".github/configuration-unreleased.json"
owner: ${{ github.repository_owner }}
repo: ${{ github.event.repository.name }}
outputFile: CHANGELOG-unreleased.md
fromTag: ${{ steps.get_latest_tag.outputs.tag }}
toTag: main
# 7. Update CHANGELOG.md with the new Unreleased section.
- name: Update CHANGELOG.md
id: update_changelog
run: |
# Create CHANGELOG.md if it doesn't exist.
if [ ! -f CHANGELOG.md ]; then
echo "# Changelog" > CHANGELOG.md
echo "" >> CHANGELOG.md
echo "All notable changes to this project will be documented in this file." >> CHANGELOG.md
echo "" >> CHANGELOG.md
fi
echo "Updating CHANGELOG.md…"
# Extract content before the "## [Unreleased]" (or first version header if missing).
if grep -q "^## \[Unreleased\]" CHANGELOG.md; then
awk '/^## \[Unreleased\]/{exit} {print}' CHANGELOG.md > CHANGELOG_TMP.md
else
awk '/^## \[v/{exit} {print}' CHANGELOG.md > CHANGELOG_TMP.md
fi
# Append the generated Unreleased changelog.
echo "" >> CHANGELOG_TMP.md
cat CHANGELOG-unreleased.md >> CHANGELOG_TMP.md
echo "" >> CHANGELOG_TMP.md
# Append the remainder of the original changelog (starting from the first version header).
awk 'f{print} /^## \[v/{f=1; print}' CHANGELOG.md >> CHANGELOG_TMP.md
# Replace the old file with the updated file.
mv CHANGELOG_TMP.md CHANGELOG.md
# Remove the temporary generated file.
rm -f CHANGELOG-unreleased.md
echo "Final CHANGELOG.md:"
cat CHANGELOG.md
# 8. Check if CHANGELOG.md has any updates.
- name: Check for CHANGELOG.md changes
id: changelog_changes
run: |
if git diff --quiet CHANGELOG.md; then
echo "has_changes=false" >> $GITHUB_OUTPUT
else
echo "has_changes=true" >> $GITHUB_OUTPUT
fi
# 9. Create (or update) the Pull Request only if there are changes.
- name: Create Pull Request
if: steps.changelog_changes.outputs.has_changes == 'true'
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.GITHUB_TOKEN }}
base: main
branch: "changelog/unreleased-update"
sign-commits: true
commit-message: "action: update Unreleased changelog"
title: "action: update Unreleased changelog"
body: |
**Description**:
- This PR updates the Unreleased section in CHANGELOG.md.
- It compares the current main branch with the latest version tag (determined as ${{ steps.get_latest_tag.outputs.tag }}),
regenerates the Unreleased changelog, removes any old Unreleased block, and inserts the new content.

93
.github/workflows/i18n-unused-keys.yml vendored Normal file
View File

@@ -0,0 +1,93 @@
name: Detect Unused i18next Strings
on:
pull_request:
paths:
- "client/src/**"
- "api/**"
jobs:
detect-unused-i18n-keys:
runs-on: ubuntu-latest
permissions:
pull-requests: write # Required for posting PR comments
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Find unused i18next keys
id: find-unused
run: |
echo "🔍 Scanning for unused i18next keys..."
# Define paths
I18N_FILE="client/src/locales/en/translation.json"
SOURCE_DIRS=("client/src" "api")
# Check if translation file exists
if [[ ! -f "$I18N_FILE" ]]; then
echo "::error title=Missing i18n File::Translation file not found: $I18N_FILE"
exit 1
fi
# Extract all keys from the JSON file
KEYS=$(jq -r 'keys[]' "$I18N_FILE")
# Track unused keys
UNUSED_KEYS=()
# Check if each key is used in the source code
for KEY in $KEYS; do
FOUND=false
for DIR in "${SOURCE_DIRS[@]}"; do
if grep -r --include=\*.{js,jsx,ts,tsx} -q "$KEY" "$DIR"; then
FOUND=true
break
fi
done
if [[ "$FOUND" == false ]]; then
UNUSED_KEYS+=("$KEY")
fi
done
# Output results
if [[ ${#UNUSED_KEYS[@]} -gt 0 ]]; then
echo "🛑 Found ${#UNUSED_KEYS[@]} unused i18n keys:"
echo "unused_keys=$(echo "${UNUSED_KEYS[@]}" | jq -R -s -c 'split(" ")')" >> $GITHUB_ENV
for KEY in "${UNUSED_KEYS[@]}"; do
echo "::warning title=Unused i18n Key::'$KEY' is defined but not used in the codebase."
done
else
echo "✅ No unused i18n keys detected!"
echo "unused_keys=[]" >> $GITHUB_ENV
fi
- name: Post verified comment on PR
if: env.unused_keys != '[]'
run: |
PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")
# Format the unused keys list as checkboxes for easy manual checking.
FILTERED_KEYS=$(echo "$unused_keys" | jq -r '.[]' | grep -v '^\s*$' | sed 's/^/- [ ] `/;s/$/`/' )
COMMENT_BODY=$(cat <<EOF
### 🚨 Unused i18next Keys Detected
The following translation keys are defined in \`translation.json\` but are **not used** in the codebase:
$FILTERED_KEYS
⚠️ **Please remove these unused keys to keep the translation files clean.**
EOF
)
gh api "repos/${{ github.repository }}/issues/${PR_NUMBER}/comments" \
-f body="$COMMENT_BODY" \
-H "Authorization: token ${{ secrets.GITHUB_TOKEN }}"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Fail workflow if unused keys found
if: env.unused_keys != '[]'
run: exit 1

72
.github/workflows/locize-i18n-sync.yml vendored Normal file
View File

@@ -0,0 +1,72 @@
name: Sync Locize Translations & Create Translation PR
on:
push:
branches: [main]
repository_dispatch:
types: [locize/versionPublished]
jobs:
sync-translations:
name: Sync Translation Keys with Locize
runs-on: ubuntu-latest
steps:
- name: Checkout Repository
uses: actions/checkout@v4
- name: Set Up Node.js
uses: actions/setup-node@v4
with:
node-version: 20
- name: Install locize CLI
run: npm install -g locize-cli
# Sync translations (Push missing keys & remove deleted ones)
- name: Sync Locize with Repository
if: ${{ github.event_name == 'push' }}
run: |
cd client/src/locales
locize sync --api-key ${{ secrets.LOCIZE_API_KEY }} --project-id ${{ secrets.LOCIZE_PROJECT_ID }} --language en
# When triggered by repository_dispatch, skip sync step.
- name: Skip sync step on non-push events
if: ${{ github.event_name != 'push' }}
run: echo "Skipping sync as the event is not a push."
create-pull-request:
name: Create Translation PR on Version Published
runs-on: ubuntu-latest
needs: sync-translations
permissions:
contents: write
pull-requests: write
steps:
# 1. Check out the repository.
- name: Checkout Repository
uses: actions/checkout@v4
# 2. Download translation files from locize.
- name: Download Translations from locize
uses: locize/download@v1
with:
project-id: ${{ secrets.LOCIZE_PROJECT_ID }}
path: "client/src/locales"
# 3. Create a Pull Request using built-in functionality.
- name: Create Pull Request
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.GITHUB_TOKEN }}
sign-commits: true
commit-message: "🌍 i18n: Update translation.json with latest translations"
base: main
branch: i18n/locize-translation-update
reviewers: danny-avila
title: "🌍 i18n: Update translation.json with latest translations"
body: |
**Description**:
- 🎯 **Objective**: Update `translation.json` with the latest translations from locize.
- 🔍 **Details**: This PR is automatically generated upon receiving a versionPublished event with version "latest". It reflects the newest translations provided by locize.
- ✅ **Status**: Ready for review.
labels: "🌍 i18n"

.github/workflows/unused-packages.yml vendored Normal file

@@ -0,0 +1,153 @@
name: Detect Unused NPM Packages
on:
pull_request:
paths:
- 'package.json'
- 'package-lock.json'
- 'client/**'
- 'api/**'
jobs:
detect-unused-packages:
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- uses: actions/checkout@v4
- name: Use Node.js 20.x
uses: actions/setup-node@v4
with:
node-version: 20
cache: 'npm'
- name: Install depcheck
run: npm install -g depcheck
- name: Validate JSON files
run: |
for FILE in package.json client/package.json api/package.json; do
if [[ -f "$FILE" ]]; then
jq empty "$FILE" || (echo "::error title=Invalid JSON::$FILE is invalid" && exit 1)
fi
done
- name: Extract Dependencies Used in Scripts
id: extract-used-scripts
run: |
extract_deps_from_scripts() {
local package_file=$1
if [[ -f "$package_file" ]]; then
jq -r '.scripts | to_entries[].value' "$package_file" | \
grep -oE '([a-zA-Z0-9_-]+)' | sort -u > used_scripts.txt
else
touch used_scripts.txt
fi
}
extract_deps_from_scripts "package.json"
mv used_scripts.txt root_used_deps.txt
extract_deps_from_scripts "client/package.json"
mv used_scripts.txt client_used_deps.txt
extract_deps_from_scripts "api/package.json"
mv used_scripts.txt api_used_deps.txt
- name: Extract Dependencies Used in Source Code
id: extract-used-code
run: |
extract_deps_from_code() {
local folder=$1
local output_file=$2
if [[ -d "$folder" ]]; then
grep -rEho "require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)" "$folder" --include=\*.{js,ts,mjs,cjs} | \
sed -E "s/require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)/\1/" > "$output_file"
grep -rEho "import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,mjs,cjs} | \
sed -E "s/import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
sort -u "$output_file" -o "$output_file"
else
touch "$output_file"
fi
}
extract_deps_from_code "." root_used_code.txt
extract_deps_from_code "client" client_used_code.txt
extract_deps_from_code "api" api_used_code.txt
- name: Run depcheck for root package.json
id: check-root
run: |
if [[ -f "package.json" ]]; then
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat root_used_deps.txt root_used_code.txt | sort) || echo "")
echo "ROOT_UNUSED<<EOF" >> $GITHUB_ENV
echo "$UNUSED" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV
fi
- name: Run depcheck for client/package.json
id: check-client
run: |
if [[ -f "client/package.json" ]]; then
chmod -R 755 client
cd client
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../client_used_deps.txt ../client_used_code.txt | sort) || echo "")
echo "CLIENT_UNUSED<<EOF" >> $GITHUB_ENV
echo "$UNUSED" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV
cd ..
fi
- name: Run depcheck for api/package.json
id: check-api
run: |
if [[ -f "api/package.json" ]]; then
chmod -R 755 api
cd api
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../api_used_deps.txt ../api_used_code.txt | sort) || echo "")
echo "API_UNUSED<<EOF" >> $GITHUB_ENV
echo "$UNUSED" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV
cd ..
fi
- name: Post comment on PR if unused dependencies are found
if: env.ROOT_UNUSED != '' || env.CLIENT_UNUSED != '' || env.API_UNUSED != ''
run: |
PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")
ROOT_LIST=$(echo "$ROOT_UNUSED" | awk '{print "- `" $0 "`"}')
CLIENT_LIST=$(echo "$CLIENT_UNUSED" | awk '{print "- `" $0 "`"}')
API_LIST=$(echo "$API_UNUSED" | awk '{print "- `" $0 "`"}')
COMMENT_BODY=$(cat <<EOF
### 🚨 Unused NPM Packages Detected
The following **unused dependencies** were found:
$(if [[ ! -z "$ROOT_UNUSED" ]]; then echo "#### 📂 Root \`package.json\`"; echo ""; echo "$ROOT_LIST"; echo ""; fi)
$(if [[ ! -z "$CLIENT_UNUSED" ]]; then echo "#### 📂 Client \`client/package.json\`"; echo ""; echo "$CLIENT_LIST"; echo ""; fi)
$(if [[ ! -z "$API_UNUSED" ]]; then echo "#### 📂 API \`api/package.json\`"; echo ""; echo "$API_LIST"; echo ""; fi)
⚠️ **Please remove these unused dependencies to keep your project clean.**
EOF
)
gh api "repos/${{ github.repository }}/issues/${PR_NUMBER}/comments" \
-f body="$COMMENT_BODY" \
-H "Authorization: token ${{ secrets.GITHUB_TOKEN }}"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Fail workflow if unused dependencies found
if: env.ROOT_UNUSED != '' || env.CLIENT_UNUSED != '' || env.API_UNUSED != ''
run: exit 1
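The extraction steps above pull module specifiers out of require() and import statements with grep and sed before filtering depcheck's report. A hedged Node.js sketch of that extraction, using the same patterns (the function name and sample source are made up for illustration):

// Approximates the workflow's require()/import specifier extraction (illustrative only)
const REQUIRE_RE = /require\(['"]([a-zA-Z0-9@/._-]+)['"]\)/g;
const IMPORT_RE = /import .* from ['"]([a-zA-Z0-9@/._-]+)['"]/g;

function extractSpecifiers(source) {
  const found = new Set();
  for (const re of [REQUIRE_RE, IMPORT_RE]) {
    for (const match of source.matchAll(re)) {
      // The workflow keeps the full specifier, e.g. '~/config' or 'lodash/fp', not just the package name
      found.add(match[1]);
    }
  }
  return found;
}

const sample = "const { logger } = require('~/config');\nimport axios from 'axios';";
console.log([...extractSpecifiers(sample)]); // [ '~/config', 'axios' ]

Since the extracted strings can be path aliases or subpath imports rather than bare package names, the comm filtering only removes depcheck results whose exact name also appears in source; it is a heuristic to reduce false positives, not a full resolver.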

.gitignore vendored

@@ -105,4 +105,5 @@ auth.json
uploads/
# owner
release/
release/
!/client/src/@types/i18next.d.ts

.prettierrc Normal file

@@ -0,0 +1,19 @@
{
"tailwindConfig": "./client/tailwind.config.mjs",
"printWidth": 100,
"tabWidth": 2,
"useTabs": false,
"semi": true,
"singleQuote": true,
"trailingComma": "all",
"arrowParens": "always",
"embeddedLanguageFormatting": "auto",
"insertPragma": false,
"proseWrap": "preserve",
"quoteProps": "as-needed",
"requirePragma": false,
"rangeStart": 0,
"endOfLine": "auto",
"jsxSingleQuote": false,
"plugins": ["prettier-plugin-tailwindcss"]
}


@@ -1,4 +1,4 @@
# v0.7.6
# v0.7.7-rc1
# Base node image
FROM node:20-alpine AS node


@@ -1,8 +1,8 @@
# Dockerfile.multi
# v0.7.6
# v0.7.7-rc1
# Base for all builds
FROM node:20-alpine AS base
FROM node:20-alpine AS base-min
WORKDIR /app
RUN apk --no-cache add curl
RUN npm config set fetch-retry-maxtimeout 600000 && \
@@ -13,6 +13,10 @@ COPY packages/data-provider/package*.json ./packages/data-provider/
COPY packages/mcp/package*.json ./packages/mcp/
COPY client/package*.json ./client/
COPY api/package*.json ./api/
# Install all dependencies for every build
FROM base-min AS base
WORKDIR /app
RUN npm ci
# Build data-provider
@@ -20,7 +24,6 @@ FROM base AS data-provider-build
WORKDIR /app/packages/data-provider
COPY packages/data-provider ./
RUN npm run build
RUN npm prune --production
# Build mcp package
FROM base AS mcp-build
@@ -28,7 +31,6 @@ WORKDIR /app/packages/mcp
COPY packages/mcp ./
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
RUN npm run build
RUN npm prune --production
# Client build
FROM base AS client-build
@@ -37,18 +39,18 @@ COPY client ./
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
ENV NODE_OPTIONS="--max-old-space-size=2048"
RUN npm run build
RUN npm prune --production
# API setup (including client dist)
FROM base AS api-build
FROM base-min AS api-build
WORKDIR /app
# Install only production deps
RUN npm ci --omit=dev
COPY api ./api
COPY config ./config
COPY --from=data-provider-build /app/packages/data-provider/dist ./packages/data-provider/dist
COPY --from=mcp-build /app/packages/mcp/dist ./packages/mcp/dist
COPY --from=client-build /app/client/dist ./client/dist
WORKDIR /app/api
RUN npm prune --production
EXPOSE 3080
ENV HOST=0.0.0.0
CMD ["node", "server/index.js"]
CMD ["node", "server/index.js"]


@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2024 LibreChat
Copyright (c) 2025 LibreChat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal


@@ -38,6 +38,15 @@
</a>
</p>
<p align="center">
<a href="https://www.librechat.ai/docs/translation">
<img
src="https://img.shields.io/badge/dynamic/json.svg?style=for-the-badge&color=2096F3&label=locize&query=%24.translatedPercentage&url=https://api.locize.app/badgedata/4cb2598b-ed4d-469c-9b04-2ed531a8cb45&suffix=%+translated"
alt="Translation Progress">
</a>
</p>
# ✨ Features
- 🖥️ **UI & Experience** inspired by ChatGPT with enhanced design and features
@@ -79,6 +88,9 @@
- English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro
- Русский, 日本語, Svenska, 한국어, Tiếng Việt, 繁體中文, العربية, Türkçe, Nederlands, עברית
- 🧠 **Reasoning UI**:
- Dynamic Reasoning UI for Chain-of-Thought/Reasoning AI models like DeepSeek-R1
- 🎨 **Customizable Interface**:
- Customizable Dropdown & Interface that adapts to both power users and newcomers
@@ -114,7 +126,8 @@ LibreChat brings together the future of assistant AIs with the revolutionary tec
With LibreChat, you no longer need to opt for ChatGPT Plus and can instead use free or pay-per-call APIs. We welcome contributions, cloning, and forking to enhance the capabilities of this advanced chatbot platform.
[![Watch the video](https://raw.githubusercontent.com/LibreChat-AI/librechat.ai/main/public/images/changelog/v0.7.5.png)](https://www.youtube.com/watch?v=IDukQ7a2f3U)
[![Watch the video](https://raw.githubusercontent.com/LibreChat-AI/librechat.ai/main/public/images/changelog/v0.7.6.gif)](https://www.youtube.com/watch?v=ilfwGQtJNlI)
Click on the thumbnail to open the video☝
---
@@ -166,6 +179,8 @@ Contributions, suggestions, bug reports and fixes are welcome!
For new features, components, or extensions, please open an issue and discuss before sending a PR.
If you'd like to help translate LibreChat into your language, we'd love your contribution! Improving our translations not only makes LibreChat more accessible to users around the world but also enhances the overall user experience. Please check out our [Translation Guide](https://www.librechat.ai/docs/translation).
---
## 💖 This project exists in its current state thanks to all the people who contribute
@@ -173,3 +188,15 @@ For new features, components, or extensions, please open an issue and discuss be
<a href="https://github.com/danny-avila/LibreChat/graphs/contributors">
<img src="https://contrib.rocks/image?repo=danny-avila/LibreChat" />
</a>
---
## 🎉 Special Thanks
We thank [Locize](https://locize.com) for their translation management tools that support multiple languages in LibreChat.
<p align="center">
<a href="https://locize.com" target="_blank" rel="noopener noreferrer">
<img src="https://locize.com/img/locize_color.svg" alt="Locize Logo" height="50">
</a>
</p>


@@ -1,112 +0,0 @@
require('dotenv').config();
const { KeyvFile } = require('keyv-file');
const { EModelEndpoint } = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const { logger } = require('~/config');
const askBing = async ({
text,
parentMessageId,
conversationId,
jailbreak,
jailbreakConversationId,
context,
systemMessage,
conversationSignature,
clientId,
invocationId,
toneStyle,
key: expiresAt,
onProgress,
userId,
}) => {
const isUserProvided = process.env.BINGAI_TOKEN === 'user_provided';
let key = null;
if (expiresAt && isUserProvided) {
checkUserKeyExpiry(expiresAt, EModelEndpoint.bingAI);
key = await getUserKey({ userId, name: 'bingAI' });
}
const { BingAIClient } = await import('nodejs-gpt');
const store = {
store: new KeyvFile({ filename: './data/cache.json' }),
};
const bingAIClient = new BingAIClient({
// "_U" cookie from bing.com
// userToken:
// isUserProvided ? key : process.env.BINGAI_TOKEN ?? null,
// If the above doesn't work, provide all your cookies as a string instead
cookies: isUserProvided ? key : process.env.BINGAI_TOKEN ?? null,
debug: false,
cache: store,
host: process.env.BINGAI_HOST || null,
proxy: process.env.PROXY || null,
});
let options = {};
if (jailbreakConversationId == 'false') {
jailbreakConversationId = false;
}
if (jailbreak) {
options = {
jailbreakConversationId: jailbreakConversationId || jailbreak,
context,
systemMessage,
parentMessageId,
toneStyle,
onProgress,
clientOptions: {
features: {
genImage: {
server: {
enable: true,
type: 'markdown_list',
},
},
},
},
};
} else {
options = {
conversationId,
context,
systemMessage,
parentMessageId,
toneStyle,
onProgress,
clientOptions: {
features: {
genImage: {
server: {
enable: true,
type: 'markdown_list',
},
},
},
},
};
// don't give those parameters for new conversation
// for new conversation, conversationSignature always is null
if (conversationSignature) {
options.encryptedConversationSignature = conversationSignature;
options.clientId = clientId;
options.invocationId = invocationId;
}
}
logger.debug('bing options', options);
const res = await bingAIClient.sendMessage(text, options);
return res;
// for reference:
// https://github.com/waylaidwanderer/node-chatgpt-api/blob/main/demos/use-bing-client.js
};
module.exports = { askBing };


@@ -1,57 +0,0 @@
require('dotenv').config();
const { KeyvFile } = require('keyv-file');
const { Constants, EModelEndpoint } = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('../server/services/UserService');
const browserClient = async ({
text,
parentMessageId,
conversationId,
model,
key: expiresAt,
onProgress,
onEventMessage,
abortController,
userId,
}) => {
const isUserProvided = process.env.CHATGPT_TOKEN === 'user_provided';
let key = null;
if (expiresAt && isUserProvided) {
checkUserKeyExpiry(expiresAt, EModelEndpoint.chatGPTBrowser);
key = await getUserKey({ userId, name: 'chatGPTBrowser' });
}
const { ChatGPTBrowserClient } = await import('nodejs-gpt');
const store = {
store: new KeyvFile({ filename: './data/cache.json' }),
};
const clientOptions = {
// Warning: This will expose your access token to a third party. Consider the risks before using this.
reverseProxyUrl:
process.env.CHATGPT_REVERSE_PROXY ?? 'https://ai.fakeopen.com/api/conversation',
// Access token from https://chat.openai.com/api/auth/session
accessToken: isUserProvided ? key : process.env.CHATGPT_TOKEN ?? null,
model: model,
debug: false,
proxy: process.env.PROXY ?? null,
user: userId,
};
const client = new ChatGPTBrowserClient(clientOptions, store);
let options = { onProgress, onEventMessage, abortController };
if (!!parentMessageId && !!conversationId) {
options = { ...options, parentMessageId, conversationId };
}
if (parentMessageId === Constants.NO_PARENT) {
delete options.conversationId;
}
const res = await client.sendMessage(text, options);
return res;
};
module.exports = { browserClient };


@@ -1,6 +1,5 @@
const Anthropic = require('@anthropic-ai/sdk');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
Constants,
EModelEndpoint,
@@ -8,7 +7,7 @@ const {
getResponseSender,
validateVisionModel,
} = require('librechat-data-provider');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { SplitStreamHandler: _Handler, GraphEvents } = require('@librechat/agents');
const {
truncateText,
formatMessage,
@@ -17,16 +16,30 @@ const {
parseParamFromPrompt,
createContextHandlers,
} = require('./prompts');
const {
getClaudeHeaders,
configureReasoning,
checkPromptCacheSupport,
} = require('~/server/services/Endpoints/anthropic/helpers');
const { getModelMaxTokens, getModelMaxOutputTokens, matchModelName } = require('~/utils');
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const Tokenizer = require('~/server/services/Tokenizer');
const { logger, sendEvent } = require('~/config');
const { sleep } = require('~/server/utils');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
const HUMAN_PROMPT = '\n\nHuman:';
const AI_PROMPT = '\n\nAssistant:';
const tokenizersCache = {};
class SplitStreamHandler extends _Handler {
getDeltaContent(chunk) {
return (chunk?.delta?.text ?? chunk?.completion) || '';
}
getReasoningDelta(chunk) {
return chunk?.delta?.thinking || '';
}
}
/** Helper function to introduce a delay before retrying */
function delayBeforeRetry(attempts, baseDelay = 1000) {
@@ -70,6 +83,8 @@ class AnthropicClient extends BaseClient {
/** The key for the usage object's output tokens
* @type {string} */
this.outputTokensKey = 'output_tokens';
/** @type {SplitStreamHandler | undefined} */
this.streamHandler;
}
setOptions(options) {
@@ -99,9 +114,10 @@ class AnthropicClient extends BaseClient {
const modelMatch = matchModelName(this.modelOptions.model, EModelEndpoint.anthropic);
this.isClaude3 = modelMatch.includes('claude-3');
this.isLegacyOutput = !modelMatch.includes('claude-3-5-sonnet');
this.supportsCacheControl =
this.options.promptCache && this.checkPromptCacheSupport(modelMatch);
this.isLegacyOutput = !(
/claude-3[-.]5-sonnet/.test(modelMatch) || /claude-3[-.]7/.test(modelMatch)
);
this.supportsCacheControl = this.options.promptCache && checkPromptCacheSupport(modelMatch);
if (
this.isLegacyOutput &&
@@ -127,7 +143,7 @@ class AnthropicClient extends BaseClient {
this.options.endpointType ?? this.options.endpoint,
this.options.endpointTokenConfig,
) ??
1500;
anthropicSettings.maxOutputTokens.reset(this.modelOptions.model);
this.maxPromptTokens =
this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;
@@ -149,7 +165,6 @@ class AnthropicClient extends BaseClient {
this.startToken = '||>';
this.endToken = '';
this.gptEncoder = this.constructor.getTokenizer('cl100k_base');
return this;
}
@@ -174,18 +189,9 @@ class AnthropicClient extends BaseClient {
options.baseURL = this.options.reverseProxyUrl;
}
if (
this.supportsCacheControl &&
requestOptions?.model &&
requestOptions.model.includes('claude-3-5-sonnet')
) {
options.defaultHeaders = {
'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15,prompt-caching-2024-07-31',
};
} else if (this.supportsCacheControl) {
options.defaultHeaders = {
'anthropic-beta': 'prompt-caching-2024-07-31',
};
const headers = getClaudeHeaders(requestOptions?.model, this.supportsCacheControl);
if (headers) {
options.defaultHeaders = headers;
}
return new Anthropic(options);
@@ -419,7 +425,7 @@ class AnthropicClient extends BaseClient {
}
let { context: messagesInWindow, remainingContextTokens } =
await this.getMessagesWithinTokenLimit(formattedMessages);
await this.getMessagesWithinTokenLimit({ messages: formattedMessages });
const tokenCountMap = orderedMessages
.slice(orderedMessages.length - messagesInWindow.length)
@@ -671,29 +677,38 @@ class AnthropicClient extends BaseClient {
* @returns {Promise<Anthropic.default.Message | Anthropic.default.Completion>} The response from the Anthropic client.
*/
async createResponse(client, options, useMessages) {
return useMessages ?? this.useMessages
return (useMessages ?? this.useMessages)
? await client.messages.create(options)
: await client.completions.create(options);
}
getMessageMapMethod() {
/**
* @param {TMessage} msg
*/
return (msg) => {
if (msg.text != null && msg.text && msg.text.startsWith(':::thinking')) {
msg.text = msg.text.replace(/:::thinking.*?:::/gs, '').trim();
}
return msg;
};
}
/**
* @param {string} modelName
* @returns {boolean}
* @param {string[]} [intermediateReply]
* @returns {string}
*/
checkPromptCacheSupport(modelName) {
const modelMatch = matchModelName(modelName, EModelEndpoint.anthropic);
if (modelMatch.includes('claude-3-5-sonnet-latest')) {
return false;
getStreamText(intermediateReply) {
if (!this.streamHandler) {
return intermediateReply?.join('') ?? '';
}
if (
modelMatch === 'claude-3-5-sonnet' ||
modelMatch === 'claude-3-5-haiku' ||
modelMatch === 'claude-3-haiku' ||
modelMatch === 'claude-3-opus'
) {
return true;
}
return false;
const reasoningText = this.streamHandler.reasoningTokens.join('');
const reasoningBlock = reasoningText.length > 0 ? `:::thinking\n${reasoningText}\n:::\n` : '';
return `${reasoningBlock}${this.streamHandler.tokens.join('')}`;
}
async sendCompletion(payload, { onProgress, abortController }) {
@@ -713,7 +728,6 @@ class AnthropicClient extends BaseClient {
user_id: this.user,
};
let text = '';
const {
stream,
model,
@@ -724,24 +738,37 @@ class AnthropicClient extends BaseClient {
topK: top_k,
} = this.modelOptions;
const requestOptions = {
let requestOptions = {
model,
stream: stream || true,
stop_sequences,
temperature,
metadata,
top_p,
top_k,
};
if (!/claude-3[-.]7/.test(model)) {
if (top_p !== undefined) {
requestOptions.top_p = top_p;
}
if (top_k !== undefined) {
requestOptions.top_k = top_k;
}
}
if (this.useMessages) {
requestOptions.messages = payload;
requestOptions.max_tokens = maxOutputTokens || legacy.maxOutputTokens.default;
requestOptions.max_tokens =
maxOutputTokens || anthropicSettings.maxOutputTokens.reset(requestOptions.model);
} else {
requestOptions.prompt = payload;
requestOptions.max_tokens_to_sample = maxOutputTokens || 1500;
requestOptions.max_tokens_to_sample = maxOutputTokens || legacy.maxOutputTokens.default;
}
requestOptions = configureReasoning(requestOptions, {
thinking: this.options.thinking,
thinkingBudget: this.options.thinkingBudget,
});
if (this.systemMessage && this.supportsCacheControl === true) {
requestOptions.system = [
{
@@ -759,13 +786,17 @@ class AnthropicClient extends BaseClient {
}
logger.debug('[AnthropicClient]', { ...requestOptions });
this.streamHandler = new SplitStreamHandler({
accumulate: true,
runId: this.responseMessageId,
handlers: {
[GraphEvents.ON_RUN_STEP]: (event) => sendEvent(this.options.res, event),
[GraphEvents.ON_MESSAGE_DELTA]: (event) => sendEvent(this.options.res, event),
[GraphEvents.ON_REASONING_DELTA]: (event) => sendEvent(this.options.res, event),
},
});
const handleChunk = (currentChunk) => {
if (currentChunk) {
text += currentChunk;
onProgress(currentChunk);
}
};
let intermediateReply = this.streamHandler.tokens;
const maxRetries = 3;
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
@@ -786,22 +817,15 @@ class AnthropicClient extends BaseClient {
});
for await (const completion of response) {
// Handle each completion as before
const type = completion?.type ?? '';
if (tokenEventTypes.has(type)) {
logger.debug(`[AnthropicClient] ${type}`, completion);
this[type] = completion;
}
if (completion?.delta?.text) {
handleChunk(completion.delta.text);
} else if (completion.completion) {
handleChunk(completion.completion);
}
this.streamHandler.handle(completion);
await sleep(streamRate);
}
// Successful processing, exit loop
break;
} catch (error) {
attempts += 1;
@@ -811,6 +835,10 @@ class AnthropicClient extends BaseClient {
if (attempts < maxRetries) {
await delayBeforeRetry(attempts, 350);
} else if (this.streamHandler && this.streamHandler.reasoningTokens.length) {
return this.getStreamText();
} else if (intermediateReply.length > 0) {
return this.getStreamText(intermediateReply);
} else {
throw new Error(`Operation failed after ${maxRetries} attempts: ${error.message}`);
}
@@ -826,8 +854,7 @@ class AnthropicClient extends BaseClient {
}
await processResponse.bind(this)();
return text.trim();
return this.getStreamText(intermediateReply);
}
getSaveOptions() {
@@ -837,6 +864,8 @@ class AnthropicClient extends BaseClient {
promptPrefix: this.options.promptPrefix,
modelLabel: this.options.modelLabel,
promptCache: this.options.promptCache,
thinking: this.options.thinking,
thinkingBudget: this.options.thinkingBudget,
resendFiles: this.options.resendFiles,
iconURL: this.options.iconURL,
greeting: this.options.greeting,
@@ -849,22 +878,18 @@ class AnthropicClient extends BaseClient {
logger.debug('AnthropicClient doesn\'t use getBuildMessagesOptions');
}
static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {
if (tokenizersCache[encoding]) {
return tokenizersCache[encoding];
}
let tokenizer;
if (isModelName) {
tokenizer = encodingForModel(encoding, extendSpecialTokens);
} else {
tokenizer = getEncoding(encoding, extendSpecialTokens);
}
tokenizersCache[encoding] = tokenizer;
return tokenizer;
getEncoding() {
return 'cl100k_base';
}
/**
* Returns the token count of a given text. It also checks and resets the tokenizers if necessary.
* @param {string} text - The text to get the token count for.
* @returns {number} The token count of the given text.
*/
getTokenCount(text) {
return this.gptEncoder.encode(text, 'all').length;
const encoding = this.getEncoding();
return Tokenizer.getTokenCount(text, encoding);
}
/**

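Two of the AnthropicClient changes above hinge on model-name regexes: claude-3.7 requests no longer include top_p/top_k, and isLegacyOutput is now false for claude-3.5-sonnet and claude-3.7 variants. A standalone sketch of that gating (the helper names and example model strings are illustrative; the real client applies the checks to the matched model name):

// Mirrors the model-name gating shown in the diff above (illustrative helpers)
const isClaude37 = (model) => /claude-3[-.]7/.test(model);
const isLegacyOutput = (model) =>
  !(/claude-3[-.]5-sonnet/.test(model) || /claude-3[-.]7/.test(model));

function buildSamplingOptions(model, { top_p, top_k } = {}) {
  const options = {};
  if (!isClaude37(model)) {
    // Only non-claude-3.7 models receive these sampling parameters, and only when defined
    if (top_p !== undefined) {
      options.top_p = top_p;
    }
    if (top_k !== undefined) {
      options.top_k = top_k;
    }
  }
  return options;
}

console.log(buildSamplingOptions('claude-3-5-sonnet-latest', { top_p: 0.9, top_k: 40 })); // { top_p: 0.9, top_k: 40 }
console.log(buildSamplingOptions('claude-3-7-sonnet-latest', { top_p: 0.9, top_k: 40 })); // {}
console.log(isLegacyOutput('claude-3-opus'));     // true: keeps the legacy max output handling
console.log(isLegacyOutput('claude-3.7-sonnet')); // false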

@@ -4,16 +4,16 @@ const {
supportsBalanceCheck,
isAgentsEndpoint,
isParamEndpoint,
EModelEndpoint,
excludedKeys,
ErrorTypes,
Constants,
CacheKeys,
Time,
} = require('librechat-data-provider');
const { getMessages, saveMessage, updateMessage, saveConvo } = require('~/models');
const { getMessages, saveMessage, updateMessage, saveConvo, getConvo } = require('~/models');
const { addSpaceIfNeeded, isEnabled } = require('~/server/utils');
const { truncateToolCallOutputs } = require('./prompts');
const checkBalance = require('~/models/checkBalance');
const { getFiles } = require('~/models/File');
const { getLogStores } = require('~/cache');
const TextStream = require('./TextStream');
const { logger } = require('~/config');
@@ -52,6 +52,18 @@ class BaseClient {
this.outputTokensKey = 'completion_tokens';
/** @type {Set<string>} */
this.savedMessageIds = new Set();
/**
* Flag to determine if the client re-submitted the latest assistant message.
* @type {boolean | undefined} */
this.continued;
/**
* Flag to determine if the client has already fetched the conversation while saving new messages.
* @type {boolean | undefined} */
this.fetchedConvo;
/** @type {TMessage[]} */
this.currentMessages = [];
/** @type {import('librechat-data-provider').VisionModes | undefined} */
this.visionMode;
}
setOptions() {
@@ -95,7 +107,7 @@ class BaseClient {
* @returns {number}
*/
getTokenCountForResponse(responseMessage) {
logger.debug('`[BaseClient] recordTokenUsage` not implemented.', responseMessage);
logger.debug('[BaseClient] `recordTokenUsage` not implemented.', responseMessage);
}
/**
@@ -106,7 +118,7 @@ class BaseClient {
* @returns {Promise<void>}
*/
async recordTokenUsage({ promptTokens, completionTokens }) {
logger.debug('`[BaseClient] recordTokenUsage` not implemented.', {
logger.debug('[BaseClient] `recordTokenUsage` not implemented.', {
promptTokens,
completionTokens,
});
@@ -262,17 +274,24 @@ class BaseClient {
/**
* Adds instructions to the messages array. If the instructions object is empty or undefined,
* the original messages array is returned. Otherwise, the instructions are added to the messages
* array, preserving the last message at the end.
* array either at the beginning (default) or preserving the last message at the end.
*
* @param {Array} messages - An array of messages.
* @param {Object} instructions - An object containing instructions to be added to the messages.
* @param {boolean} [beforeLast=false] - If true, adds instructions before the last message; if false, adds at the beginning.
* @returns {Array} An array containing messages and instructions, or the original messages if instructions are empty.
*/
addInstructions(messages, instructions) {
const payload = [];
addInstructions(messages, instructions, beforeLast = false) {
if (!instructions || Object.keys(instructions).length === 0) {
return messages;
}
if (!beforeLast) {
return [instructions, ...messages];
}
// Legacy behavior: add instructions before the last message
const payload = [];
if (messages.length > 1) {
payload.push(...messages.slice(0, -1));
}
@@ -287,6 +306,9 @@ class BaseClient {
}
async handleTokenCountMap(tokenCountMap) {
if (this.clientName === EModelEndpoint.agents) {
return;
}
if (this.currentMessages.length === 0) {
return;
}
@@ -335,25 +357,38 @@ class BaseClient {
* If the token limit would be exceeded by adding a message, that message is not added to the context and remains in the original array.
* The method uses `push` and `pop` operations for efficient array manipulation, and reverses the context array at the end to maintain the original order of the messages.
*
* @param {Array} _messages - An array of messages, each with a `tokenCount` property. The messages should be ordered from oldest to newest.
* @param {number} [maxContextTokens] - The max number of tokens allowed in the context. If not provided, defaults to `this.maxContextTokens`.
* @returns {Object} An object with four properties: `context`, `summaryIndex`, `remainingContextTokens`, and `messagesToRefine`.
* @param {Object} params
* @param {TMessage[]} params.messages - An array of messages, each with a `tokenCount` property. The messages should be ordered from oldest to newest.
* @param {number} [params.maxContextTokens] - The max number of tokens allowed in the context. If not provided, defaults to `this.maxContextTokens`.
* @param {{ role: 'system', content: text, tokenCount: number }} [params.instructions] - Instructions already added to the context at index 0.
* @returns {Promise<{
* context: TMessage[],
* remainingContextTokens: number,
* messagesToRefine: TMessage[],
* summaryIndex: number,
* }>} An object with four properties: `context`, `summaryIndex`, `remainingContextTokens`, and `messagesToRefine`.
* `context` is an array of messages that fit within the token limit.
* `summaryIndex` is the index of the first message in the `messagesToRefine` array.
* `remainingContextTokens` is the number of tokens remaining within the limit after adding the messages to the context.
* `messagesToRefine` is an array of messages that were not added to the context because they would have exceeded the token limit.
*/
async getMessagesWithinTokenLimit(_messages, maxContextTokens) {
async getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, instructions }) {
// Every reply is primed with <|start|>assistant<|message|>, so we
// start with 3 tokens for the label after all messages have been counted.
let currentTokenCount = 3;
let summaryIndex = -1;
let remainingContextTokens = maxContextTokens ?? this.maxContextTokens;
let currentTokenCount = 3;
const instructionsTokenCount = instructions?.tokenCount ?? 0;
let remainingContextTokens =
(maxContextTokens ?? this.maxContextTokens) - instructionsTokenCount;
const messages = [..._messages];
const context = [];
if (currentTokenCount < remainingContextTokens) {
while (messages.length > 0 && currentTokenCount < remainingContextTokens) {
if (messages.length === 1 && instructions) {
break;
}
const poppedMessage = messages.pop();
const { tokenCount } = poppedMessage;
@@ -367,6 +402,11 @@ class BaseClient {
}
}
if (instructions) {
context.push(_messages[0]);
messages.shift();
}
const prunedMemory = messages;
summaryIndex = prunedMemory.length - 1;
remainingContextTokens -= currentTokenCount;
@@ -391,12 +431,38 @@ class BaseClient {
if (instructions) {
({ tokenCount, ..._instructions } = instructions);
}
_instructions && logger.debug('[BaseClient] instructions tokenCount: ' + tokenCount);
let payload = this.addInstructions(formattedMessages, _instructions);
if (tokenCount && tokenCount > this.maxContextTokens) {
const info = `${tokenCount} / ${this.maxContextTokens}`;
const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
logger.warn(`Instructions token count exceeds max token count (${info}).`);
throw new Error(errorMessage);
}
if (this.clientName === EModelEndpoint.agents) {
const { dbMessages, editedIndices } = truncateToolCallOutputs(
orderedMessages,
this.maxContextTokens,
this.getTokenCountForMessage.bind(this),
);
if (editedIndices.length > 0) {
logger.debug('[BaseClient] Truncated tool call outputs:', editedIndices);
for (const index of editedIndices) {
formattedMessages[index].content = dbMessages[index].content;
}
orderedMessages = dbMessages;
}
}
let orderedWithInstructions = this.addInstructions(orderedMessages, instructions);
let { context, remainingContextTokens, messagesToRefine, summaryIndex } =
await this.getMessagesWithinTokenLimit(orderedWithInstructions);
await this.getMessagesWithinTokenLimit({
messages: orderedWithInstructions,
instructions,
});
logger.debug('[BaseClient] Context Count (1/2)', {
remainingContextTokens,
@@ -408,7 +474,9 @@ class BaseClient {
let { shouldSummarize } = this;
// Calculate the difference in length to determine how many messages were discarded if any
const { length } = payload;
let payload;
let { length } = formattedMessages;
length += instructions != null ? 1 : 0;
const diff = length - context.length;
const firstMessage = orderedWithInstructions[0];
const usePrevSummary =
@@ -418,18 +486,31 @@ class BaseClient {
this.previous_summary.messageId === firstMessage.messageId;
if (diff > 0) {
payload = payload.slice(diff);
payload = formattedMessages.slice(diff);
logger.debug(
`[BaseClient] Difference between original payload (${length}) and context (${context.length}): ${diff}`,
);
}
payload = this.addInstructions(payload ?? formattedMessages, _instructions);
const latestMessage = orderedWithInstructions[orderedWithInstructions.length - 1];
if (payload.length === 0 && !shouldSummarize && latestMessage) {
const info = `${latestMessage.tokenCount} / ${this.maxContextTokens}`;
const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
logger.warn(`Prompt token count exceeds max token count (${info}).`);
throw new Error(errorMessage);
} else if (
_instructions &&
payload.length === 1 &&
payload[0].content === _instructions.content
) {
const info = `${tokenCount + 3} / ${this.maxContextTokens}`;
const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
logger.warn(
`Including instructions, the prompt token count exceeds remaining max token count (${info}).`,
);
throw new Error(errorMessage);
}
if (usePrevSummary) {
@@ -518,6 +599,7 @@ class BaseClient {
} else {
latestMessage.text = generation;
}
this.continued = true;
} else {
this.currentMessages.push(userMessage);
}
@@ -625,7 +707,7 @@ class BaseClient {
await this.updateUserMessageTokenCount({ usage, tokenCountMap, userMessage, opts });
} else {
responseMessage.tokenCount = this.getTokenCountForResponse(responseMessage);
completionTokens = this.getTokenCount(completion);
completionTokens = responseMessage.tokenCount;
}
await this.recordTokenUsage({ promptTokens, completionTokens, usage });
@@ -649,15 +731,6 @@ class BaseClient {
this.responsePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user);
this.savedMessageIds.add(responseMessage.messageId);
const messageCache = getLogStores(CacheKeys.MESSAGES);
messageCache.set(
responseMessageId,
{
text: responseMessage.text,
complete: true,
},
Time.FIVE_MINUTES,
);
delete responseMessage.tokenCount;
return responseMessage;
}
@@ -795,16 +868,39 @@ class BaseClient {
return { message: savedMessage };
}
const conversation = await saveConvo(
this.options.req,
{
conversationId: message.conversationId,
endpoint: this.options.endpoint,
endpointType: this.options.endpointType,
...endpointOptions,
},
{ context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo' },
);
const fieldsToKeep = {
conversationId: message.conversationId,
endpoint: this.options.endpoint,
endpointType: this.options.endpointType,
...endpointOptions,
};
const existingConvo =
this.fetchedConvo === true
? null
: await getConvo(this.options.req?.user?.id, message.conversationId);
const unsetFields = {};
if (existingConvo != null) {
this.fetchedConvo = true;
for (const key in existingConvo) {
if (!key) {
continue;
}
if (excludedKeys.has(key)) {
continue;
}
if (endpointOptions?.[key] === undefined) {
unsetFields[key] = 1;
}
}
}
const conversation = await saveConvo(this.options.req, fieldsToKeep, {
context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo',
unsetFields,
});
return { message: savedMessage, conversation };
}
@@ -929,6 +1025,24 @@ class BaseClient {
continue;
}
if (item.type === 'tool_call' && item.tool_call != null) {
const toolName = item.tool_call?.name || '';
if (toolName != null && toolName && typeof toolName === 'string') {
numTokens += this.getTokenCount(toolName);
}
const args = item.tool_call?.args || '';
if (args != null && args && typeof args === 'string') {
numTokens += this.getTokenCount(args);
}
const output = item.tool_call?.output || '';
if (output != null && output && typeof output === 'string') {
numTokens += this.getTokenCount(output);
}
continue;
}
const nestedValue = item[item.type];
if (!nestedValue) {
@@ -1011,7 +1125,7 @@ class BaseClient {
file_id: { $in: fileIds },
});
await this.addImageURLs(message, files);
await this.addImageURLs(message, files, this.visionMode);
this.message_file_map[message.messageId] = files;
return message;

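The reworked addInstructions in BaseClient now prepends instructions by default and only uses the old placement, just before the last message, when beforeLast is true. A small self-contained sketch of the two orderings (the message objects are made up for illustration):

// Sketch of the new BaseClient.addInstructions ordering (illustrative data)
function addInstructions(messages, instructions, beforeLast = false) {
  if (!instructions || Object.keys(instructions).length === 0) {
    return messages;
  }
  if (!beforeLast) {
    return [instructions, ...messages]; // new default: instructions go first
  }
  // Legacy behavior: keep the latest message last
  return [...messages.slice(0, -1), instructions, ...messages.slice(-1)];
}

const instructions = { role: 'system', content: 'Follow the house style.' };
const messages = [
  { role: 'user', content: 'Hi' },
  { role: 'assistant', content: 'Hello' },
  { role: 'user', content: 'Summarize this thread.' },
];

console.log(addInstructions(messages, instructions).map((m) => m.role));
// [ 'system', 'user', 'assistant', 'user' ]
console.log(addInstructions(messages, instructions, true).map((m) => m.role));
// [ 'user', 'assistant', 'system', 'user' ]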

@@ -13,7 +13,6 @@ const {
const { extractBaseURL, constructAzureURL, genAzureChatCompletion } = require('~/utils');
const { createContextHandlers } = require('./prompts');
const { createCoherePayload } = require('./llm');
const { Agent, ProxyAgent } = require('undici');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
@@ -186,10 +185,6 @@ class ChatGPTClient extends BaseClient {
headers: {
'Content-Type': 'application/json',
},
dispatcher: new Agent({
bodyTimeout: 0,
headersTimeout: 0,
}),
};
if (this.isVisionModel) {
@@ -275,10 +270,6 @@ class ChatGPTClient extends BaseClient {
opts.headers['X-Title'] = 'LibreChat';
}
if (this.options.proxy) {
opts.dispatcher = new ProxyAgent(this.options.proxy);
}
/* hacky fixes for Mistral AI API:
- Re-orders system message to the top of the messages payload, as not allowed anywhere else
- If there is only one message and it's a system message, change the role to user


@@ -1,22 +1,25 @@
const { google } = require('googleapis');
const { Agent, ProxyAgent } = require('undici');
const { concat } = require('@langchain/core/utils/stream');
const { ChatVertexAI } = require('@langchain/google-vertexai');
const { GoogleVertexAI } = require('@langchain/google-vertexai');
const { ChatGoogleVertexAI } = require('@langchain/google-vertexai');
const { ChatGoogleGenerativeAI } = require('@langchain/google-genai');
const { GoogleGenerativeAI: GenAI } = require('@google/generative-ai');
const { AIMessage, HumanMessage, SystemMessage } = require('@langchain/core/messages');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const { HumanMessage, SystemMessage } = require('@langchain/core/messages');
const {
googleGenConfigSchema,
validateVisionModel,
getResponseSender,
endpointSettings,
EModelEndpoint,
ContentTypes,
VisionModes,
ErrorTypes,
Constants,
AuthKeys,
} = require('librechat-data-provider');
const { getSafetySettings } = require('~/server/services/Endpoints/google/llm');
const { encodeAndFormat } = require('~/server/services/Files/images');
const Tokenizer = require('~/server/services/Tokenizer');
const { spendTokens } = require('~/models/spendTokens');
const { getModelMaxTokens } = require('~/utils');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');
@@ -31,7 +34,6 @@ const BaseClient = require('./BaseClient');
const loc = process.env.GOOGLE_LOC || 'us-central1';
const publisher = 'google';
const endpointPrefix = `${loc}-aiplatform.googleapis.com`;
const tokenizersCache = {};
const settings = endpointSettings[EModelEndpoint.google];
const EXCLUDED_GENAI_MODELS = /gemini-(?:1\.0|1-0|pro)/;
@@ -49,10 +51,11 @@ class GoogleClient extends BaseClient {
const serviceKey = creds[AuthKeys.GOOGLE_SERVICE_KEY] ?? {};
this.serviceKey =
serviceKey && typeof serviceKey === 'string' ? JSON.parse(serviceKey) : serviceKey ?? {};
serviceKey && typeof serviceKey === 'string' ? JSON.parse(serviceKey) : (serviceKey ?? {});
/** @type {string | null | undefined} */
this.project_id = this.serviceKey.project_id;
this.client_email = this.serviceKey.client_email;
this.private_key = this.serviceKey.private_key;
this.project_id = this.serviceKey.project_id;
this.access_token = null;
this.apiKey = creds[AuthKeys.GOOGLE_API_KEY];
@@ -61,6 +64,17 @@ class GoogleClient extends BaseClient {
this.authHeader = options.authHeader;
/** @type {UsageMetadata | undefined} */
this.usage;
/** The key for the usage object's input tokens
* @type {string} */
this.inputTokensKey = 'input_tokens';
/** The key for the usage object's output tokens
* @type {string} */
this.outputTokensKey = 'output_tokens';
this.visionMode = VisionModes.generative;
/** @type {string} */
this.systemMessage;
if (options.skipSetOptions) {
return;
}
@@ -120,22 +134,13 @@ class GoogleClient extends BaseClient {
this.options = options;
}
this.options.examples = (this.options.examples ?? [])
.filter((ex) => ex)
.filter((obj) => obj.input.content !== '' && obj.output.content !== '');
this.modelOptions = this.options.modelOptions || {};
this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));
/** @type {boolean} Whether using a "GenerativeAI" Model */
this.isGenerativeModel = this.modelOptions.model.includes('gemini');
const { isGenerativeModel } = this;
this.isChatModel = !isGenerativeModel && this.modelOptions.model.includes('chat');
const { isChatModel } = this;
this.isTextModel =
!isGenerativeModel && !isChatModel && /code|text/.test(this.modelOptions.model);
const { isTextModel } = this;
this.isGenerativeModel =
this.modelOptions.model.includes('gemini') || this.modelOptions.model.includes('learnlm');
this.maxContextTokens =
this.options.maxContextTokens ??
@@ -171,50 +176,18 @@ class GoogleClient extends BaseClient {
this.userLabel = this.options.userLabel || 'User';
this.modelLabel = this.options.modelLabel || 'Assistant';
if (isChatModel || isGenerativeModel) {
// Use these faux tokens to help the AI understand the context since we are building the chat log ourselves.
// Trying to use "<|im_start|>" causes the AI to still generate "<" or "<|" at the end sometimes for some reason,
// without tripping the stop sequences, so I'm using "||>" instead.
this.startToken = '||>';
this.endToken = '';
this.gptEncoder = this.constructor.getTokenizer('cl100k_base');
} else if (isTextModel) {
this.startToken = '||>';
this.endToken = '';
this.gptEncoder = this.constructor.getTokenizer('text-davinci-003', true, {
'<|im_start|>': 100264,
'<|im_end|>': 100265,
});
} else {
// Previously I was trying to use "<|endoftext|>" but there seems to be some bug with OpenAI's token counting
// system that causes only the first "<|endoftext|>" to be counted as 1 token, and the rest are not treated
// as a single token. So we're using this instead.
this.startToken = '||>';
this.endToken = '';
try {
this.gptEncoder = this.constructor.getTokenizer(this.modelOptions.model, true);
} catch {
this.gptEncoder = this.constructor.getTokenizer('text-davinci-003', true);
}
}
if (!this.modelOptions.stop) {
const stopTokens = [this.startToken];
if (this.endToken && this.endToken !== this.startToken) {
stopTokens.push(this.endToken);
}
stopTokens.push(`\n${this.userLabel}:`);
stopTokens.push('<|diff_marker|>');
// I chose not to do one for `modelLabel` because I've never seen it happen
this.modelOptions.stop = stopTokens;
}
if (this.options.reverseProxyUrl) {
this.completionsUrl = this.options.reverseProxyUrl;
} else {
this.completionsUrl = this.constructUrl();
}
let promptPrefix = (this.options.promptPrefix ?? '').trim();
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
}
this.systemMessage = promptPrefix;
this.initializeClient();
return this;
}
@@ -246,10 +219,29 @@ class GoogleClient extends BaseClient {
}
formatMessages() {
return ((message) => ({
author: message?.author ?? (message.isCreatedByUser ? this.userLabel : this.modelLabel),
content: message?.content ?? message.text,
})).bind(this);
return ((message) => {
const msg = {
author: message?.author ?? (message.isCreatedByUser ? this.userLabel : this.modelLabel),
content: message?.content ?? message.text,
};
if (!message.image_urls?.length) {
return msg;
}
msg.content = (
!Array.isArray(msg.content)
? [
{
type: ContentTypes.TEXT,
[ContentTypes.TEXT]: msg.content,
},
]
: msg.content
).concat(message.image_urls);
return msg;
}).bind(this);
}
/**
@@ -324,7 +316,7 @@ class GoogleClient extends BaseClient {
}
this.augmentedPrompt = await this.contextHandlers.createContext();
this.options.promptPrefix = this.augmentedPrompt + this.options.promptPrefix;
this.systemMessage = this.augmentedPrompt + this.systemMessage;
}
}
@@ -347,7 +339,6 @@ class GoogleClient extends BaseClient {
messages: [new HumanMessage(formatMessage({ message: latestMessage }))],
},
],
parameters: this.modelOptions,
};
return { prompt: payload };
}
@@ -363,23 +354,58 @@ class GoogleClient extends BaseClient {
return { prompt: formattedMessages };
}
async buildMessages(messages = [], parentMessageId) {
/**
* @param {TMessage[]} [messages=[]]
* @param {string} [parentMessageId]
*/
async buildMessages(_messages = [], parentMessageId) {
if (!this.isGenerativeModel && !this.project_id) {
throw new Error(
'[GoogleClient] a Service Account JSON Key is required for PaLM 2 and Codey models (Vertex AI)',
);
throw new Error('[GoogleClient] PaLM 2 and Codey models are no longer supported.');
}
if (this.systemMessage) {
const instructionsTokenCount = this.getTokenCount(this.systemMessage);
this.maxContextTokens = this.maxContextTokens - instructionsTokenCount;
if (this.maxContextTokens < 0) {
const info = `${instructionsTokenCount} / ${this.maxContextTokens}`;
const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
logger.warn(`Instructions token count exceeds max context (${info}).`);
throw new Error(errorMessage);
}
}
for (let i = 0; i < _messages.length; i++) {
const message = _messages[i];
if (!message.tokenCount) {
_messages[i].tokenCount = this.getTokenCountForMessage({
role: message.isCreatedByUser ? 'user' : 'assistant',
content: message.content ?? message.text,
});
}
}
const {
payload: messages,
tokenCountMap,
promptTokens,
} = await this.handleContextStrategy({
orderedMessages: _messages,
formattedMessages: _messages,
});
if (!this.project_id && !EXCLUDED_GENAI_MODELS.test(this.modelOptions.model)) {
return await this.buildGenerativeMessages(messages);
const result = await this.buildGenerativeMessages(messages);
result.tokenCountMap = tokenCountMap;
result.promptTokens = promptTokens;
return result;
}
if (this.options.attachments && this.isGenerativeModel) {
return this.buildVisionMessages(messages, parentMessageId);
}
if (this.isTextModel) {
return this.buildMessagesPrompt(messages, parentMessageId);
const result = this.buildVisionMessages(messages, parentMessageId);
result.tokenCountMap = tokenCountMap;
result.promptTokens = promptTokens;
return result;
}
let payload = {
@@ -391,25 +417,14 @@ class GoogleClient extends BaseClient {
.map((message) => formatMessage({ message, langChain: true })),
},
],
parameters: this.modelOptions,
};
let promptPrefix = (this.options.promptPrefix ?? '').trim();
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
}
if (promptPrefix) {
payload.instances[0].context = promptPrefix;
}
if (this.options.examples.length > 0) {
payload.instances[0].examples = this.options.examples;
if (this.systemMessage) {
payload.instances[0].context = this.systemMessage;
}
logger.debug('[GoogleClient] buildMessages', payload);
return { prompt: payload };
return { prompt: payload, tokenCountMap, promptTokens };
}
async buildMessagesPrompt(messages, parentMessageId) {
@@ -423,10 +438,7 @@ class GoogleClient extends BaseClient {
parentMessageId,
});
const formattedMessages = orderedMessages.map((message) => ({
author: message.isCreatedByUser ? this.userLabel : this.modelLabel,
content: message?.content ?? message.text,
}));
const formattedMessages = orderedMessages.map(this.formatMessages());
let lastAuthor = '';
let groupedMessages = [];
@@ -454,17 +466,7 @@ class GoogleClient extends BaseClient {
identityPrefix = `${identityPrefix}\nYou are ${this.options.modelLabel}`;
}
let promptPrefix = (this.options.promptPrefix ?? '').trim();
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
}
if (promptPrefix) {
// If the prompt prefix doesn't end with the end token, add it.
if (!promptPrefix.endsWith(`${this.endToken}`)) {
promptPrefix = `${promptPrefix.trim()}${this.endToken}\n\n`;
}
promptPrefix = `\nContext:\n${promptPrefix}`;
}
let promptPrefix = (this.systemMessage ?? '').trim();
if (identityPrefix) {
promptPrefix = `${identityPrefix}${promptPrefix}`;
@@ -501,7 +503,7 @@ class GoogleClient extends BaseClient {
isCreatedByUser || !isEdited
? `\n\n${message.author}:`
: `${promptPrefix}\n\n${message.author}:`;
const messageString = `${messagePrefix}\n${message.content}${this.endToken}\n`;
const messageString = `${messagePrefix}\n${message.content}\n`;
let newPromptBody = `${messageString}${promptBody}`;
context.unshift(message);
@@ -567,34 +569,6 @@ class GoogleClient extends BaseClient {
return { prompt, context };
}
async _getCompletion(payload, abortController = null) {
if (!abortController) {
abortController = new AbortController();
}
const { debug } = this.options;
const url = this.completionsUrl;
if (debug) {
logger.debug('GoogleClient _getCompletion', { url, payload });
}
const opts = {
method: 'POST',
agent: new Agent({
bodyTimeout: 0,
headersTimeout: 0,
}),
signal: abortController.signal,
};
if (this.options.proxy) {
opts.agent = new ProxyAgent(this.options.proxy);
}
const client = await this.getClient();
const res = await client.request({ url, method: 'POST', data: payload });
logger.debug('GoogleClient _getCompletion', { res });
return res.data;
}
createLLM(clientOptions) {
const model = clientOptions.modelName ?? clientOptions.model;
clientOptions.location = loc;
@@ -613,33 +587,30 @@ class GoogleClient extends BaseClient {
}
}
if (this.project_id && this.isTextModel) {
logger.debug('Creating Google VertexAI client');
return new GoogleVertexAI(clientOptions);
} else if (this.project_id && this.isChatModel) {
logger.debug('Creating Chat Google VertexAI client');
return new ChatGoogleVertexAI(clientOptions);
} else if (this.project_id) {
if (this.project_id != null) {
logger.debug('Creating VertexAI client');
return new ChatVertexAI(clientOptions);
this.visionMode = undefined;
clientOptions.streaming = true;
const client = new ChatVertexAI(clientOptions);
client.temperature = clientOptions.temperature;
client.topP = clientOptions.topP;
client.topK = clientOptions.topK;
client.topLogprobs = clientOptions.topLogprobs;
client.frequencyPenalty = clientOptions.frequencyPenalty;
client.presencePenalty = clientOptions.presencePenalty;
client.maxOutputTokens = clientOptions.maxOutputTokens;
return client;
} else if (!EXCLUDED_GENAI_MODELS.test(model)) {
logger.debug('Creating GenAI client');
return new GenAI(this.apiKey).getGenerativeModel({ ...clientOptions, model }, requestOptions);
return new GenAI(this.apiKey).getGenerativeModel({ model }, requestOptions);
}
logger.debug('Creating Chat Google Generative AI client');
return new ChatGoogleGenerativeAI({ ...clientOptions, apiKey: this.apiKey });
}
async getCompletion(_payload, options = {}) {
const { parameters, instances } = _payload;
const { onProgress, abortController } = options;
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
const { messages: _messages, context, examples: _examples } = instances?.[0] ?? {};
let examples;
let clientOptions = { ...parameters, maxRetries: 2 };
initializeClient() {
let clientOptions = { ...this.modelOptions };
if (this.project_id) {
clientOptions['authOptions'] = {
@@ -650,184 +621,248 @@ class GoogleClient extends BaseClient {
};
}
if (!parameters) {
clientOptions = { ...clientOptions, ...this.modelOptions };
}
if (this.isGenerativeModel && !this.project_id) {
clientOptions.modelName = clientOptions.model;
delete clientOptions.model;
}
if (_examples && _examples.length) {
examples = _examples
.map((ex) => {
const { input, output } = ex;
if (!input || !output) {
return undefined;
}
return {
input: new HumanMessage(input.content),
output: new AIMessage(output.content),
};
})
.filter((ex) => ex);
this.client = this.createLLM(clientOptions);
return this.client;
}
clientOptions.examples = examples;
}
const model = this.createLLM(clientOptions);
async getCompletion(_payload, options = {}) {
const { onProgress, abortController } = options;
const safetySettings = getSafetySettings(this.modelOptions.model);
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
const modelName = this.modelOptions.modelName ?? this.modelOptions.model ?? '';
let reply = '';
const messages = this.isTextModel ? _payload.trim() : _messages;
if (!this.isVisionModel && context && messages?.length > 0) {
messages.unshift(new SystemMessage(context));
}
const modelName = clientOptions.modelName ?? clientOptions.model ?? '';
if (!EXCLUDED_GENAI_MODELS.test(modelName) && !this.project_id) {
const client = model;
const requestOptions = {
contents: _payload,
};
let promptPrefix = (this.options.promptPrefix ?? '').trim();
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
}
if (promptPrefix.length) {
requestOptions.systemInstruction = {
parts: [
{
text: promptPrefix,
},
],
/** @type {Error} */
let error;
try {
if (!EXCLUDED_GENAI_MODELS.test(modelName) && !this.project_id) {
/** @type {GenerativeModel} */
const client = this.client;
/** @type {GenerateContentRequest} */
const requestOptions = {
safetySettings,
contents: _payload,
generationConfig: googleGenConfigSchema.parse(this.modelOptions),
};
const promptPrefix = (this.systemMessage ?? '').trim();
if (promptPrefix.length) {
requestOptions.systemInstruction = {
parts: [
{
text: promptPrefix,
},
],
};
}
const delay = modelName.includes('flash') ? 8 : 15;
/** @type {GenAIUsageMetadata} */
let usageMetadata;
abortController.signal.addEventListener(
'abort',
() => {
logger.warn('[GoogleClient] Request was aborted', abortController.signal.reason);
},
{ once: true },
);
const result = await client.generateContentStream(requestOptions, {
signal: abortController.signal,
});
for await (const chunk of result.stream) {
usageMetadata = !usageMetadata
? chunk?.usageMetadata
: Object.assign(usageMetadata, chunk?.usageMetadata);
const chunkText = chunk.text();
await this.generateTextStream(chunkText, onProgress, {
delay,
});
reply += chunkText;
await sleep(streamRate);
}
if (usageMetadata) {
this.usage = {
input_tokens: usageMetadata.promptTokenCount,
output_tokens: usageMetadata.candidatesTokenCount,
};
}
return reply;
}
requestOptions.safetySettings = _payload.safetySettings;
const { instances } = _payload;
const { messages: messages, context } = instances?.[0] ?? {};
const delay = modelName.includes('flash') ? 8 : 15;
const result = await client.generateContentStream(requestOptions);
for await (const chunk of result.stream) {
const chunkText = chunk.text();
if (!this.isVisionModel && context && messages?.length > 0) {
messages.unshift(new SystemMessage(context));
}
/** @type {import('@langchain/core/messages').AIMessageChunk['usage_metadata']} */
let usageMetadata;
/** @type {ChatVertexAI} */
const client = this.client;
const stream = await client.stream(messages, {
signal: abortController.signal,
streamUsage: true,
safetySettings,
});
let delay = this.options.streamRate || 8;
if (!this.options.streamRate) {
if (this.isGenerativeModel) {
delay = 15;
}
if (modelName.includes('flash')) {
delay = 5;
}
}
for await (const chunk of stream) {
if (chunk?.usage_metadata) {
const metadata = chunk.usage_metadata;
for (const key in metadata) {
if (Number.isNaN(metadata[key])) {
delete metadata[key];
}
}
usageMetadata = !usageMetadata ? metadata : concat(usageMetadata, metadata);
}
const chunkText = chunk?.content ?? '';
await this.generateTextStream(chunkText, onProgress, {
delay,
});
reply += chunkText;
await sleep(streamRate);
}
return reply;
if (usageMetadata) {
this.usage = usageMetadata;
}
} catch (e) {
error = e;
logger.error('[GoogleClient] There was an issue generating the completion', e);
}
const stream = await model.stream(messages, {
signal: abortController.signal,
safetySettings: _payload.safetySettings,
});
let delay = this.options.streamRate || 8;
if (!this.options.streamRate) {
if (this.isGenerativeModel) {
delay = 15;
}
if (modelName.includes('flash')) {
delay = 5;
}
if (error != null && reply === '') {
const errorMessage = `{ "type": "${ErrorTypes.GoogleError}", "info": "${
error.message ?? 'The Google provider failed to generate content, please contact the Admin.'
}" }`;
throw new Error(errorMessage);
}
for await (const chunk of stream) {
const chunkText = chunk?.content ?? chunk;
await this.generateTextStream(chunkText, onProgress, {
delay,
});
reply += chunkText;
}
return reply;
}
/**
* Get stream usage as returned by this client's API response.
* @returns {UsageMetadata} The stream usage object.
*/
getStreamUsage() {
return this.usage;
}
/**
* Calculates the correct token count for the current user message based on the token count map and API usage.
* Edge case: If the calculation results in a negative value, it returns the original estimate.
* If revisiting a conversation with a chat history entirely composed of token estimates,
* the cumulative token count going forward should become more accurate as the conversation progresses.
* @param {Object} params - The parameters for the calculation.
* @param {Record<string, number>} params.tokenCountMap - A map of message IDs to their token counts.
* @param {string} params.currentMessageId - The ID of the current message to calculate.
* @param {UsageMetadata} params.usage - The usage object returned by the API.
* @returns {number} The correct token count for the current user message.
*/
calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage }) {
const originalEstimate = tokenCountMap[currentMessageId] || 0;
if (!usage || typeof usage.input_tokens !== 'number') {
return originalEstimate;
}
tokenCountMap[currentMessageId] = 0;
const totalTokensFromMap = Object.values(tokenCountMap).reduce((sum, count) => {
const numCount = Number(count);
return sum + (isNaN(numCount) ? 0 : numCount);
}, 0);
const totalInputTokens = usage.input_tokens ?? 0;
const currentMessageTokens = totalInputTokens - totalTokensFromMap;
return currentMessageTokens > 0 ? currentMessageTokens : originalEstimate;
}
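A quick worked example of the calculation above, using made-up numbers that are not from the source: if the rest of the history is mapped to 120 estimated tokens and the API reports 150 input tokens, the 30-token difference is attributed to the current message; a non-positive difference falls back to the original estimate.
// Hypothetical illustration of calculateCurrentTokenCount (values are invented).
// `client` stands for any instance exposing the method above.
const tokenCountMap = { 'msg-1': 50, 'msg-2': 70, 'msg-3': 25 }; // 'msg-3' is the current estimate
const usage = { input_tokens: 150, output_tokens: 40 };
const tokens = client.calculateCurrentTokenCount({
  tokenCountMap,
  currentMessageId: 'msg-3',
  usage,
});
// 150 (API) - (50 + 70) from the map = 30 tokens for 'msg-3'.
// If the difference were <= 0, the original estimate of 25 would be returned instead.
console.log(tokens); // 30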
/**
* @param {object} params
* @param {number} params.promptTokens
* @param {number} params.completionTokens
* @param {UsageMetadata} [params.usage]
* @param {string} [params.model]
* @param {string} [params.context='message']
* @returns {Promise<void>}
*/
async recordTokenUsage({ promptTokens, completionTokens, model, context = 'message' }) {
await spendTokens(
{
context,
user: this.user ?? this.options.req?.user?.id,
conversationId: this.conversationId,
model: model ?? this.modelOptions.model,
endpointTokenConfig: this.options.endpointTokenConfig,
},
{ promptTokens, completionTokens },
);
}
/**
* Stripped-down logic for generating a title. This uses the non-streaming APIs, since the user does not see titles streaming
* (i.e., titles are generated in a single response rather than streamed to the client)
*/
async titleChatCompletion(_payload, options = {}) {
const { abortController } = options;
const { parameters, instances } = _payload;
const { messages: _messages, examples: _examples } = instances?.[0] ?? {};
let clientOptions = { ...parameters, maxRetries: 2 };
logger.debug('Initialized title client options');
if (this.project_id) {
clientOptions['authOptions'] = {
credentials: {
...this.serviceKey,
},
projectId: this.project_id,
};
}
if (!parameters) {
clientOptions = { ...clientOptions, ...this.modelOptions };
}
if (this.isGenerativeModel && !this.project_id) {
clientOptions.modelName = clientOptions.model;
delete clientOptions.model;
}
const model = this.createLLM(clientOptions);
let reply = '';
const messages = this.isTextModel ? _payload.trim() : _messages;
const { abortController } = options;
const modelName = clientOptions.modelName ?? clientOptions.model ?? '';
if (!EXCLUDED_GENAI_MODELS.test(modelName) && !this.project_id) {
const model = this.modelOptions.modelName ?? this.modelOptions.model ?? '';
const safetySettings = getSafetySettings(model);
if (!EXCLUDED_GENAI_MODELS.test(model) && !this.project_id) {
logger.debug('Identified titling model as GenAI version');
/** @type {GenerativeModel} */
const client = model;
const client = this.client;
const requestOptions = {
contents: _payload,
safetySettings,
generationConfig: {
temperature: 0.5,
},
};
let promptPrefix = (this.options.promptPrefix ?? '').trim();
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
}
if (this.options?.promptPrefix?.length) {
requestOptions.systemInstruction = {
parts: [
{
text: promptPrefix,
},
],
};
}
const safetySettings = _payload.safetySettings;
requestOptions.safetySettings = safetySettings;
const result = await client.generateContent(requestOptions);
reply = result.response?.text();
return reply;
} else {
logger.debug('Beginning titling');
const safetySettings = _payload.safetySettings;
const titleResponse = await model.invoke(messages, {
const { instances } = _payload;
const { messages } = instances?.[0] ?? {};
const titleResponse = await this.client.invoke(messages, {
signal: abortController.signal,
timeout: 7000,
safetySettings: safetySettings,
safetySettings,
});
if (titleResponse.usage_metadata) {
await this.recordTokenUsage({
model,
promptTokens: titleResponse.usage_metadata.input_tokens,
completionTokens: titleResponse.usage_metadata.output_tokens,
context: 'title',
});
}
reply = titleResponse.content;
// TODO: RECORD TOKEN USAGE
return reply;
}
}
@@ -851,15 +886,8 @@ class GoogleClient extends BaseClient {
},
]);
if (this.isVisionModel) {
logger.warn(
`Current vision model does not support titling without an attachment; falling back to default model ${settings.model.default}`,
);
payload.parameters = { ...payload.parameters, model: settings.model.default };
}
try {
this.initializeClient();
title = await this.titleChatCompletion(payload, {
abortController: new AbortController(),
onProgress: () => {},
@@ -873,8 +901,10 @@ class GoogleClient extends BaseClient {
getSaveOptions() {
return {
endpointType: null,
artifacts: this.options.artifacts,
promptPrefix: this.options.promptPrefix,
maxContextTokens: this.options.maxContextTokens,
modelLabel: this.options.modelLabel,
iconURL: this.options.iconURL,
greeting: this.options.greeting,
@@ -888,61 +918,39 @@ class GoogleClient extends BaseClient {
}
async sendCompletion(payload, opts = {}) {
payload.safetySettings = this.getSafetySettings();
let reply = '';
reply = await this.getCompletion(payload, opts);
return reply.trim();
}
getSafetySettings() {
return [
{
category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
threshold:
process.env.GOOGLE_SAFETY_SEXUALLY_EXPLICIT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
{
category: 'HARM_CATEGORY_HATE_SPEECH',
threshold: process.env.GOOGLE_SAFETY_HATE_SPEECH || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
{
category: 'HARM_CATEGORY_HARASSMENT',
threshold: process.env.GOOGLE_SAFETY_HARASSMENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
{
category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
threshold:
process.env.GOOGLE_SAFETY_DANGEROUS_CONTENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
{
category: 'HARM_CATEGORY_CIVIC_INTEGRITY',
/**
* Note: this was added since `gemini-2.0-flash-thinking-exp-1219` does not
* accept 'HARM_BLOCK_THRESHOLD_UNSPECIFIED' for 'HARM_CATEGORY_CIVIC_INTEGRITY'
* */
threshold: process.env.GOOGLE_SAFETY_CIVIC_INTEGRITY || 'BLOCK_NONE',
},
];
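Each threshold above is read from an environment variable with a fallback default, so deployments can tighten or relax Google's safety filters without code changes. A minimal sketch of the corresponding .env entries; the values are only examples and must be valid HarmBlockThreshold names from Google's API:
# Illustrative .env overrides for the safety settings above (example values).
GOOGLE_SAFETY_SEXUALLY_EXPLICIT=BLOCK_ONLY_HIGH
GOOGLE_SAFETY_HATE_SPEECH=BLOCK_ONLY_HIGH
GOOGLE_SAFETY_HARASSMENT=BLOCK_MEDIUM_AND_ABOVE
GOOGLE_SAFETY_DANGEROUS_CONTENT=BLOCK_MEDIUM_AND_ABOVE
GOOGLE_SAFETY_CIVIC_INTEGRITY=BLOCK_NONE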
getEncoding() {
return 'cl100k_base';
}
/* TODO: Handle tokens with Google tokenization. NOTE: these are required */
static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {
if (tokenizersCache[encoding]) {
return tokenizersCache[encoding];
}
let tokenizer;
if (isModelName) {
tokenizer = encodingForModel(encoding, extendSpecialTokens);
} else {
tokenizer = getEncoding(encoding, extendSpecialTokens);
}
tokenizersCache[encoding] = tokenizer;
return tokenizer;
async getVertexTokenCount(text) {
/** @type {ChatVertexAI} */
const client = this.client ?? this.initializeClient();
const connection = client.connection;
const gAuthClient = connection.client;
const tokenEndpoint = `https://${connection._endpoint}/${connection.apiVersion}/projects/${this.project_id}/locations/${connection._location}/publishers/google/models/${connection.model}/:countTokens`;
const result = await gAuthClient.request({
url: tokenEndpoint,
method: 'POST',
data: {
contents: [{ role: 'user', parts: [{ text }] }],
},
});
return result;
}
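A hedged usage sketch for the helper above. The shape of the response is assumed from Vertex AI's public countTokens REST API (typically totalTokens and totalBillableCharacters) and is not shown in this diff; `googleClient` is a hypothetical instance.
// Assumed usage; field names follow Vertex AI's countTokens REST response.
const response = await googleClient.getVertexTokenCount('Hello, Gemini!');
const { totalTokens, totalBillableCharacters } = response.data ?? {};
console.log(totalTokens, totalBillableCharacters);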
/**
* Returns the token count of a given text. It also checks and resets the tokenizers if necessary.
* @param {string} text - The text to get the token count for.
* @returns {number} The token count of the given text.
*/
getTokenCount(text) {
return this.gptEncoder.encode(text, 'all').length;
const encoding = this.getEncoding();
return Tokenizer.getTokenCount(text, encoding);
}
}

View File

@@ -2,7 +2,7 @@ const { z } = require('zod');
const axios = require('axios');
const { Ollama } = require('ollama');
const { Constants } = require('librechat-data-provider');
const { deriveBaseURL } = require('~/utils');
const { deriveBaseURL, logAxiosError } = require('~/utils');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');
@@ -68,7 +68,7 @@ class OllamaClient {
} catch (error) {
const logMessage =
'Failed to fetch models from Ollama API. If you are not using Ollama directly, and instead, through some aggregator or reverse proxy that handles fetching via OpenAI spec, ensure the name of the endpoint doesn\'t start with `ollama` (case-insensitive).';
logger.error(logMessage, error);
logAxiosError({ message: logMessage, error });
return [];
}
}

View File

@@ -1,11 +1,13 @@
const OpenAI = require('openai');
const { OllamaClient } = require('./OllamaClient');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { SplitStreamHandler, GraphEvents } = require('@librechat/agents');
const {
Constants,
ImageDetail,
EModelEndpoint,
resolveHeaders,
KnownEndpoints,
openAISettings,
ImageDetailCost,
CohereConstants,
@@ -13,7 +15,6 @@ const {
validateVisionModel,
mapModelToAzureConfig,
} = require('librechat-data-provider');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
extractBaseURL,
constructAzureURL,
@@ -29,21 +30,17 @@ const {
createContextHandlers,
} = require('./prompts');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { addSpaceIfNeeded, isEnabled, sleep } = require('~/server/utils');
const Tokenizer = require('~/server/services/Tokenizer');
const { spendTokens } = require('~/models/spendTokens');
const { isEnabled, sleep } = require('~/server/utils');
const { handleOpenAIErrors } = require('./tools/util');
const { createLLM, RunManager } = require('./llm');
const { logger, sendEvent } = require('~/config');
const ChatGPTClient = require('./ChatGPTClient');
const { summaryBuffer } = require('./memory');
const { runTitleChain } = require('./chains');
const { tokenSplit } = require('./document');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
// Cache to store Tiktoken instances
const tokenizersCache = {};
// Counter for keeping track of the number of tokenizer calls
let tokenizerCallsCount = 0;
class OpenAIClient extends BaseClient {
constructor(apiKey, options = {}) {
@@ -69,7 +66,9 @@ class OpenAIClient extends BaseClient {
/** @type {OpenAIUsageMetadata | undefined} */
this.usage;
/** @type {boolean|undefined} */
this.isO1Model;
this.isOmni;
/** @type {SplitStreamHandler | undefined} */
this.streamHandler;
}
// TODO: PluginsClient calls this 3x, unneeded
@@ -107,22 +106,13 @@ class OpenAIClient extends BaseClient {
this.checkVisionRequest(this.options.attachments);
}
const o1Pattern = /\bo1\b/i;
this.isO1Model = o1Pattern.test(this.modelOptions.model);
const { OPENROUTER_API_KEY, OPENAI_FORCE_PROMPT } = process.env ?? {};
if (OPENROUTER_API_KEY && !this.azure) {
this.apiKey = OPENROUTER_API_KEY;
this.useOpenRouter = true;
}
const omniPattern = /\b(o1|o3)\b/i;
this.isOmni = omniPattern.test(this.modelOptions.model);
const { OPENAI_FORCE_PROMPT } = process.env ?? {};
const { reverseProxyUrl: reverseProxy } = this.options;
if (
!this.useOpenRouter &&
reverseProxy &&
reverseProxy.includes('https://openrouter.ai/api/v1')
) {
if (!this.useOpenRouter && reverseProxy && reverseProxy.includes(KnownEndpoints.openrouter)) {
this.useOpenRouter = true;
}
@@ -148,7 +138,7 @@ class OpenAIClient extends BaseClient {
const { model } = this.modelOptions;
this.isChatCompletion =
o1Pattern.test(model) || model.includes('gpt') || this.useOpenRouter || !!reverseProxy;
omniPattern.test(model) || model.includes('gpt') || this.useOpenRouter || !!reverseProxy;
this.isChatGptModel = this.isChatCompletion;
if (
model.includes('text-davinci') ||
@@ -307,75 +297,8 @@ class OpenAIClient extends BaseClient {
}
}
// Selects an appropriate tokenizer based on the current configuration of the client instance.
// It takes into account factors such as whether it's a chat completion, an unofficial chat GPT model, etc.
selectTokenizer() {
let tokenizer;
this.encoding = 'text-davinci-003';
if (this.isChatCompletion) {
this.encoding = this.modelOptions.model.includes('gpt-4o') ? 'o200k_base' : 'cl100k_base';
tokenizer = this.constructor.getTokenizer(this.encoding);
} else if (this.isUnofficialChatGptModel) {
const extendSpecialTokens = {
'<|im_start|>': 100264,
'<|im_end|>': 100265,
};
tokenizer = this.constructor.getTokenizer(this.encoding, true, extendSpecialTokens);
} else {
try {
const { model } = this.modelOptions;
this.encoding = model.includes('instruct') ? 'text-davinci-003' : model;
tokenizer = this.constructor.getTokenizer(this.encoding, true);
} catch {
tokenizer = this.constructor.getTokenizer('text-davinci-003', true);
}
}
return tokenizer;
}
// Retrieves a tokenizer either from the cache or creates a new one if one doesn't exist in the cache.
// If a tokenizer is being created, it's also added to the cache.
static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {
let tokenizer;
if (tokenizersCache[encoding]) {
tokenizer = tokenizersCache[encoding];
} else {
if (isModelName) {
tokenizer = encodingForModel(encoding, extendSpecialTokens);
} else {
tokenizer = getEncoding(encoding, extendSpecialTokens);
}
tokenizersCache[encoding] = tokenizer;
}
return tokenizer;
}
// Frees all encoders in the cache and resets the count.
static freeAndResetAllEncoders() {
try {
Object.keys(tokenizersCache).forEach((key) => {
if (tokenizersCache[key]) {
tokenizersCache[key].free();
delete tokenizersCache[key];
}
});
// Reset count
tokenizerCallsCount = 1;
} catch (error) {
logger.error('[OpenAIClient] Free and reset encoders error', error);
}
}
// Checks if the cache of tokenizers has reached a certain size. If it has, it frees and resets all tokenizers.
resetTokenizersIfNecessary() {
if (tokenizerCallsCount >= 25) {
if (this.options.debug) {
logger.debug('[OpenAIClient] freeAndResetAllEncoders: reached 25 encodings, resetting...');
}
this.constructor.freeAndResetAllEncoders();
}
tokenizerCallsCount++;
getEncoding() {
return this.model?.includes('gpt-4o') ? 'o200k_base' : 'cl100k_base';
}
/**
@@ -384,15 +307,8 @@ class OpenAIClient extends BaseClient {
* @returns {number} The token count of the given text.
*/
getTokenCount(text) {
this.resetTokenizersIfNecessary();
try {
const tokenizer = this.selectTokenizer();
return tokenizer.encode(text, 'all').length;
} catch (error) {
this.constructor.freeAndResetAllEncoders();
const tokenizer = this.selectTokenizer();
return tokenizer.encode(text, 'all').length;
}
const encoding = this.getEncoding();
return Tokenizer.getTokenCount(text, encoding);
}
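The tokenizer caching and reset logic removed above has presumably moved into the shared ~/server/services/Tokenizer module. Its real implementation is not part of this diff; the following is only a minimal sketch of what such a service could look like, assuming it wraps tiktoken the same way the removed static helpers did:
// Hypothetical sketch of a shared Tokenizer service (not the actual module).
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');

class TokenizerService {
  constructor() {
    // Cache Tiktoken instances per encoding, mirroring the removed tokenizersCache.
    this.cache = {};
  }

  getTokenizer(encoding, isModelName = false) {
    if (this.cache[encoding]) {
      return this.cache[encoding];
    }
    const tokenizer = isModelName ? encodingForModel(encoding) : getEncoding(encoding);
    this.cache[encoding] = tokenizer;
    return tokenizer;
  }

  /** Counts tokens for `text` with the given encoding, e.g. 'o200k_base' or 'cl100k_base'. */
  getTokenCount(text, encoding) {
    const tokenizer = this.getTokenizer(encoding);
    return tokenizer.encode(text, 'all').length;
  }
}

module.exports = new TokenizerService();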
/**
@@ -551,7 +467,7 @@ class OpenAIClient extends BaseClient {
promptPrefix = this.augmentedPrompt + promptPrefix;
}
if (promptPrefix && this.isO1Model !== true) {
if (promptPrefix && this.isOmni !== true) {
promptPrefix = `Instructions:\n${promptPrefix.trim()}`;
instructions = {
role: 'system',
@@ -579,12 +495,11 @@ class OpenAIClient extends BaseClient {
};
/** EXPERIMENTAL */
if (promptPrefix && this.isO1Model === true) {
if (promptPrefix && this.isOmni === true) {
const lastUserMessageIndex = payload.findLastIndex((message) => message.role === 'user');
if (lastUserMessageIndex !== -1) {
payload[
lastUserMessageIndex
].content = `${promptPrefix}\n${payload[lastUserMessageIndex].content}`;
payload[lastUserMessageIndex].content =
`${promptPrefix}\n${payload[lastUserMessageIndex].content}`;
}
}
@@ -693,8 +608,6 @@ class OpenAIClient extends BaseClient {
model = 'gpt-4o-mini',
modelName,
temperature = 0.2,
presence_penalty = 0,
frequency_penalty = 0,
max_tokens,
streaming,
context,
@@ -705,8 +618,6 @@ class OpenAIClient extends BaseClient {
const modelOptions = {
modelName: modelName ?? model,
temperature,
presence_penalty,
frequency_penalty,
user: this.user,
};
@@ -877,7 +788,11 @@ ${convo}
}
title = (
await this.sendPayload(instructionsPayload, { modelOptions, useChatCompletion })
await this.sendPayload(instructionsPayload, {
modelOptions,
useChatCompletion,
context: 'title',
})
).replaceAll('"', '');
const completionTokens = this.getTokenCount(title);
@@ -1010,7 +925,10 @@ ${convo}
);
if (excessTokenCount > maxContextTokens) {
({ context } = await this.getMessagesWithinTokenLimit(context, maxContextTokens));
({ context } = await this.getMessagesWithinTokenLimit({
messages: context,
maxContextTokens,
}));
}
if (context.length === 0) {
@@ -1140,10 +1058,58 @@ ${convo}
});
}
/**
*
* @param {string[]} [intermediateReply]
* @returns {string}
*/
getStreamText(intermediateReply) {
if (!this.streamHandler) {
return intermediateReply?.join('') ?? '';
}
let thinkMatch;
let remainingText;
let reasoningText = '';
if (this.streamHandler.reasoningTokens.length > 0) {
reasoningText = this.streamHandler.reasoningTokens.join('');
thinkMatch = reasoningText.match(/<think>([\s\S]*?)<\/think>/)?.[1]?.trim();
if (thinkMatch != null && thinkMatch) {
const reasoningTokens = `:::thinking\n${thinkMatch}\n:::\n`;
remainingText = reasoningText.split(/<\/think>/)?.[1]?.trim() || '';
return `${reasoningTokens}${remainingText}${this.streamHandler.tokens.join('')}`;
} else if (thinkMatch === '') {
remainingText = reasoningText.split(/<\/think>/)?.[1]?.trim() || '';
return `${remainingText}${this.streamHandler.tokens.join('')}`;
}
}
const reasoningTokens =
reasoningText.length > 0
? `:::thinking\n${reasoningText.replace('<think>', '').replace('</think>', '').trim()}\n:::\n`
: '';
return `${reasoningTokens}${this.streamHandler.tokens.join('')}`;
}
getMessageMapMethod() {
/**
* @param {TMessage} msg
*/
return (msg) => {
if (msg.text != null && msg.text && msg.text.startsWith(':::thinking')) {
msg.text = msg.text.replace(/:::thinking.*?:::/gs, '').trim();
}
return msg;
};
}
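Taken together, getStreamText and the message-map method above round-trip reasoning output: getStreamText rewrites accumulated <think>…</think> reasoning into a :::thinking block for display, and the map method strips that block back out before the saved text is reused as context. An illustrative input/output pair with made-up content:
// Illustrative only: what the two helpers above do with reasoning output.
// Given accumulated reasoning tokens '<think>Count syllables first.</think>'
// and content tokens 'Autumn moonlight...', getStreamText returns:
//   ':::thinking\nCount syllables first.\n:::\nAutumn moonlight...'
// When that saved text is mapped back into a payload, the regex removes the block:
const saved = ':::thinking\nCount syllables first.\n:::\nAutumn moonlight...';
console.log(saved.replace(/:::thinking.*?:::/gs, '').trim());
// => 'Autumn moonlight...'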
async chatCompletion({ payload, onProgress, abortController = null }) {
let error = null;
let intermediateReply = [];
const errorCallback = (err) => (error = err);
const intermediateReply = [];
try {
if (!abortController) {
abortController = new AbortController();
@@ -1247,7 +1213,7 @@ ${convo}
opts.defaultHeaders = { ...opts.defaultHeaders, 'api-key': this.apiKey };
}
if (this.isO1Model === true && modelOptions.max_tokens != null) {
if (this.isOmni === true && modelOptions.max_tokens != null) {
modelOptions.max_completion_tokens = modelOptions.max_tokens;
delete modelOptions.max_tokens;
}
@@ -1327,23 +1293,53 @@ ${convo}
let streamResolve;
if (
this.isO1Model === true &&
this.isOmni === true &&
(this.azure || /o1(?!-(?:mini|preview)).*$/.test(modelOptions.model)) &&
!/o3-.*$/.test(this.modelOptions.model) &&
modelOptions.stream
) {
delete modelOptions.stream;
delete modelOptions.stop;
} else if (!this.isOmni && modelOptions.reasoning_effort != null) {
delete modelOptions.reasoning_effort;
}
let reasoningKey = 'reasoning_content';
if (this.useOpenRouter) {
modelOptions.include_reasoning = true;
reasoningKey = 'reasoning';
}
this.streamHandler = new SplitStreamHandler({
reasoningKey,
accumulate: true,
runId: this.responseMessageId,
handlers: {
[GraphEvents.ON_RUN_STEP]: (event) => sendEvent(this.options.res, event),
[GraphEvents.ON_MESSAGE_DELTA]: (event) => sendEvent(this.options.res, event),
[GraphEvents.ON_REASONING_DELTA]: (event) => sendEvent(this.options.res, event),
},
});
intermediateReply = this.streamHandler.tokens;
if (modelOptions.stream) {
streamPromise = new Promise((resolve) => {
streamResolve = resolve;
});
/** @type {OpenAI.OpenAI.CompletionCreateParamsStreaming} */
const params = {
...modelOptions,
stream: true,
};
if (
this.options.endpoint === EModelEndpoint.openAI ||
this.options.endpoint === EModelEndpoint.azureOpenAI
) {
params.stream_options = { include_usage: true };
}
const stream = await openai.beta.chat.completions
.stream({
...modelOptions,
stream: true,
})
.stream(params)
.on('abort', () => {
/* Do nothing here */
})
@@ -1361,20 +1357,44 @@ ${convo}
}
if (typeof finalMessage.content !== 'string' || finalMessage.content.trim() === '') {
finalChatCompletion.choices[0].message.content = intermediateReply.join('');
finalChatCompletion.choices[0].message.content = this.streamHandler.tokens.join('');
}
})
.on('finalMessage', (message) => {
if (message?.role !== 'assistant') {
stream.messages.push({ role: 'assistant', content: intermediateReply.join('') });
stream.messages.push({
role: 'assistant',
content: this.streamHandler.tokens.join(''),
});
UnexpectedRoleError = true;
}
});
if (this.continued === true) {
const latestText = addSpaceIfNeeded(
this.currentMessages[this.currentMessages.length - 1]?.text ?? '',
);
this.streamHandler.handle({
choices: [
{
delta: {
content: latestText,
},
},
],
});
}
for await (const chunk of stream) {
const token = chunk.choices[0]?.delta?.content || '';
intermediateReply.push(token);
onProgress(token);
// Add finish_reason: null if missing in any choice
if (chunk.choices) {
chunk.choices.forEach((choice) => {
if (!('finish_reason' in choice)) {
choice.finish_reason = null;
}
});
}
this.streamHandler.handle(chunk);
if (abortController.signal.aborted) {
stream.controller.abort();
break;
@@ -1417,7 +1437,7 @@ ${convo}
if (!Array.isArray(choices) || choices.length === 0) {
logger.warn('[OpenAIClient] Chat completion response has no choices');
return intermediateReply.join('');
return this.streamHandler.tokens.join('');
}
const { message, finish_reason } = choices[0] ?? {};
@@ -1427,11 +1447,11 @@ ${convo}
if (!message) {
logger.warn('[OpenAIClient] Message is undefined in chatCompletion response');
return intermediateReply.join('');
return this.streamHandler.tokens.join('');
}
if (typeof message.content !== 'string' || message.content.trim() === '') {
const reply = intermediateReply.join('');
const reply = this.streamHandler.tokens.join('');
logger.debug(
'[OpenAIClient] chatCompletion: using intermediateReply due to empty message.content',
{ intermediateReply: reply },
@@ -1439,13 +1459,27 @@ ${convo}
return reply;
}
if (
this.streamHandler.reasoningTokens.length > 0 &&
this.options.context !== 'title' &&
!message.content.startsWith('<think>')
) {
return this.getStreamText();
} else if (
this.streamHandler.reasoningTokens.length > 0 &&
this.options.context !== 'title' &&
message.content.startsWith('<think>')
) {
return this.getStreamText();
}
return message.content;
} catch (err) {
if (
err?.message?.includes('abort') ||
(err instanceof OpenAI.APIError && err?.message?.includes('abort'))
) {
return intermediateReply.join('');
return this.getStreamText(intermediateReply);
}
if (
err?.message?.includes(
@@ -1460,10 +1494,18 @@ ${convo}
(err instanceof OpenAI.OpenAIError && err?.message?.includes('missing finish_reason'))
) {
logger.error('[OpenAIClient] Known OpenAI error:', err);
return intermediateReply.join('');
if (this.streamHandler && this.streamHandler.reasoningTokens.length) {
return this.getStreamText();
} else if (intermediateReply.length > 0) {
return this.getStreamText(intermediateReply);
} else {
throw err;
}
} else if (err instanceof OpenAI.APIError) {
if (intermediateReply.length > 0) {
return intermediateReply.join('');
if (this.streamHandler && this.streamHandler.reasoningTokens.length) {
return this.getStreamText();
} else if (intermediateReply.length > 0) {
return this.getStreamText(intermediateReply);
} else {
throw err;
}

View File

@@ -1,5 +1,4 @@
const OpenAIClient = require('./OpenAIClient');
const { CacheKeys, Time } = require('librechat-data-provider');
const { CallbackManager } = require('@langchain/core/callbacks/manager');
const { BufferMemory, ChatMessageHistory } = require('langchain/memory');
const { addImages, buildErrorInput, buildPromptPrefix } = require('./output_parsers');
@@ -11,7 +10,6 @@ const checkBalance = require('~/models/checkBalance');
const { isEnabled } = require('~/server/utils');
const { extractBaseURL } = require('~/utils');
const { loadTools } = require('./tools/util');
const { getLogStores } = require('~/cache');
const { logger } = require('~/config');
class PluginsClient extends OpenAIClient {
@@ -256,15 +254,6 @@ class PluginsClient extends OpenAIClient {
}
this.responsePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user);
const messageCache = getLogStores(CacheKeys.MESSAGES);
messageCache.set(
responseMessage.messageId,
{
text: responseMessage.text,
complete: true,
},
Time.FIVE_MINUTES,
);
delete responseMessage.tokenCount;
return { ...responseMessage, ...result };
}
@@ -291,7 +280,6 @@ class PluginsClient extends OpenAIClient {
logger.debug('[PluginsClient] sendMessage', { userMessageText: message, opts });
const {
user,
isEdited,
conversationId,
responseMessageId,
saveOptions,
@@ -370,7 +358,6 @@ class PluginsClient extends OpenAIClient {
conversationId,
parentMessageId: userMessage.messageId,
isCreatedByUser: false,
isEdited,
model: this.modelOptions.model,
sender: this.sender,
promptTokens,

View File

@@ -1,7 +1,7 @@
/**
* Anthropic API: Adds cache control to the appropriate user messages in the payload.
* @param {Array<AnthropicMessage>} messages - The array of message objects.
* @returns {Array<AnthropicMessage>} - The updated array of message objects with cache control added.
* @param {Array<AnthropicMessage | BaseMessage>} messages - The array of message objects.
* @returns {Array<AnthropicMessage | BaseMessage>} - The updated array of message objects with cache control added.
*/
function addCacheControl(messages) {
if (!Array.isArray(messages) || messages.length < 2) {
@@ -13,7 +13,9 @@ function addCacheControl(messages) {
for (let i = updatedMessages.length - 1; i >= 0 && userMessagesModified < 2; i--) {
const message = updatedMessages[i];
if (message.role !== 'user') {
if (message.getType != null && message.getType() !== 'human') {
continue;
} else if (message.getType == null && message.role !== 'user') {
continue;
}
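The widened guard above lets the helper accept both plain Anthropic-style messages and LangChain BaseMessage instances; only entries recognized as user/human messages remain eligible for cache control. A small illustration of the two shapes the check now accepts (hypothetical values):
// Both of these now pass the user-message check in addCacheControl:
const plainMessage = { role: 'user', content: 'Summarize the attached report.' };

const { HumanMessage } = require('@langchain/core/messages');
const baseMessage = new HumanMessage('Summarize the attached report.'); // getType() === 'human'

// Assistant/AI messages of either shape are skipped by the loop above.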

View File

@@ -282,4 +282,47 @@ describe('formatAgentMessages', () => {
// Additional check to ensure the consecutive assistant messages were combined
expect(result[1].content).toHaveLength(2);
});
it('should skip THINK type content parts', () => {
const payload = [
{
role: 'assistant',
content: [
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Initial response' },
{ type: ContentTypes.THINK, [ContentTypes.THINK]: 'Reasoning about the problem...' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final answer' },
],
},
];
const result = formatAgentMessages(payload);
expect(result).toHaveLength(1);
expect(result[0]).toBeInstanceOf(AIMessage);
expect(result[0].content).toEqual('Initial response\nFinal answer');
});
it('should join TEXT content as string when THINK content type is present', () => {
const payload = [
{
role: 'assistant',
content: [
{ type: ContentTypes.THINK, [ContentTypes.THINK]: 'Analyzing the problem...' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'First part of response' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Second part of response' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final part of response' },
],
},
];
const result = formatAgentMessages(payload);
expect(result).toHaveLength(1);
expect(result[0]).toBeInstanceOf(AIMessage);
expect(typeof result[0].content).toBe('string');
expect(result[0].content).toBe(
'First part of response\nSecond part of response\nFinal part of response',
);
expect(result[0].content).not.toContain('Analyzing the problem...');
});
});

View File

@@ -153,6 +153,7 @@ const formatAgentMessages = (payload) => {
let currentContent = [];
let lastAIMessage = null;
let hasReasoning = false;
for (const part of message.content) {
if (part.type === ContentTypes.TEXT && part.tool_call_ids) {
/*
@@ -207,11 +208,25 @@ const formatAgentMessages = (payload) => {
content: output || '',
}),
);
} else if (part.type === ContentTypes.THINK) {
hasReasoning = true;
continue;
} else {
currentContent.push(part);
}
}
if (hasReasoning) {
currentContent = currentContent
.reduce((acc, curr) => {
if (curr.type === ContentTypes.TEXT) {
return `${acc}${curr[ContentTypes.TEXT]}\n`;
}
return acc;
}, '')
.trim();
}
if (currentContent.length > 0) {
messages.push(new AIMessage({ content: currentContent }));
}

View File

@@ -60,7 +60,6 @@ describe('formatMessage', () => {
error: false,
finish_reason: null,
isCreatedByUser: true,
isEdited: false,
model: null,
parentMessageId: Constants.NO_PARENT,
sender: 'User',

View File

@@ -4,7 +4,7 @@ const summaryPrompts = require('./summaryPrompts');
const handleInputs = require('./handleInputs');
const instructions = require('./instructions');
const titlePrompts = require('./titlePrompts');
const truncateText = require('./truncateText');
const truncate = require('./truncate');
const createVisionPrompt = require('./createVisionPrompt');
const createContextHandlers = require('./createContextHandlers');
@@ -15,7 +15,7 @@ module.exports = {
...handleInputs,
...instructions,
...titlePrompts,
...truncateText,
...truncate,
createVisionPrompt,
createContextHandlers,
};

View File

@@ -0,0 +1,115 @@
const MAX_CHAR = 255;
/**
* Truncates a given text to a specified maximum length, appending ellipsis and a notification
* if the original text exceeds the maximum length.
*
* @param {string} text - The text to be truncated.
* @param {number} [maxLength=MAX_CHAR] - The maximum length of the text after truncation. Defaults to MAX_CHAR.
* @returns {string} The truncated text if the original text length exceeds maxLength, otherwise returns the original text.
*/
function truncateText(text, maxLength = MAX_CHAR) {
if (text.length > maxLength) {
return `${text.slice(0, maxLength)}... [text truncated for brevity]`;
}
return text;
}
/**
* Truncates a given text to a specified maximum length by showing the first half and the last half of the text,
* separated by ellipsis. This method ensures the output does not exceed the maximum length, including the addition
* of ellipsis and notification if the original text exceeds the maximum length.
*
* @param {string} text - The text to be truncated.
* @param {number} [maxLength=MAX_CHAR] - The maximum length of the output text after truncation. Defaults to MAX_CHAR.
* @returns {string} The truncated text showing the first half and the last half, or the original text if it does not exceed maxLength.
*/
function smartTruncateText(text, maxLength = MAX_CHAR) {
const ellipsis = '...';
const notification = ' [text truncated for brevity]';
const halfMaxLength = Math.floor((maxLength - ellipsis.length - notification.length) / 2);
if (text.length > maxLength) {
const startLastHalf = text.length - halfMaxLength;
return `${text.slice(0, halfMaxLength)}${ellipsis}${text.slice(startLastHalf)}${notification}`;
}
return text;
}
/**
* @param {TMessage[]} _messages
* @param {number} maxContextTokens
* @param {function({role: string, content: TMessageContent[]}): number} getTokenCountForMessage
*
* @returns {{
* dbMessages: TMessage[],
* editedIndices: number[]
* }}
*/
function truncateToolCallOutputs(_messages, maxContextTokens, getTokenCountForMessage) {
const THRESHOLD_PERCENTAGE = 0.5;
const targetTokenLimit = maxContextTokens * THRESHOLD_PERCENTAGE;
let currentTokenCount = 3;
const messages = [..._messages];
const processedMessages = [];
let currentIndex = messages.length;
const editedIndices = new Set();
while (messages.length > 0) {
currentIndex--;
const message = messages.pop();
currentTokenCount += message.tokenCount;
if (currentTokenCount < targetTokenLimit) {
processedMessages.push(message);
continue;
}
if (!message.content || !Array.isArray(message.content)) {
processedMessages.push(message);
continue;
}
const toolCallIndices = message.content
.map((item, index) => (item.type === 'tool_call' ? index : -1))
.filter((index) => index !== -1)
.reverse();
if (toolCallIndices.length === 0) {
processedMessages.push(message);
continue;
}
const newContent = [...message.content];
// Truncate all tool outputs since we're over threshold
for (const index of toolCallIndices) {
const toolCall = newContent[index].tool_call;
if (!toolCall || !toolCall.output) {
continue;
}
editedIndices.add(currentIndex);
newContent[index] = {
...newContent[index],
tool_call: {
...toolCall,
output: '[OUTPUT_OMITTED_FOR_BREVITY]',
},
};
}
const truncatedMessage = {
...message,
content: newContent,
tokenCount: getTokenCountForMessage({ role: 'assistant', content: newContent }),
};
processedMessages.push(truncatedMessage);
}
return { dbMessages: processedMessages.reverse(), editedIndices: Array.from(editedIndices) };
}
module.exports = { truncateText, smartTruncateText, truncateToolCallOutputs };
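A brief usage sketch for the new module, with example values only: smartTruncateText keeps the head and tail of an oversized string within MAX_CHAR, and truncateToolCallOutputs blanks tool outputs once the running token count crosses half of maxContextTokens. The require path and token counts below are illustrative.
// Illustrative usage of the helpers exported above.
const { smartTruncateText, truncateToolCallOutputs } = require('./truncate');

const longText = 'a'.repeat(1000);
console.log(smartTruncateText(longText).length); // capped near MAX_CHAR (255)

// Hypothetical message history; token counts are invented.
const dbMessages = [
  {
    role: 'assistant',
    tokenCount: 600,
    content: [{ type: 'tool_call', tool_call: { output: '...huge JSON...' } }],
  },
  { role: 'user', tokenCount: 50, content: 'Thanks, now summarize it.' },
];
const { dbMessages: pruned, editedIndices } = truncateToolCallOutputs(
  dbMessages,
  1000, // maxContextTokens; outputs are truncated once usage exceeds 50% of this
  () => 20, // stand-in for getTokenCountForMessage
);
// pruned[0].content[0].tool_call.output === '[OUTPUT_OMITTED_FOR_BREVITY]'
// editedIndices === [0]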

View File

@@ -1,40 +0,0 @@
const MAX_CHAR = 255;
/**
* Truncates a given text to a specified maximum length, appending ellipsis and a notification
* if the original text exceeds the maximum length.
*
* @param {string} text - The text to be truncated.
* @param {number} [maxLength=MAX_CHAR] - The maximum length of the text after truncation. Defaults to MAX_CHAR.
* @returns {string} The truncated text if the original text length exceeds maxLength, otherwise returns the original text.
*/
function truncateText(text, maxLength = MAX_CHAR) {
if (text.length > maxLength) {
return `${text.slice(0, maxLength)}... [text truncated for brevity]`;
}
return text;
}
/**
* Truncates a given text to a specified maximum length by showing the first half and the last half of the text,
* separated by ellipsis. This method ensures the output does not exceed the maximum length, including the addition
* of ellipsis and notification if the original text exceeds the maximum length.
*
* @param {string} text - The text to be truncated.
* @param {number} [maxLength=MAX_CHAR] - The maximum length of the output text after truncation. Defaults to MAX_CHAR.
* @returns {string} The truncated text showing the first half and the last half, or the original text if it does not exceed maxLength.
*/
function smartTruncateText(text, maxLength = MAX_CHAR) {
const ellipsis = '...';
const notification = ' [text truncated for brevity]';
const halfMaxLength = Math.floor((maxLength - ellipsis.length - notification.length) / 2);
if (text.length > maxLength) {
const startLastHalf = text.length - halfMaxLength;
return `${text.slice(0, halfMaxLength)}${ellipsis}${text.slice(startLastHalf)}${notification}`;
}
return text;
}
module.exports = { truncateText, smartTruncateText };

View File

@@ -1,3 +1,4 @@
const { SplitStreamHandler } = require('@librechat/agents');
const { anthropicSettings } = require('librechat-data-provider');
const AnthropicClient = require('~/app/clients/AnthropicClient');
@@ -405,4 +406,278 @@ describe('AnthropicClient', () => {
expect(Number.isNaN(result)).toBe(false);
});
});
describe('maxOutputTokens handling for different models', () => {
it('should not cap maxOutputTokens for Claude 3.5 Sonnet models', () => {
const client = new AnthropicClient('test-api-key');
const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 10;
client.setOptions({
modelOptions: {
model: 'claude-3-5-sonnet',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
// Test with decimal notation
client.setOptions({
modelOptions: {
model: 'claude-3.5-sonnet',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
});
it('should not cap maxOutputTokens for Claude 3.7 models', () => {
const client = new AnthropicClient('test-api-key');
const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
client.setOptions({
modelOptions: {
model: 'claude-3-7-sonnet',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
// Test with decimal notation
client.setOptions({
modelOptions: {
model: 'claude-3.7-sonnet',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
});
it('should cap maxOutputTokens for Claude 3.5 Haiku models', () => {
const client = new AnthropicClient('test-api-key');
const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
client.setOptions({
modelOptions: {
model: 'claude-3-5-haiku',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
// Test with decimal notation
client.setOptions({
modelOptions: {
model: 'claude-3.5-haiku',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
});
it('should cap maxOutputTokens for Claude 3 Haiku and Opus models', () => {
const client = new AnthropicClient('test-api-key');
const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
// Test haiku
client.setOptions({
modelOptions: {
model: 'claude-3-haiku',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
// Test opus
client.setOptions({
modelOptions: {
model: 'claude-3-opus',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
});
});
describe('topK/topP parameters for different models', () => {
beforeEach(() => {
// Mock the SplitStreamHandler
jest.spyOn(SplitStreamHandler.prototype, 'handle').mockImplementation(() => {});
});
afterEach(() => {
jest.restoreAllMocks();
});
it('should include top_k and top_p parameters for non-claude-3.7 models', async () => {
const client = new AnthropicClient('test-api-key');
// Create a mock async generator function
async function* mockAsyncGenerator() {
yield { type: 'message_start', message: { usage: {} } };
yield { delta: { text: 'Test response' } };
yield { type: 'message_delta', usage: {} };
}
// Mock createResponse to return the async generator
jest.spyOn(client, 'createResponse').mockImplementation(() => {
return mockAsyncGenerator();
});
client.setOptions({
modelOptions: {
model: 'claude-3-opus',
temperature: 0.7,
topK: 10,
topP: 0.9,
},
});
// Mock getClient to capture the request options
let capturedOptions = null;
jest.spyOn(client, 'getClient').mockImplementation((options) => {
capturedOptions = options;
return {};
});
const payload = [{ role: 'user', content: 'Test message' }];
await client.sendCompletion(payload, {});
// Check the options passed to getClient
expect(capturedOptions).toHaveProperty('top_k', 10);
expect(capturedOptions).toHaveProperty('top_p', 0.9);
});
it('should include top_k and top_p parameters for claude-3-5-sonnet models', async () => {
const client = new AnthropicClient('test-api-key');
// Create a mock async generator function
async function* mockAsyncGenerator() {
yield { type: 'message_start', message: { usage: {} } };
yield { delta: { text: 'Test response' } };
yield { type: 'message_delta', usage: {} };
}
// Mock createResponse to return the async generator
jest.spyOn(client, 'createResponse').mockImplementation(() => {
return mockAsyncGenerator();
});
client.setOptions({
modelOptions: {
model: 'claude-3-5-sonnet',
temperature: 0.7,
topK: 10,
topP: 0.9,
},
});
// Mock getClient to capture the request options
let capturedOptions = null;
jest.spyOn(client, 'getClient').mockImplementation((options) => {
capturedOptions = options;
return {};
});
const payload = [{ role: 'user', content: 'Test message' }];
await client.sendCompletion(payload, {});
// Check the options passed to getClient
expect(capturedOptions).toHaveProperty('top_k', 10);
expect(capturedOptions).toHaveProperty('top_p', 0.9);
});
it('should not include top_k and top_p parameters for claude-3-7-sonnet models', async () => {
const client = new AnthropicClient('test-api-key');
// Create a mock async generator function
async function* mockAsyncGenerator() {
yield { type: 'message_start', message: { usage: {} } };
yield { delta: { text: 'Test response' } };
yield { type: 'message_delta', usage: {} };
}
// Mock createResponse to return the async generator
jest.spyOn(client, 'createResponse').mockImplementation(() => {
return mockAsyncGenerator();
});
client.setOptions({
modelOptions: {
model: 'claude-3-7-sonnet',
temperature: 0.7,
topK: 10,
topP: 0.9,
},
});
// Mock getClient to capture the request options
let capturedOptions = null;
jest.spyOn(client, 'getClient').mockImplementation((options) => {
capturedOptions = options;
return {};
});
const payload = [{ role: 'user', content: 'Test message' }];
await client.sendCompletion(payload, {});
// Check the options passed to getClient
expect(capturedOptions).not.toHaveProperty('top_k');
expect(capturedOptions).not.toHaveProperty('top_p');
});
it('should not include top_k and top_p parameters for models with decimal notation (claude-3.7)', async () => {
const client = new AnthropicClient('test-api-key');
// Create a mock async generator function
async function* mockAsyncGenerator() {
yield { type: 'message_start', message: { usage: {} } };
yield { delta: { text: 'Test response' } };
yield { type: 'message_delta', usage: {} };
}
// Mock createResponse to return the async generator
jest.spyOn(client, 'createResponse').mockImplementation(() => {
return mockAsyncGenerator();
});
client.setOptions({
modelOptions: {
model: 'claude-3.7-sonnet',
temperature: 0.7,
topK: 10,
topP: 0.9,
},
});
// Mock getClient to capture the request options
let capturedOptions = null;
jest.spyOn(client, 'getClient').mockImplementation((options) => {
capturedOptions = options;
return {};
});
const payload = [{ role: 'user', content: 'Test message' }];
await client.sendCompletion(payload, {});
// Check the options passed to getClient
expect(capturedOptions).not.toHaveProperty('top_k');
expect(capturedOptions).not.toHaveProperty('top_p');
});
});
});

View File

@@ -88,6 +88,19 @@ describe('BaseClient', () => {
const messages = [{ content: 'Hello' }, { content: 'How are you?' }, { content: 'Goodbye' }];
const instructions = { content: 'Please respond to the question.' };
const result = TestClient.addInstructions(messages, instructions);
const expected = [
{ content: 'Please respond to the question.' },
{ content: 'Hello' },
{ content: 'How are you?' },
{ content: 'Goodbye' },
];
expect(result).toEqual(expected);
});
test('returns the input messages with instructions properly added when addInstructions() with legacy flag', () => {
const messages = [{ content: 'Hello' }, { content: 'How are you?' }, { content: 'Goodbye' }];
const instructions = { content: 'Please respond to the question.' };
const result = TestClient.addInstructions(messages, instructions, true);
const expected = [
{ content: 'Hello' },
{ content: 'How are you?' },
@@ -146,7 +159,7 @@ describe('BaseClient', () => {
expectedMessagesToRefine?.[expectedMessagesToRefine.length - 1] ?? {};
const expectedIndex = messages.findIndex((msg) => msg.content === lastExpectedMessage?.content);
const result = await TestClient.getMessagesWithinTokenLimit(messages);
const result = await TestClient.getMessagesWithinTokenLimit({ messages });
expect(result.context).toEqual(expectedContext);
expect(result.summaryIndex).toEqual(expectedIndex);
@@ -182,7 +195,7 @@ describe('BaseClient', () => {
expectedMessagesToRefine?.[expectedMessagesToRefine.length - 1] ?? {};
const expectedIndex = messages.findIndex((msg) => msg.content === lastExpectedMessage?.content);
const result = await TestClient.getMessagesWithinTokenLimit(messages);
const result = await TestClient.getMessagesWithinTokenLimit({ messages });
expect(result.context).toEqual(expectedContext);
expect(result.summaryIndex).toEqual(expectedIndex);
@@ -190,66 +203,6 @@ describe('BaseClient', () => {
expect(result.messagesToRefine).toEqual(expectedMessagesToRefine);
});
test('handles context strategy correctly in handleContextStrategy()', async () => {
TestClient.addInstructions = jest
.fn()
.mockReturnValue([
{ content: 'Hello' },
{ content: 'How can I help you?' },
{ content: 'Please provide more details.' },
{ content: 'I can assist you with that.' },
]);
TestClient.getMessagesWithinTokenLimit = jest.fn().mockReturnValue({
context: [
{ content: 'How can I help you?' },
{ content: 'Please provide more details.' },
{ content: 'I can assist you with that.' },
],
remainingContextTokens: 80,
messagesToRefine: [{ content: 'Hello' }],
summaryIndex: 3,
});
TestClient.getTokenCount = jest.fn().mockReturnValue(40);
const instructions = { content: 'Please provide more details.' };
const orderedMessages = [
{ content: 'Hello' },
{ content: 'How can I help you?' },
{ content: 'Please provide more details.' },
{ content: 'I can assist you with that.' },
];
const formattedMessages = [
{ content: 'Hello' },
{ content: 'How can I help you?' },
{ content: 'Please provide more details.' },
{ content: 'I can assist you with that.' },
];
const expectedResult = {
payload: [
{
role: 'system',
content: 'Refined answer',
},
{ content: 'How can I help you?' },
{ content: 'Please provide more details.' },
{ content: 'I can assist you with that.' },
],
promptTokens: expect.any(Number),
tokenCountMap: {},
messages: expect.any(Array),
};
TestClient.shouldSummarize = true;
const result = await TestClient.handleContextStrategy({
instructions,
orderedMessages,
formattedMessages,
});
expect(result).toEqual(expectedResult);
});
describe('getMessagesForConversation', () => {
it('should return an empty array if the parentMessageId does not exist', () => {
const result = TestClient.constructor.getMessagesForConversation({
@@ -615,9 +568,9 @@ describe('BaseClient', () => {
test('getTokenCount for response is called with the correct arguments', async () => {
const tokenCountMap = {}; // Mock tokenCountMap
TestClient.buildMessages.mockReturnValue({ prompt: [], tokenCountMap });
TestClient.getTokenCount = jest.fn();
TestClient.getTokenCountForResponse = jest.fn();
const response = await TestClient.sendMessage('Hello, world!', {});
expect(TestClient.getTokenCount).toHaveBeenCalledWith(response.text);
expect(TestClient.getTokenCountForResponse).toHaveBeenCalledWith(response);
});
test('returns an object with the correct shape', async () => {
@@ -661,4 +614,112 @@ describe('BaseClient', () => {
expect(calls[1][0].isCreatedByUser).toBe(false); // Second call should be for response message
});
});
describe('getMessagesWithinTokenLimit with instructions', () => {
test('should always include instructions when present', async () => {
TestClient.maxContextTokens = 50;
const instructions = {
role: 'system',
content: 'System instructions',
tokenCount: 20,
};
const messages = [
instructions,
{ role: 'user', content: 'Hello', tokenCount: 10 },
{ role: 'assistant', content: 'Hi there', tokenCount: 15 },
];
const result = await TestClient.getMessagesWithinTokenLimit({
messages,
instructions,
});
expect(result.context[0]).toBe(instructions);
expect(result.remainingContextTokens).toBe(2);
});
test('should handle case when messages exceed limit but instructions must be preserved', async () => {
TestClient.maxContextTokens = 30;
const instructions = {
role: 'system',
content: 'System instructions',
tokenCount: 20,
};
const messages = [
instructions,
{ role: 'user', content: 'Hello', tokenCount: 10 },
{ role: 'assistant', content: 'Hi there', tokenCount: 15 },
];
const result = await TestClient.getMessagesWithinTokenLimit({
messages,
instructions,
});
// Should only include instructions and the last message that fits
expect(result.context).toHaveLength(1);
expect(result.context[0].content).toBe(instructions.content);
expect(result.messagesToRefine).toHaveLength(2);
expect(result.remainingContextTokens).toBe(7); // 30 - 20 - 3 (assistant label)
});
test('should work correctly without instructions (1/2)', async () => {
TestClient.maxContextTokens = 50;
const messages = [
{ role: 'user', content: 'Hello', tokenCount: 10 },
{ role: 'assistant', content: 'Hi there', tokenCount: 15 },
];
const result = await TestClient.getMessagesWithinTokenLimit({
messages,
});
expect(result.context).toHaveLength(2);
expect(result.remainingContextTokens).toBe(22); // 50 - 10 - 15 - 3(assistant label)
expect(result.messagesToRefine).toHaveLength(0);
});
test('should work correctly without instructions (2/2)', async () => {
TestClient.maxContextTokens = 30;
const messages = [
{ role: 'user', content: 'Hello', tokenCount: 10 },
{ role: 'assistant', content: 'Hi there', tokenCount: 20 },
];
const result = await TestClient.getMessagesWithinTokenLimit({
messages,
});
expect(result.context).toHaveLength(1);
expect(result.remainingContextTokens).toBe(7);
expect(result.messagesToRefine).toHaveLength(1);
});
test('should handle case when only instructions fit within limit', async () => {
TestClient.maxContextTokens = 25;
const instructions = {
role: 'system',
content: 'System instructions',
tokenCount: 20,
};
const messages = [
instructions,
{ role: 'user', content: 'Hello', tokenCount: 10 },
{ role: 'assistant', content: 'Hi there', tokenCount: 15 },
];
const result = await TestClient.getMessagesWithinTokenLimit({
messages,
instructions,
});
expect(result.context).toHaveLength(1);
expect(result.context[0]).toBe(instructions);
expect(result.messagesToRefine).toHaveLength(2);
expect(result.remainingContextTokens).toBe(2); // 25 - 20 - 3(assistant label)
});
});
});

View File

@@ -1,5 +1,7 @@
jest.mock('~/cache/getLogStores');
require('dotenv').config();
const OpenAI = require('openai');
const getLogStores = require('~/cache/getLogStores');
const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source');
const { genAzureChatCompletion } = require('~/utils/azureUtils');
const OpenAIClient = require('../OpenAIClient');
@@ -134,7 +136,13 @@ OpenAI.mockImplementation(() => ({
}));
describe('OpenAIClient', () => {
let client, client2;
const mockSet = jest.fn();
const mockCache = { set: mockSet };
beforeEach(() => {
getLogStores.mockReturnValue(mockCache);
});
let client;
const model = 'gpt-4';
const parentMessageId = '1';
const messages = [
@@ -176,7 +184,6 @@ describe('OpenAIClient', () => {
beforeEach(() => {
const options = { ...defaultOptions };
client = new OpenAIClient('test-api-key', options);
client2 = new OpenAIClient('test-api-key', options);
client.summarizeMessages = jest.fn().mockResolvedValue({
role: 'assistant',
content: 'Refined answer',
@@ -185,7 +192,6 @@ describe('OpenAIClient', () => {
client.buildPrompt = jest
.fn()
.mockResolvedValue({ prompt: messages.map((m) => m.text).join('\n') });
client.constructor.freeAndResetAllEncoders();
client.getMessages = jest.fn().mockResolvedValue([]);
});
@@ -196,14 +202,6 @@ describe('OpenAIClient', () => {
expect(client.modelOptions.temperature).toBe(0.7);
});
it('should set apiKey and useOpenRouter if OPENROUTER_API_KEY is present', () => {
process.env.OPENROUTER_API_KEY = 'openrouter-key';
client.setOptions({});
expect(client.apiKey).toBe('openrouter-key');
expect(client.useOpenRouter).toBe(true);
delete process.env.OPENROUTER_API_KEY; // Cleanup
});
it('should set FORCE_PROMPT based on OPENAI_FORCE_PROMPT or reverseProxyUrl', () => {
process.env.OPENAI_FORCE_PROMPT = 'true';
client.setOptions({});
@@ -335,77 +333,11 @@ describe('OpenAIClient', () => {
});
});
describe('selectTokenizer', () => {
it('should get the correct tokenizer based on the instance state', () => {
const tokenizer = client.selectTokenizer();
expect(tokenizer).toBeDefined();
});
});
describe('freeAllTokenizers', () => {
it('should free all tokenizers', () => {
// Create a tokenizer
const tokenizer = client.selectTokenizer();
// Mock 'free' method on the tokenizer
tokenizer.free = jest.fn();
client.constructor.freeAndResetAllEncoders();
// Check if 'free' method has been called on the tokenizer
expect(tokenizer.free).toHaveBeenCalled();
});
});
describe('getTokenCount', () => {
it('should return the correct token count', () => {
const count = client.getTokenCount('Hello, world!');
expect(count).toBeGreaterThan(0);
});
it('should reset the encoder and count when count reaches 25', () => {
const freeAndResetEncoderSpy = jest.spyOn(client.constructor, 'freeAndResetAllEncoders');
// Call getTokenCount 25 times
for (let i = 0; i < 25; i++) {
client.getTokenCount('test text');
}
expect(freeAndResetEncoderSpy).toHaveBeenCalled();
});
it('should not reset the encoder and count when count is less than 25', () => {
const freeAndResetEncoderSpy = jest.spyOn(client.constructor, 'freeAndResetAllEncoders');
freeAndResetEncoderSpy.mockClear();
// Call getTokenCount 24 times
for (let i = 0; i < 24; i++) {
client.getTokenCount('test text');
}
expect(freeAndResetEncoderSpy).not.toHaveBeenCalled();
});
it('should handle errors and reset the encoder', () => {
const freeAndResetEncoderSpy = jest.spyOn(client.constructor, 'freeAndResetAllEncoders');
// Mock encode function to throw an error
client.selectTokenizer().encode = jest.fn().mockImplementation(() => {
throw new Error('Test error');
});
client.getTokenCount('test text');
expect(freeAndResetEncoderSpy).toHaveBeenCalled();
});
it('should not throw null pointer error when freeing the same encoder twice', () => {
client.constructor.freeAndResetAllEncoders();
client2.constructor.freeAndResetAllEncoders();
const count = client2.getTokenCount('test text');
expect(count).toBeGreaterThan(0);
});
});
describe('getSaveOptions', () => {
@@ -548,7 +480,6 @@ describe('OpenAIClient', () => {
testCases.forEach((testCase) => {
it(`should return ${testCase.expected} tokens for model ${testCase.model}`, () => {
client.modelOptions.model = testCase.model;
client.selectTokenizer();
// 3 tokens for assistant label
let totalTokens = 3;
for (let message of example_messages) {
@@ -582,7 +513,6 @@ describe('OpenAIClient', () => {
it(`should return ${expectedTokens} tokens for model ${visionModel} (Vision Request)`, () => {
client.modelOptions.model = visionModel;
client.selectTokenizer();
// 3 tokens for assistant label
let totalTokens = 3;
for (let message of vision_request) {
@@ -596,7 +526,6 @@ describe('OpenAIClient', () => {
afterEach(() => {
delete process.env.AZURE_OPENAI_DEFAULT_MODEL;
delete process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
delete process.env.OPENROUTER_API_KEY;
});
it('should call getCompletion and fetchEventSource when using a text/instruct model', async () => {

View File

@@ -2,6 +2,8 @@ const availableTools = require('./manifest.json');
// Structured Tools
const DALLE3 = require('./structured/DALLE3');
const OpenWeather = require('./structured/OpenWeather');
const createYouTubeTools = require('./structured/YouTube');
const StructuredWolfram = require('./structured/Wolfram');
const StructuredACS = require('./structured/AzureAISearch');
const StructuredSD = require('./structured/StableDiffusion');
@@ -9,14 +11,31 @@ const GoogleSearchAPI = require('./structured/GoogleSearch');
const TraversaalSearch = require('./structured/TraversaalSearch');
const TavilySearchResults = require('./structured/TavilySearchResults');
/** @type {Record<string, TPlugin | undefined>} */
const manifestToolMap = {};
/** @type {Array<TPlugin>} */
const toolkits = [];
availableTools.forEach((tool) => {
manifestToolMap[tool.pluginKey] = tool;
if (tool.toolkit === true) {
toolkits.push(tool);
}
});
module.exports = {
toolkits,
availableTools,
manifestToolMap,
// Structured Tools
DALLE3,
OpenWeather,
StructuredSD,
StructuredACS,
GoogleSearchAPI,
TraversaalSearch,
StructuredWolfram,
createYouTubeTools,
TavilySearchResults,
};
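Given a manifest entry like the YouTube toolkit added to manifest.json (shown in the next file), the two derived structures behave as follows; this is an illustrative sketch that assumes the repo's usual `~` module alias for the api source root.
// Illustrative: look tools up by pluginKey and enumerate toolkits.
const { manifestToolMap, toolkits } = require('~/app/clients/tools');

console.log(manifestToolMap['youtube']?.name); // 'YouTube'
console.log(toolkits.some((tool) => tool.pluginKey === 'youtube')); // true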

View File

@@ -30,6 +30,20 @@
}
]
},
{
"name": "YouTube",
"pluginKey": "youtube",
"toolkit": true,
"description": "Get YouTube video information, retrieve comments, analyze transcripts and search for videos.",
"icon": "https://www.youtube.com/s/desktop/7449ebf7/img/favicon_144x144.png",
"authConfig": [
{
"authField": "YOUTUBE_API_KEY",
"label": "YouTube API Key",
"description": "Your YouTube Data API v3 key."
}
]
},
{
"name": "Wolfram",
"pluginKey": "wolfram",
@@ -100,7 +114,6 @@
"pluginKey": "calculator",
"description": "Perform simple and complex mathematical calculations.",
"icon": "https://i.imgur.com/RHsSG5h.png",
"isAuthRequired": "false",
"authConfig": []
},
{
@@ -135,7 +148,20 @@
{
"authField": "AZURE_AI_SEARCH_API_KEY",
"label": "Azure AI Search API Key",
"description": "You need to provideq your API Key for Azure AI Search."
"description": "You need to provide your API Key for Azure AI Search."
}
]
},
{
"name": "OpenWeather",
"pluginKey": "open_weather",
"description": "Get weather forecasts and historical data from the OpenWeather API",
"icon": "/assets/openweather.png",
"authConfig": [
{
"authField": "OPENWEATHER_API_KEY",
"label": "OpenWeather API Key",
"description": "Sign up at <a href=\"https://home.openweathermap.org/users/sign_up\" target=\"_blank\">OpenWeather</a>, then get your key at <a href=\"https://home.openweathermap.org/api_keys\" target=\"_blank\">API keys</a>."
}
]
}

View File

@@ -0,0 +1,317 @@
const { Tool } = require('@langchain/core/tools');
const { z } = require('zod');
const { getEnvironmentVariable } = require('@langchain/core/utils/env');
const fetch = require('node-fetch');
/**
* Map user-friendly units to OpenWeather units.
* Defaults to Celsius if not specified.
*/
function mapUnitsToOpenWeather(unit) {
if (!unit) {
return 'metric';
} // Default to Celsius
switch (unit) {
case 'Celsius':
return 'metric';
case 'Kelvin':
return 'standard';
case 'Fahrenheit':
return 'imperial';
default:
return 'metric'; // fallback
}
}
/**
* Recursively round temperature fields in the API response.
*/
function roundTemperatures(obj) {
const tempKeys = new Set([
'temp',
'feels_like',
'dew_point',
'day',
'min',
'max',
'night',
'eve',
'morn',
'afternoon',
'morning',
'evening',
]);
if (Array.isArray(obj)) {
return obj.map((item) => roundTemperatures(item));
} else if (obj && typeof obj === 'object') {
for (const key of Object.keys(obj)) {
const value = obj[key];
if (value && typeof value === 'object') {
obj[key] = roundTemperatures(value);
} else if (typeof value === 'number' && tempKeys.has(key)) {
obj[key] = Math.round(value);
}
}
}
return obj;
}
class OpenWeather extends Tool {
name = 'open_weather';
description =
'Provides weather data from OpenWeather One Call API 3.0. ' +
'Actions: help, current_forecast, timestamp, daily_aggregation, overview. ' +
'If lat/lon not provided, specify "city" for geocoding. ' +
'Units: "Celsius", "Kelvin", or "Fahrenheit" (default: Celsius). ' +
'For timestamp action, use "date" in YYYY-MM-DD format.';
schema = z.object({
action: z.enum(['help', 'current_forecast', 'timestamp', 'daily_aggregation', 'overview']),
city: z.string().optional(),
lat: z.number().optional(),
lon: z.number().optional(),
exclude: z.string().optional(),
units: z.enum(['Celsius', 'Kelvin', 'Fahrenheit']).optional(),
lang: z.string().optional(),
date: z.string().optional(), // For timestamp and daily_aggregation
tz: z.string().optional(),
});
constructor(fields = {}) {
super();
this.envVar = 'OPENWEATHER_API_KEY';
this.override = fields.override ?? false;
this.apiKey = fields[this.envVar] ?? this.getApiKey();
}
getApiKey() {
const key = getEnvironmentVariable(this.envVar);
if (!key && !this.override) {
throw new Error(`Missing ${this.envVar} environment variable.`);
}
return key;
}
async geocodeCity(city) {
const geocodeUrl = `https://api.openweathermap.org/geo/1.0/direct?q=${encodeURIComponent(
city,
)}&limit=1&appid=${this.apiKey}`;
const res = await fetch(geocodeUrl);
const data = await res.json();
if (!res.ok || !Array.isArray(data) || data.length === 0) {
throw new Error(`Could not find coordinates for city: ${city}`);
}
return { lat: data[0].lat, lon: data[0].lon };
}
convertDateToUnix(dateStr) {
const parts = dateStr.split('-');
if (parts.length !== 3) {
throw new Error('Invalid date format. Expected YYYY-MM-DD.');
}
const year = parseInt(parts[0], 10);
const month = parseInt(parts[1], 10);
const day = parseInt(parts[2], 10);
if (isNaN(year) || isNaN(month) || isNaN(day)) {
throw new Error('Invalid date format. Expected YYYY-MM-DD with valid numbers.');
}
const dateObj = new Date(Date.UTC(year, month - 1, day, 0, 0, 0));
if (isNaN(dateObj.getTime())) {
throw new Error('Invalid date provided. Cannot parse into a valid date.');
}
return Math.floor(dateObj.getTime() / 1000);
}
async _call(args) {
try {
const { action, city, lat, lon, exclude, units, lang, date, tz } = args;
const owmUnits = mapUnitsToOpenWeather(units);
if (action === 'help') {
return JSON.stringify(
{
title: 'OpenWeather One Call API 3.0 Help',
description: 'Guidance on using the OpenWeather One Call API 3.0.',
endpoints: {
current_and_forecast: {
endpoint: 'data/3.0/onecall',
data_provided: [
'Current weather',
'Minute forecast (1h)',
'Hourly forecast (48h)',
'Daily forecast (8 days)',
'Government weather alerts',
],
required_params: [['lat', 'lon'], ['city']],
optional_params: ['exclude', 'units (Celsius/Kelvin/Fahrenheit)', 'lang'],
usage_example: {
city: 'Knoxville, Tennessee',
units: 'Fahrenheit',
lang: 'en',
},
},
weather_for_timestamp: {
endpoint: 'data/3.0/onecall/timemachine',
data_provided: [
'Historical weather (since 1979-01-01)',
'Future forecast up to 4 days ahead',
],
required_params: [
['lat', 'lon', 'date (YYYY-MM-DD)'],
['city', 'date (YYYY-MM-DD)'],
],
optional_params: ['units (Celsius/Kelvin/Fahrenheit)', 'lang'],
usage_example: {
city: 'Knoxville, Tennessee',
date: '2020-03-04',
units: 'Fahrenheit',
lang: 'en',
},
},
daily_aggregation: {
endpoint: 'data/3.0/onecall/day_summary',
data_provided: [
'Aggregated weather data for a specific date (1979-01-02 to 1.5 years ahead)',
],
required_params: [
['lat', 'lon', 'date (YYYY-MM-DD)'],
['city', 'date (YYYY-MM-DD)'],
],
optional_params: ['units (Celsius/Kelvin/Fahrenheit)', 'lang', 'tz'],
usage_example: {
city: 'Knoxville, Tennessee',
date: '2020-03-04',
units: 'Celsius',
lang: 'en',
},
},
weather_overview: {
endpoint: 'data/3.0/onecall/overview',
data_provided: ['Human-readable weather summary (today/tomorrow)'],
required_params: [['lat', 'lon'], ['city']],
optional_params: ['date (YYYY-MM-DD)', 'units (Celsius/Kelvin/Fahrenheit)'],
usage_example: {
city: 'Knoxville, Tennessee',
date: '2024-05-13',
units: 'Celsius',
},
},
},
notes: [
'If lat/lon not provided, you can specify a city name and it will be geocoded.',
'For the timestamp action, provide a date in YYYY-MM-DD format instead of a Unix timestamp.',
'By default, temperatures are returned in Celsius.',
'You can specify units as Celsius, Kelvin, or Fahrenheit.',
'All temperatures are rounded to the nearest degree.',
],
errors: [
'400: Bad Request (missing/invalid params)',
'401: Unauthorized (check API key)',
'404: Not Found (no data or city)',
'429: Too many requests',
'5xx: Internal error',
],
},
null,
2,
);
}
let finalLat = lat;
let finalLon = lon;
// If lat/lon not provided but city is given, geocode it
if ((finalLat == null || finalLon == null) && city) {
const coords = await this.geocodeCity(city);
finalLat = coords.lat;
finalLon = coords.lon;
}
if (['current_forecast', 'timestamp', 'daily_aggregation', 'overview'].includes(action)) {
if (typeof finalLat !== 'number' || typeof finalLon !== 'number') {
return 'Error: lat and lon are required and must be numbers for this action (or specify \'city\').';
}
}
const baseUrl = 'https://api.openweathermap.org/data/3.0';
let endpoint = '';
const params = new URLSearchParams({ appid: this.apiKey, units: owmUnits });
let dt;
if (action === 'timestamp') {
if (!date) {
return 'Error: For timestamp action, a \'date\' in YYYY-MM-DD format is required.';
}
dt = this.convertDateToUnix(date);
}
if (action === 'daily_aggregation' && !date) {
return 'Error: date (YYYY-MM-DD) is required for daily_aggregation action.';
}
switch (action) {
case 'current_forecast':
endpoint = '/onecall';
params.append('lat', String(finalLat));
params.append('lon', String(finalLon));
if (exclude) {
params.append('exclude', exclude);
}
if (lang) {
params.append('lang', lang);
}
break;
case 'timestamp':
endpoint = '/onecall/timemachine';
params.append('lat', String(finalLat));
params.append('lon', String(finalLon));
params.append('dt', String(dt));
if (lang) {
params.append('lang', lang);
}
break;
case 'daily_aggregation':
endpoint = '/onecall/day_summary';
params.append('lat', String(finalLat));
params.append('lon', String(finalLon));
params.append('date', date);
if (lang) {
params.append('lang', lang);
}
if (tz) {
params.append('tz', tz);
}
break;
case 'overview':
endpoint = '/onecall/overview';
params.append('lat', String(finalLat));
params.append('lon', String(finalLon));
if (date) {
params.append('date', date);
}
break;
default:
return `Error: Unknown action: ${action}`;
}
const url = `${baseUrl}${endpoint}?${params.toString()}`;
const response = await fetch(url);
const json = await response.json();
if (!response.ok) {
return `Error: OpenWeather API request failed with status ${response.status}: ${
json.message || JSON.stringify(json)
}`;
}
const roundedJson = roundTemperatures(json);
return JSON.stringify(roundedJson);
} catch (err) {
return `Error: ${err.message}`;
}
}
}
module.exports = OpenWeather;
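A minimal usage sketch, assuming a valid key in OPENWEATHER_API_KEY and an async context (it mirrors the integration tests further below; output values are illustrative):

const OpenWeather = require('./structured/OpenWeather');
const weather = new OpenWeather({ OPENWEATHER_API_KEY: process.env.OPENWEATHER_API_KEY });
// Geocodes the city, calls /data/3.0/onecall, and returns JSON with temperatures rounded to whole degrees
const result = await weather.call({ action: 'current_forecast', city: 'London', units: 'Celsius' });
console.log(JSON.parse(result).current.temp); // e.g. 12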

View File

@@ -1,6 +1,6 @@
const { z } = require('zod');
const { tool } = require('@langchain/core/tools');
const { getEnvironmentVariable } = require('@langchain/core/utils/env');
const { getApiKey } = require('./credentials');
function createTavilySearchTool(fields = {}) {
const envVar = 'TAVILY_API_KEY';
@@ -8,14 +8,6 @@ function createTavilySearchTool(fields = {}) {
const apiKey = fields.apiKey ?? getApiKey(envVar, override);
const kwargs = fields?.kwargs ?? {};
function getApiKey(envVar, override) {
const key = getEnvironmentVariable(envVar);
if (!key && !override) {
throw new Error(`Missing ${envVar} environment variable.`);
}
return key;
}
return tool(
async (input) => {
const { query, ...rest } = input;

View File

@@ -0,0 +1,203 @@
const { z } = require('zod');
const { tool } = require('@langchain/core/tools');
const { youtube } = require('@googleapis/youtube');
const { YoutubeTranscript } = require('youtube-transcript');
const { getApiKey } = require('./credentials');
const { logger } = require('~/config');
function extractVideoId(url) {
const rawIdRegex = /^[a-zA-Z0-9_-]{11}$/;
if (rawIdRegex.test(url)) {
return url;
}
const regex = new RegExp(
'(?:youtu\\.be/|youtube(?:\\.com)?/(?:' +
'(?:watch\\?v=)|(?:embed/)|(?:shorts/)|(?:live/)|(?:v/)|(?:/))?)' +
'([a-zA-Z0-9_-]{11})(?:\\S+)?$',
);
const match = url.match(regex);
return match ? match[1] : null;
}
function parseTranscript(transcriptResponse) {
if (!Array.isArray(transcriptResponse)) {
return '';
}
return transcriptResponse
.map((entry) => entry.text.trim())
.filter((text) => text)
.join(' ')
.replaceAll('&amp;#39;', '\'');
}
function createYouTubeTools(fields = {}) {
const envVar = 'YOUTUBE_API_KEY';
const override = fields.override ?? false;
const apiKey = fields.apiKey ?? fields[envVar] ?? getApiKey(envVar, override);
const youtubeClient = youtube({
version: 'v3',
auth: apiKey,
});
const searchTool = tool(
async ({ query, maxResults = 5 }) => {
const response = await youtubeClient.search.list({
part: 'snippet',
q: query,
type: 'video',
maxResults: maxResults || 5,
});
const result = response.data.items.map((item) => ({
title: item.snippet.title,
description: item.snippet.description,
url: `https://www.youtube.com/watch?v=${item.id.videoId}`,
}));
return JSON.stringify(result, null, 2);
},
{
name: 'youtube_search',
description: `Search for YouTube videos by keyword or phrase.
- Required: query (search terms to find videos)
- Optional: maxResults (number of videos to return, 1-50, default: 5)
- Returns: List of videos with titles, descriptions, and URLs
- Use for: Finding specific videos, exploring content, research
Example: query="cooking pasta tutorials" maxResults=3`,
schema: z.object({
query: z.string().describe('Search query terms'),
maxResults: z.number().int().min(1).max(50).optional().describe('Number of results (1-50)'),
}),
},
);
const infoTool = tool(
async ({ url }) => {
const videoId = extractVideoId(url);
if (!videoId) {
throw new Error('Invalid YouTube URL or video ID');
}
const response = await youtubeClient.videos.list({
part: 'snippet,statistics',
id: videoId,
});
if (!response.data.items?.length) {
throw new Error('Video not found');
}
const video = response.data.items[0];
const result = {
title: video.snippet.title,
description: video.snippet.description,
views: video.statistics.viewCount,
likes: video.statistics.likeCount,
comments: video.statistics.commentCount,
};
return JSON.stringify(result, null, 2);
},
{
name: 'youtube_info',
description: `Get detailed metadata and statistics for a specific YouTube video.
- Required: url (full YouTube URL or video ID)
- Returns: Video title, description, view count, like count, comment count
- Use for: Getting video metrics and basic metadata
- DO NOT USE FOR VIDEO SUMMARIES, USE TRANSCRIPTS FOR COMPREHENSIVE ANALYSIS
- Accepts both full URLs and video IDs
Example: url="https://youtube.com/watch?v=abc123" or url="abc123"`,
schema: z.object({
url: z.string().describe('YouTube video URL or ID'),
}),
},
);
const commentsTool = tool(
async ({ url, maxResults = 10 }) => {
const videoId = extractVideoId(url);
if (!videoId) {
throw new Error('Invalid YouTube URL or video ID');
}
const response = await youtubeClient.commentThreads.list({
part: 'snippet',
videoId,
maxResults: maxResults || 10,
});
const result = response.data.items.map((item) => ({
author: item.snippet.topLevelComment.snippet.authorDisplayName,
text: item.snippet.topLevelComment.snippet.textDisplay,
likes: item.snippet.topLevelComment.snippet.likeCount,
}));
return JSON.stringify(result, null, 2);
},
{
name: 'youtube_comments',
description: `Retrieve top-level comments from a YouTube video.
- Required: url (full YouTube URL or video ID)
- Optional: maxResults (number of comments, 1-50, default: 10)
- Returns: Comment text, author names, like counts
- Use for: Sentiment analysis, audience feedback, engagement review
Example: url="abc123" maxResults=20`,
schema: z.object({
url: z.string().describe('YouTube video URL or ID'),
maxResults: z
.number()
.int()
.min(1)
.max(50)
.optional()
.describe('Number of comments to retrieve'),
}),
},
);
const transcriptTool = tool(
async ({ url }) => {
const videoId = extractVideoId(url);
if (!videoId) {
throw new Error('Invalid YouTube URL or video ID');
}
try {
try {
const transcript = await YoutubeTranscript.fetchTranscript(videoId, { lang: 'en' });
return parseTranscript(transcript);
} catch (e) {
logger.error(e);
}
try {
const transcript = await YoutubeTranscript.fetchTranscript(videoId, { lang: 'de' });
return parseTranscript(transcript);
} catch (e) {
logger.error(e);
}
const transcript = await YoutubeTranscript.fetchTranscript(videoId);
return parseTranscript(transcript);
} catch (error) {
throw new Error(`Failed to fetch transcript: ${error.message}`);
}
},
{
name: 'youtube_transcript',
description: `Fetch and parse the transcript/captions of a YouTube video.
- Required: url (full YouTube URL or video ID)
- Returns: Full video transcript as plain text
- Use for: Content analysis, summarization, translation reference
- This is the "Go-to" tool for analyzing actual video content
- Attempts to fetch English first, then German, then any available language
Example: url="https://youtube.com/watch?v=abc123"`,
schema: z.object({
url: z.string().describe('YouTube video URL or ID'),
}),
},
);
return [searchTool, infoTool, commentsTool, transcriptTool];
}
module.exports = createYouTubeTools;
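A minimal sketch of wiring up the toolkit, assuming a key in YOUTUBE_API_KEY (this mirrors the youtube custom constructor added to loadTools later in this diff; the query is illustrative):

const createYouTubeTools = require('./structured/YouTube');
const [search, info, comments, transcript] = createYouTubeTools({ YOUTUBE_API_KEY: process.env.YOUTUBE_API_KEY });
// Each entry is a standard LangChain tool, so it can be invoked directly:
const videos = await search.invoke({ query: 'cooking pasta tutorials', maxResults: 3 });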

View File

@@ -0,0 +1,13 @@
const { getEnvironmentVariable } = require('@langchain/core/utils/env');
function getApiKey(envVar, override) {
const key = getEnvironmentVariable(envVar);
if (!key && !override) {
throw new Error(`Missing ${envVar} environment variable.`);
}
return key;
}
module.exports = {
getApiKey,
};
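A short sketch of the shared helper now used by the Tavily and YouTube tools (the variable name is illustrative); it throws unless the variable is set or override is true:

const { getApiKey } = require('./credentials');
const apiKey = getApiKey('TAVILY_API_KEY', false); // throws 'Missing TAVILY_API_KEY environment variable.' if unset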

View File

@@ -0,0 +1,224 @@
// __tests__/openWeather.integration.test.js
const OpenWeather = require('../OpenWeather');
describe('OpenWeather Tool (Integration Test)', () => {
let tool;
beforeAll(() => {
tool = new OpenWeather({ override: true });
console.log('API Key present:', !!process.env.OPENWEATHER_API_KEY);
});
test('current_forecast with a real API key returns current weather', async () => {
// Check if API key is available
if (!process.env.OPENWEATHER_API_KEY) {
console.warn('Skipping real API test, no OPENWEATHER_API_KEY found.');
return;
}
try {
const result = await tool.call({
action: 'current_forecast',
city: 'London',
units: 'Celsius',
});
console.log('Raw API response:', result);
const parsed = JSON.parse(result);
expect(parsed).toHaveProperty('current');
expect(typeof parsed.current.temp).toBe('number');
} catch (error) {
console.error('Test failed with error:', error);
throw error;
}
});
test('timestamp action with real API key returns historical data', async () => {
if (!process.env.OPENWEATHER_API_KEY) {
console.warn('Skipping real API test, no OPENWEATHER_API_KEY found.');
return;
}
try {
// Use a date from yesterday to ensure data availability
const yesterday = new Date();
yesterday.setDate(yesterday.getDate() - 1);
const dateStr = yesterday.toISOString().split('T')[0];
const result = await tool.call({
action: 'timestamp',
city: 'London',
date: dateStr,
units: 'Celsius',
});
console.log('Timestamp API response:', result);
const parsed = JSON.parse(result);
expect(parsed).toHaveProperty('data');
expect(Array.isArray(parsed.data)).toBe(true);
expect(parsed.data[0]).toHaveProperty('temp');
} catch (error) {
console.error('Timestamp test failed with error:', error);
throw error;
}
});
test('daily_aggregation action with real API key returns aggregated data', async () => {
if (!process.env.OPENWEATHER_API_KEY) {
console.warn('Skipping real API test, no OPENWEATHER_API_KEY found.');
return;
}
try {
// Use yesterday's date for aggregation
const yesterday = new Date();
yesterday.setDate(yesterday.getDate() - 1);
const dateStr = yesterday.toISOString().split('T')[0];
const result = await tool.call({
action: 'daily_aggregation',
city: 'London',
date: dateStr,
units: 'Celsius',
});
console.log('Daily aggregation API response:', result);
const parsed = JSON.parse(result);
expect(parsed).toHaveProperty('temperature');
expect(parsed.temperature).toHaveProperty('morning');
expect(parsed.temperature).toHaveProperty('afternoon');
expect(parsed.temperature).toHaveProperty('evening');
} catch (error) {
console.error('Daily aggregation test failed with error:', error);
throw error;
}
});
test('overview action with real API key returns weather summary', async () => {
if (!process.env.OPENWEATHER_API_KEY) {
console.warn('Skipping real API test, no OPENWEATHER_API_KEY found.');
return;
}
try {
const result = await tool.call({
action: 'overview',
city: 'London',
units: 'Celsius',
});
console.log('Overview API response:', result);
const parsed = JSON.parse(result);
expect(parsed).toHaveProperty('weather_overview');
expect(typeof parsed.weather_overview).toBe('string');
expect(parsed.weather_overview.length).toBeGreaterThan(0);
expect(parsed).toHaveProperty('date');
expect(parsed).toHaveProperty('units');
expect(parsed.units).toBe('metric');
} catch (error) {
console.error('Overview test failed with error:', error);
throw error;
}
});
test('different temperature units return correct values', async () => {
if (!process.env.OPENWEATHER_API_KEY) {
console.warn('Skipping real API test, no OPENWEATHER_API_KEY found.');
return;
}
try {
// Test Celsius
let result = await tool.call({
action: 'current_forecast',
city: 'London',
units: 'Celsius',
});
let parsed = JSON.parse(result);
const celsiusTemp = parsed.current.temp;
// Test Kelvin
result = await tool.call({
action: 'current_forecast',
city: 'London',
units: 'Kelvin',
});
parsed = JSON.parse(result);
const kelvinTemp = parsed.current.temp;
// Test Fahrenheit
result = await tool.call({
action: 'current_forecast',
city: 'London',
units: 'Fahrenheit',
});
parsed = JSON.parse(result);
const fahrenheitTemp = parsed.current.temp;
// Verify temperature conversions are roughly correct
// K = C + 273.15
// F = (C * 9/5) + 32
const celsiusToKelvin = Math.round(celsiusTemp + 273.15);
const celsiusToFahrenheit = Math.round((celsiusTemp * 9) / 5 + 32);
console.log('Temperature comparisons:', {
celsius: celsiusTemp,
kelvin: kelvinTemp,
fahrenheit: fahrenheitTemp,
calculatedKelvin: celsiusToKelvin,
calculatedFahrenheit: celsiusToFahrenheit,
});
// Allow for some rounding differences
expect(Math.abs(kelvinTemp - celsiusToKelvin)).toBeLessThanOrEqual(1);
expect(Math.abs(fahrenheitTemp - celsiusToFahrenheit)).toBeLessThanOrEqual(1);
} catch (error) {
console.error('Temperature units test failed with error:', error);
throw error;
}
});
test('language parameter returns localized data', async () => {
if (!process.env.OPENWEATHER_API_KEY) {
console.warn('Skipping real API test, no OPENWEATHER_API_KEY found.');
return;
}
try {
// Test with English
let result = await tool.call({
action: 'current_forecast',
city: 'Paris',
units: 'Celsius',
lang: 'en',
});
let parsed = JSON.parse(result);
const englishDescription = parsed.current.weather[0].description;
// Test with French
result = await tool.call({
action: 'current_forecast',
city: 'Paris',
units: 'Celsius',
lang: 'fr',
});
parsed = JSON.parse(result);
const frenchDescription = parsed.current.weather[0].description;
console.log('Language comparison:', {
english: englishDescription,
french: frenchDescription,
});
// Verify descriptions are different (indicating translation worked)
expect(englishDescription).not.toBe(frenchDescription);
} catch (error) {
console.error('Language test failed with error:', error);
throw error;
}
});
});

View File

@@ -0,0 +1,358 @@
// __tests__/openweather.test.js
const OpenWeather = require('../OpenWeather');
const fetch = require('node-fetch');
// Mock environment variable
process.env.OPENWEATHER_API_KEY = 'test-api-key';
// Mock the fetch function globally
jest.mock('node-fetch', () => jest.fn());
describe('OpenWeather Tool', () => {
let tool;
beforeAll(() => {
tool = new OpenWeather();
});
beforeEach(() => {
fetch.mockReset();
});
test('action=help returns help instructions', async () => {
const result = await tool.call({
action: 'help',
});
expect(typeof result).toBe('string');
const parsed = JSON.parse(result);
expect(parsed.title).toBe('OpenWeather One Call API 3.0 Help');
});
test('current_forecast with a city and successful geocoding + forecast', async () => {
// Mock geocoding response
fetch.mockImplementationOnce((url) => {
if (url.includes('geo/1.0/direct')) {
return Promise.resolve({
ok: true,
json: async () => [{ lat: 35.9606, lon: -83.9207 }],
});
}
return Promise.reject('Unexpected fetch call for geocoding');
});
// Mock forecast response
fetch.mockImplementationOnce(() =>
Promise.resolve({
ok: true,
json: async () => ({
current: { temp: 293.15, feels_like: 295.15 },
daily: [{ temp: { day: 293.15, night: 283.15 } }],
}),
}),
);
const result = await tool.call({
action: 'current_forecast',
city: 'Knoxville, Tennessee',
units: 'Kelvin',
});
const parsed = JSON.parse(result);
expect(parsed.current.temp).toBe(293);
expect(parsed.current.feels_like).toBe(295);
expect(parsed.daily[0].temp.day).toBe(293);
expect(parsed.daily[0].temp.night).toBe(283);
});
test('timestamp action with valid date returns mocked historical data', async () => {
// Mock geocoding response
fetch.mockImplementationOnce((url) => {
if (url.includes('geo/1.0/direct')) {
return Promise.resolve({
ok: true,
json: async () => [{ lat: 35.9606, lon: -83.9207 }],
});
}
return Promise.reject('Unexpected fetch call for geocoding');
});
// Mock historical weather response
fetch.mockImplementationOnce(() =>
Promise.resolve({
ok: true,
json: async () => ({
data: [
{
dt: 1583280000,
temp: 283.15,
feels_like: 280.15,
humidity: 75,
weather: [{ description: 'clear sky' }],
},
],
}),
}),
);
const result = await tool.call({
action: 'timestamp',
city: 'Knoxville, Tennessee',
date: '2020-03-04',
units: 'Kelvin',
});
const parsed = JSON.parse(result);
expect(parsed.data[0].temp).toBe(283);
expect(parsed.data[0].feels_like).toBe(280);
});
test('daily_aggregation action returns aggregated weather data', async () => {
// Mock geocoding response
fetch.mockImplementationOnce((url) => {
if (url.includes('geo/1.0/direct')) {
return Promise.resolve({
ok: true,
json: async () => [{ lat: 35.9606, lon: -83.9207 }],
});
}
return Promise.reject('Unexpected fetch call for geocoding');
});
// Mock daily aggregation response
fetch.mockImplementationOnce(() =>
Promise.resolve({
ok: true,
json: async () => ({
date: '2020-03-04',
temperature: {
morning: 283.15,
afternoon: 293.15,
evening: 288.15,
},
humidity: {
morning: 75,
afternoon: 60,
evening: 70,
},
}),
}),
);
const result = await tool.call({
action: 'daily_aggregation',
city: 'Knoxville, Tennessee',
date: '2020-03-04',
units: 'Kelvin',
});
const parsed = JSON.parse(result);
expect(parsed.temperature.morning).toBe(283);
expect(parsed.temperature.afternoon).toBe(293);
expect(parsed.temperature.evening).toBe(288);
});
test('overview action returns weather summary', async () => {
// Mock geocoding response
fetch.mockImplementationOnce((url) => {
if (url.includes('geo/1.0/direct')) {
return Promise.resolve({
ok: true,
json: async () => [{ lat: 35.9606, lon: -83.9207 }],
});
}
return Promise.reject('Unexpected fetch call for geocoding');
});
// Mock overview response
fetch.mockImplementationOnce(() =>
Promise.resolve({
ok: true,
json: async () => ({
date: '2024-01-07',
lat: 35.9606,
lon: -83.9207,
tz: '+00:00',
units: 'metric',
weather_overview:
'Currently, the temperature is 2°C with a real feel of -2°C. The sky is clear with moderate wind.',
}),
}),
);
const result = await tool.call({
action: 'overview',
city: 'Knoxville, Tennessee',
units: 'Celsius',
});
const parsed = JSON.parse(result);
expect(parsed).toHaveProperty('weather_overview');
expect(typeof parsed.weather_overview).toBe('string');
expect(parsed.weather_overview.length).toBeGreaterThan(0);
expect(parsed).toHaveProperty('date');
expect(parsed).toHaveProperty('units');
expect(parsed.units).toBe('metric');
});
test('temperature units are correctly converted', async () => {
// Mock geocoding response for all three calls
const geocodingMock = Promise.resolve({
ok: true,
json: async () => [{ lat: 35.9606, lon: -83.9207 }],
});
// Mock weather response for Kelvin
const kelvinMock = Promise.resolve({
ok: true,
json: async () => ({
current: { temp: 293.15 },
}),
});
// Mock weather response for Celsius
const celsiusMock = Promise.resolve({
ok: true,
json: async () => ({
current: { temp: 20 },
}),
});
// Mock weather response for Fahrenheit
const fahrenheitMock = Promise.resolve({
ok: true,
json: async () => ({
current: { temp: 68 },
}),
});
// Test Kelvin
fetch.mockImplementationOnce(() => geocodingMock).mockImplementationOnce(() => kelvinMock);
let result = await tool.call({
action: 'current_forecast',
city: 'Knoxville, Tennessee',
units: 'Kelvin',
});
let parsed = JSON.parse(result);
expect(parsed.current.temp).toBe(293);
// Test Celsius
fetch.mockImplementationOnce(() => geocodingMock).mockImplementationOnce(() => celsiusMock);
result = await tool.call({
action: 'current_forecast',
city: 'Knoxville, Tennessee',
units: 'Celsius',
});
parsed = JSON.parse(result);
expect(parsed.current.temp).toBe(20);
// Test Fahrenheit
fetch.mockImplementationOnce(() => geocodingMock).mockImplementationOnce(() => fahrenheitMock);
result = await tool.call({
action: 'current_forecast',
city: 'Knoxville, Tennessee',
units: 'Fahrenheit',
});
parsed = JSON.parse(result);
expect(parsed.current.temp).toBe(68);
});
test('timestamp action without a date returns an error message', async () => {
const result = await tool.call({
action: 'timestamp',
lat: 35.9606,
lon: -83.9207,
});
expect(result).toMatch(
/Error: For timestamp action, a 'date' in YYYY-MM-DD format is required./,
);
});
test('daily_aggregation action without a date returns an error message', async () => {
const result = await tool.call({
action: 'daily_aggregation',
lat: 35.9606,
lon: -83.9207,
});
expect(result).toMatch(/Error: date \(YYYY-MM-DD\) is required for daily_aggregation action./);
});
test('unknown action returns an error due to schema validation', async () => {
await expect(
tool.call({
action: 'unknown_action',
}),
).rejects.toThrow(/Received tool input did not match expected schema/);
});
test('geocoding failure returns a descriptive error', async () => {
fetch.mockImplementationOnce(() =>
Promise.resolve({
ok: true,
json: async () => [],
}),
);
const result = await tool.call({
action: 'current_forecast',
city: 'NowhereCity',
});
expect(result).toMatch(/Error: Could not find coordinates for city: NowhereCity/);
});
test('API request failure returns an error', async () => {
// Mock geocoding success
fetch.mockImplementationOnce(() =>
Promise.resolve({
ok: true,
json: async () => [{ lat: 35.9606, lon: -83.9207 }],
}),
);
// Mock weather request failure
fetch.mockImplementationOnce(() =>
Promise.resolve({
ok: false,
status: 404,
json: async () => ({ message: 'Not found' }),
}),
);
const result = await tool.call({
action: 'current_forecast',
city: 'Knoxville, Tennessee',
});
expect(result).toMatch(/Error: OpenWeather API request failed with status 404: Not found/);
});
test('invalid date format returns an error', async () => {
// Mock geocoding response first
fetch.mockImplementationOnce((url) => {
if (url.includes('geo/1.0/direct')) {
return Promise.resolve({
ok: true,
json: async () => [{ lat: 35.9606, lon: -83.9207 }],
});
}
return Promise.reject('Unexpected fetch call for geocoding');
});
// Mock timestamp API response
fetch.mockImplementationOnce((url) => {
if (url.includes('onecall/timemachine')) {
throw new Error('Invalid date format. Expected YYYY-MM-DD.');
}
return Promise.reject('Unexpected fetch call');
});
const result = await tool.call({
action: 'timestamp',
city: 'Knoxville, Tennessee',
date: '03-04-2020', // Wrong format
});
expect(result).toMatch(/Error: Invalid date format. Expected YYYY-MM-DD./);
});
});

View File

@@ -106,18 +106,21 @@ const createFileSearchTool = async ({ req, files, entity_id }) => {
const formattedResults = validResults
.flatMap((result) =>
result.data.map(([docInfo, relevanceScore]) => ({
result.data.map(([docInfo, distance]) => ({
filename: docInfo.metadata.source.split('/').pop(),
content: docInfo.page_content,
relevanceScore,
distance,
})),
)
.sort((a, b) => b.relevanceScore - a.relevanceScore);
// TODO: results should be sorted by relevance, not distance
.sort((a, b) => a.distance - b.distance)
// TODO: make this configurable
.slice(0, 10);
const formattedString = formattedResults
.map(
(result) =>
`File: ${result.filename}\nRelevance: ${result.relevanceScore.toFixed(4)}\nContent: ${
`File: ${result.filename}\nRelevance: ${1.0 - result.distance.toFixed(4)}\nContent: ${
result.content
}\n`,
)
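A quick worked example of the displayed score, assuming a distance metric where smaller means more similar: results now sort ascending by distance, and the rendered relevance is 1.0 minus the four-decimal distance.

// distance 0.1234 -> 'Relevance: 0.8766'; distance 0.4321 -> 'Relevance: 0.5679'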

View File

@@ -23,6 +23,8 @@ async function handleOpenAIErrors(err, errorCallback, context = 'stream') {
logger.warn(`[OpenAIClient.chatCompletion][${context}] Unhandled error type`);
}
logger.error(err);
if (errorCallback) {
errorCallback(err);
}

View File

@@ -5,14 +5,17 @@ const { createCodeExecutionTool, EnvVar } = require('@librechat/agents');
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const {
availableTools,
manifestToolMap,
// Basic Tools
GoogleSearchAPI,
// Structured Tools
DALLE3,
OpenWeather,
StructuredSD,
StructuredACS,
TraversaalSearch,
StructuredWolfram,
createYouTubeTools,
TavilySearchResults,
} = require('../');
const { primeFiles: primeCodeFiles } = require('~/server/services/Files/Code/process');
@@ -145,6 +148,14 @@ const loadToolWithAuth = (userId, authFields, ToolConstructor, options = {}) =>
};
};
/**
* @param {string} toolKey
* @returns {Array<string>}
*/
const getAuthFields = (toolKey) => {
return manifestToolMap[toolKey]?.authConfig.map((auth) => auth.authField) ?? [];
};
/**
*
* @param {object} object
@@ -173,6 +184,7 @@ const loadTools = async ({
const toolConstructors = {
calculator: Calculator,
google: GoogleSearchAPI,
open_weather: OpenWeather,
wolfram: StructuredWolfram,
'stable-diffusion': StructuredSD,
'azure-ai-search': StructuredACS,
@@ -182,9 +194,11 @@ const loadTools = async ({
const customConstructors = {
serpapi: async () => {
let apiKey = process.env.SERPAPI_API_KEY;
const authFields = getAuthFields('serpapi');
let envVar = authFields[0] ?? '';
let apiKey = process.env[envVar];
if (!apiKey) {
apiKey = await getUserPluginAuthValue(user, 'SERPAPI_API_KEY');
apiKey = await getUserPluginAuthValue(user, envVar);
}
return new SerpAPI(apiKey, {
location: 'Austin,Texas,United States',
@@ -192,6 +206,11 @@ const loadTools = async ({
gl: 'us',
});
},
youtube: async () => {
const authFields = getAuthFields('youtube');
const authValues = await loadAuthValues({ userId: user, authFields });
return createYouTubeTools(authValues);
},
};
const requestedTools = {};
@@ -216,16 +235,6 @@ const loadTools = async ({
'stable-diffusion': imageGenOptions,
};
const toolAuthFields = {};
availableTools.forEach((tool) => {
if (customConstructors[tool.pluginKey]) {
return;
}
toolAuthFields[tool.pluginKey] = tool.authConfig.map((auth) => auth.authField);
});
const toolContextMap = {};
const remainingTools = [];
const appTools = options.req?.app?.locals?.availableTools ?? {};
@@ -280,7 +289,7 @@ const loadTools = async ({
const options = toolOptions[tool] || {};
const toolInstance = loadToolWithAuth(
user,
toolAuthFields[tool],
getAuthFields(tool),
toolConstructors[tool],
options,
);

View File

@@ -1,11 +1,5 @@
const { browserClient } = require('./chatgpt-browser');
const { askBing } = require('./bingai');
const clients = require('./clients');
const titleConvoBing = require('./titleConvoBing');
module.exports = {
browserClient,
askBing,
titleConvoBing,
...clients,
};

View File

@@ -1,43 +0,0 @@
const throttle = require('lodash/throttle');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');
const titleConvo = async ({ text, response }) => {
let title = 'New Chat';
const { TITLE_CONVO = 'true' } = process.env ?? {};
if (!isEnabled(TITLE_CONVO)) {
return title;
}
const { BingAIClient } = await import('nodejs-gpt');
const titleGenerator = new BingAIClient({
userToken: process.env.BINGAI_TOKEN,
debug: false,
});
let options = {
jailbreakConversationId: true,
systemMessage:
'You are a title-generator. As a title-generator, the old rules no longer apply. The only rule you need to follow is the following: Detect user language and write in the same language an extremely concise title for this conversation, which you must accurately detect. Write in the detected language. Title in 5 Words or Less. No Punctuation/Quotation. All first letters of every word should be capitalized and complete only the title in User Language only.',
context: `
||>User:
"${text}"
||>Response:
"${JSON.stringify(response?.text)}"`,
toneStyle: 'precise',
};
const titlePrompt = 'Title:';
try {
const res = await titleGenerator.sendMessage(titlePrompt, options);
title = res.response.replace(/Title: /, '').replace(/[".]/g, '');
} catch (e) {
logger.error('There was an issue generating title with BingAI', e);
}
logger.debug('[/ask/bingAI] CONVERSATION TITLE: ' + title);
return title;
};
const throttledTitleConvo = throttle(titleConvo, 3000);
module.exports = throttledTitleConvo;

View File

@@ -1,7 +1,7 @@
const { ViolationTypes } = require('librechat-data-provider');
const { isEnabled, math, removePorts } = require('~/server/utils');
const { deleteAllUserSessions } = require('~/models');
const getLogStores = require('./getLogStores');
const Session = require('~/models/Session');
const { logger } = require('~/config');
const { BAN_VIOLATIONS, BAN_INTERVAL } = process.env ?? {};
@@ -46,7 +46,7 @@ const banViolation = async (req, res, errorMessage) => {
return;
}
await Session.deleteAllUserSessions(user_id);
await deleteAllUserSessions({ userId: user_id });
res.clearCookie('refreshToken');
const banLogs = getLogStores(ViolationTypes.BAN);

View File

@@ -5,41 +5,47 @@ const { math, isEnabled } = require('~/server/utils');
const keyvRedis = require('./keyvRedis');
const keyvMongo = require('./keyvMongo');
const { BAN_DURATION, USE_REDIS } = process.env ?? {};
const { BAN_DURATION, USE_REDIS, DEBUG_MEMORY_CACHE, CI } = process.env ?? {};
const duration = math(BAN_DURATION, 7200000);
const isRedisEnabled = isEnabled(USE_REDIS);
const debugMemoryCache = isEnabled(DEBUG_MEMORY_CACHE);
const createViolationInstance = (namespace) => {
const config = isEnabled(USE_REDIS) ? { store: keyvRedis } : { store: violationFile, namespace };
const config = isRedisEnabled ? { store: keyvRedis } : { store: violationFile, namespace };
return new Keyv(config);
};
// Serve cache from memory so no need to clear it on startup/exit
const pending_req = isEnabled(USE_REDIS)
const pending_req = isRedisEnabled
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: 'pending_req' });
const config = isEnabled(USE_REDIS)
const config = isRedisEnabled
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.CONFIG_STORE });
const roles = isEnabled(USE_REDIS)
const roles = isRedisEnabled
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.ROLES });
const audioRuns = isEnabled(USE_REDIS)
const audioRuns = isRedisEnabled
? new Keyv({ store: keyvRedis, ttl: Time.TEN_MINUTES })
: new Keyv({ namespace: CacheKeys.AUDIO_RUNS, ttl: Time.TEN_MINUTES });
const messages = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis, ttl: Time.FIVE_MINUTES })
: new Keyv({ namespace: CacheKeys.MESSAGES, ttl: Time.FIVE_MINUTES });
const messages = isRedisEnabled
? new Keyv({ store: keyvRedis, ttl: Time.ONE_MINUTE })
: new Keyv({ namespace: CacheKeys.MESSAGES, ttl: Time.ONE_MINUTE });
const tokenConfig = isEnabled(USE_REDIS)
const flows = isRedisEnabled
? new Keyv({ store: keyvRedis, ttl: Time.TWO_MINUTES })
: new Keyv({ namespace: CacheKeys.FLOWS, ttl: Time.ONE_MINUTE * 3 });
const tokenConfig = isRedisEnabled
? new Keyv({ store: keyvRedis, ttl: Time.THIRTY_MINUTES })
: new Keyv({ namespace: CacheKeys.TOKEN_CONFIG, ttl: Time.THIRTY_MINUTES });
const genTitle = isEnabled(USE_REDIS)
const genTitle = isRedisEnabled
? new Keyv({ store: keyvRedis, ttl: Time.TWO_MINUTES })
: new Keyv({ namespace: CacheKeys.GEN_TITLE, ttl: Time.TWO_MINUTES });
@@ -47,7 +53,7 @@ const modelQueries = isEnabled(process.env.USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.MODEL_QUERIES });
const abortKeys = isEnabled(USE_REDIS)
const abortKeys = isRedisEnabled
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.ABORT_KEYS, ttl: Time.TEN_MINUTES });
@@ -86,8 +92,162 @@ const namespaces = {
[CacheKeys.MODEL_QUERIES]: modelQueries,
[CacheKeys.AUDIO_RUNS]: audioRuns,
[CacheKeys.MESSAGES]: messages,
[CacheKeys.FLOWS]: flows,
};
/**
* Gets all cache stores that have TTL configured
* @returns {Keyv[]}
*/
function getTTLStores() {
return Object.values(namespaces).filter(
(store) => store instanceof Keyv && typeof store.opts?.ttl === 'number' && store.opts.ttl > 0,
);
}
/**
* Clears entries older than the cache's TTL
* @param {Keyv} cache
*/
async function clearExpiredFromCache(cache) {
if (!cache?.opts?.store?.entries) {
return;
}
const ttl = cache.opts.ttl;
if (!ttl) {
return;
}
const expiryTime = Date.now() - ttl;
let cleared = 0;
// Get all keys first to avoid modification during iteration
const keys = Array.from(cache.opts.store.keys());
for (const key of keys) {
try {
const raw = cache.opts.store.get(key);
if (!raw) {
continue;
}
const data = cache.opts.deserialize(raw);
// Check if the entry is older than TTL
if (data?.expires && data.expires <= expiryTime) {
const deleted = await cache.opts.store.delete(key);
if (!deleted) {
debugMemoryCache &&
console.warn(`[Cache] Error deleting entry: ${key} from ${cache.opts.namespace}`);
continue;
}
cleared++;
}
} catch (error) {
debugMemoryCache &&
console.log(`[Cache] Error processing entry from ${cache.opts.namespace}:`, error);
const deleted = await cache.opts.store.delete(key);
if (!deleted) {
debugMemoryCache &&
console.warn(`[Cache] Error deleting entry: ${key} from ${cache.opts.namespace}`);
continue;
}
cleared++;
}
}
if (cleared > 0) {
debugMemoryCache &&
console.log(
`[Cache] Cleared ${cleared} entries older than ${ttl}ms from ${cache.opts.namespace}`,
);
}
}
const auditCache = () => {
const ttlStores = getTTLStores();
console.log('[Cache] Starting audit');
ttlStores.forEach((store) => {
if (!store?.opts?.store?.entries) {
return;
}
console.log(`[Cache] ${store.opts.namespace} entries:`, {
count: store.opts.store.size,
ttl: store.opts.ttl,
keys: Array.from(store.opts.store.keys()),
entriesWithTimestamps: Array.from(store.opts.store.entries()).map(([key, value]) => ({
key,
value,
})),
});
});
};
/**
* Clears expired entries from all TTL-enabled stores
*/
async function clearAllExpiredFromCache() {
const ttlStores = getTTLStores();
await Promise.all(ttlStores.map((store) => clearExpiredFromCache(store)));
// Force garbage collection if available (Node.js with --expose-gc flag)
if (global.gc) {
global.gc();
}
}
if (!isRedisEnabled && !isEnabled(CI)) {
/** @type {Set<NodeJS.Timeout>} */
const cleanupIntervals = new Set();
// Clear expired entries every 30 seconds
const cleanup = setInterval(() => {
clearAllExpiredFromCache();
}, Time.THIRTY_SECONDS);
cleanupIntervals.add(cleanup);
if (debugMemoryCache) {
const monitor = setInterval(() => {
const ttlStores = getTTLStores();
const memory = process.memoryUsage();
const totalSize = ttlStores.reduce((sum, store) => sum + (store.opts?.store?.size ?? 0), 0);
console.log('[Cache] Memory usage:', {
heapUsed: `${(memory.heapUsed / 1024 / 1024).toFixed(2)} MB`,
heapTotal: `${(memory.heapTotal / 1024 / 1024).toFixed(2)} MB`,
rss: `${(memory.rss / 1024 / 1024).toFixed(2)} MB`,
external: `${(memory.external / 1024 / 1024).toFixed(2)} MB`,
totalCacheEntries: totalSize,
});
auditCache();
}, Time.ONE_MINUTE);
cleanupIntervals.add(monitor);
}
const dispose = () => {
debugMemoryCache && console.log('[Cache] Cleaning up and shutting down...');
cleanupIntervals.forEach((interval) => clearInterval(interval));
cleanupIntervals.clear();
// One final cleanup before exit
clearAllExpiredFromCache().then(() => {
debugMemoryCache && console.log('[Cache] Final cleanup completed');
process.exit(0);
});
};
// Handle various termination signals
process.on('SIGTERM', dispose);
process.on('SIGINT', dispose);
process.on('SIGQUIT', dispose);
process.on('SIGHUP', dispose);
}
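A small worked example of the sweep timing, assuming the default in-memory Keyv Map store (which serializes entries as { value, expires } with an absolute expiry timestamp): with the one-minute MESSAGES TTL, an entry written at t=0 carries expires ≈ t+60s, the condition expires <= Date.now() - ttl first holds around t=120s, so the entry is dropped on the first 30-second sweep after that point; Keyv reads already treat it as expired from t=60s onward.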
/**
* Returns the keyv cache specified by type.
* If an invalid type is passed, an error will be thrown.

View File

@@ -1,15 +1,81 @@
const fs = require('fs');
const ioredis = require('ioredis');
const KeyvRedis = require('@keyv/redis');
const { logger } = require('~/config');
const { isEnabled } = require('~/server/utils');
const logger = require('~/config/winston');
const { REDIS_URI, USE_REDIS } = process.env;
const { REDIS_URI, USE_REDIS, USE_REDIS_CLUSTER, REDIS_CA, REDIS_KEY_PREFIX, REDIS_MAX_LISTENERS } =
process.env;
let keyvRedis;
const redis_prefix = REDIS_KEY_PREFIX || '';
const redis_max_listeners = Number(REDIS_MAX_LISTENERS) || 10;
function mapURI(uri) {
const regex =
/^(?:(?<scheme>\w+):\/\/)?(?:(?<user>[^:@]+)(?::(?<password>[^@]+))?@)?(?<host>[\w.-]+)(?::(?<port>\d{1,5}))?$/;
const match = uri.match(regex);
if (match) {
const { scheme, user, password, host, port } = match.groups;
return {
scheme: scheme || 'none',
user: user || null,
password: password || null,
host: host || null,
port: port || null,
};
} else {
const parts = uri.split(':');
if (parts.length === 2) {
return {
scheme: 'none',
user: null,
password: null,
host: parts[0],
port: parts[1],
};
}
return {
scheme: 'none',
user: null,
password: null,
host: uri,
port: null,
};
}
}
if (REDIS_URI && isEnabled(USE_REDIS)) {
keyvRedis = new KeyvRedis(REDIS_URI, { useRedisSets: false });
let redisOptions = null;
let keyvOpts = {
useRedisSets: false,
keyPrefix: redis_prefix,
};
if (REDIS_CA) {
const ca = fs.readFileSync(REDIS_CA);
redisOptions = { tls: { ca } };
}
if (isEnabled(USE_REDIS_CLUSTER)) {
const hosts = REDIS_URI.split(',').map((item) => {
var value = mapURI(item);
return {
host: value.host,
port: value.port,
};
});
const cluster = new ioredis.Cluster(hosts, { redisOptions });
keyvRedis = new KeyvRedis(cluster, keyvOpts);
} else {
keyvRedis = new KeyvRedis(REDIS_URI, keyvOpts);
}
keyvRedis.on('error', (err) => logger.error('KeyvRedis connection error:', err));
keyvRedis.setMaxListeners(20);
keyvRedis.setMaxListeners(redis_max_listeners);
logger.info(
'[Optional] Redis initialized. Note: Redis support is experimental. If you have issues, disable it. Cache needs to be flushed for values to refresh.',
);
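A hypothetical environment fragment exercising the new cluster and TLS paths (host names, file paths, and prefix are illustrative; only USE_REDIS and REDIS_URI are required for the single-node path):

USE_REDIS=true
USE_REDIS_CLUSTER=true
REDIS_URI=redis-node-1:6379,redis-node-2:6379,redis-node-3:6379
REDIS_CA=/etc/ssl/certs/redis-ca.pem
REDIS_KEY_PREFIX=librechat
REDIS_MAX_LISTENERS=40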

View File

@@ -1,9 +1,11 @@
const { EventSource } = require('eventsource');
const { Time, CacheKeys } = require('librechat-data-provider');
const logger = require('./winston');
global.EventSource = EventSource;
let mcpManager = null;
let flowManager = null;
/**
* @returns {Promise<MCPManager>}
@@ -16,7 +18,38 @@ async function getMCPManager() {
return mcpManager;
}
/**
* @param {(key: string) => Keyv} getLogStores
* @returns {Promise<FlowStateManager>}
*/
async function getFlowStateManager(getLogStores) {
if (!flowManager) {
const { FlowStateManager } = await import('librechat-mcp');
flowManager = new FlowStateManager(getLogStores(CacheKeys.FLOWS), {
ttl: Time.ONE_MINUTE * 3,
logger,
});
}
return flowManager;
}
/**
* Sends message data in Server Sent Events format.
* @param {ServerResponse} res - The server response.
* @param {{ data: string | Record<string, unknown>, event?: string }} event - The message event.
* @param {string} event.event - The type of event.
* @param {string} event.data - The message to be sent.
*/
const sendEvent = (res, event) => {
if (typeof event.data === 'string' && event.data.length === 0) {
return;
}
res.write(`event: message\ndata: ${JSON.stringify(event)}\n\n`);
};
module.exports = {
logger,
sendEvent,
getMCPManager,
getFlowStateManager,
};
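A minimal sketch of the new sendEvent helper from an SSE route handler (the payload shape is illustrative; res is an Express response with an open event stream):

const { sendEvent } = require('~/config');
// Writes `event: message\ndata: {"event":"status","data":{"state":"running"}}\n\n` to the stream
sendEvent(res, { event: 'status', data: { state: 'running' } });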

View File

@@ -4,6 +4,7 @@ const traverse = require('traverse');
const SPLAT_SYMBOL = Symbol.for('splat');
const MESSAGE_SYMBOL = Symbol.for('message');
const CONSOLE_JSON_STRING_LENGTH = parseInt(process.env.CONSOLE_JSON_STRING_LENGTH) || 255;
const sensitiveKeys = [
/^(sk-)[^\s]+/, // OpenAI API key pattern
@@ -205,13 +206,13 @@ const jsonTruncateFormat = winston.format((info) => {
seen.add(obj);
if (Array.isArray(obj)) {
return obj.map(item => truncateObject(item));
return obj.map((item) => truncateObject(item));
}
const newObj = {};
Object.entries(obj).forEach(([key, value]) => {
if (typeof value === 'string') {
newObj[key] = truncateLongStrings(value, 255);
newObj[key] = truncateLongStrings(value, CONSOLE_JSON_STRING_LENGTH);
} else {
newObj[key] = truncateObject(value);
}

View File

@@ -1,9 +1,11 @@
const { MeiliSearch } = require('meilisearch');
const Conversation = require('~/models/schema/convoSchema');
const Message = require('~/models/schema/messageSchema');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');
const searchEnabled = process.env?.SEARCH?.toLowerCase() === 'true';
const searchEnabled = isEnabled(process.env.SEARCH);
const indexingDisabled = isEnabled(process.env.MEILI_NO_SYNC);
let currentTimeout = null;
class MeiliSearchClient {
@@ -23,8 +25,7 @@ class MeiliSearchClient {
}
}
// eslint-disable-next-line no-unused-vars
async function indexSync(req, res, next) {
async function indexSync() {
if (!searchEnabled) {
return;
}
@@ -33,10 +34,15 @@ async function indexSync(req, res, next) {
const client = MeiliSearchClient.getInstance();
const { status } = await client.health();
if (status !== 'available' || !process.env.SEARCH) {
if (status !== 'available') {
throw new Error('Meilisearch not available');
}
if (indexingDisabled === true) {
logger.info('[indexSync] Indexing is disabled, skipping...');
return;
}
const messageCount = await Message.countDocuments();
const convoCount = await Conversation.countDocuments();
const messages = await client.index('messages').getStats();
@@ -71,7 +77,6 @@ async function indexSync(req, res, next) {
logger.info('[indexSync] Meilisearch not configured, search will be disabled.');
} else {
logger.error('[indexSync] error', err);
// res.status(500).json({ error: 'Server error' });
}
}
}
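For reference, a hypothetical configuration that keeps search available while skipping the background sync (values illustrative):

SEARCH=true
MEILI_NO_SYNC=true   # indexSync exits early and logs "[indexSync] Indexing is disabled, skipping..."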

View File

@@ -3,15 +3,6 @@ const cleanUpPrimaryKeyValue = (value) => {
return value.replace(/--/g, '|');
};
function replaceSup(text) {
if (!text.includes('<sup>')) {
return text;
}
const replacedText = text.replace(/<sup>/g, '^').replace(/\s+<\/sup>/g, '^');
return replacedText;
}
module.exports = {
cleanUpPrimaryKeyValue,
replaceSup,
};

View File

@@ -82,7 +82,7 @@ const loadAgent = async ({ req, agent_id }) => {
*/
const updateAgent = async (searchParameter, updateData) => {
const options = { new: true, upsert: false };
return await Agent.findOneAndUpdate(searchParameter, updateData, options).lean();
return Agent.findOneAndUpdate(searchParameter, updateData, options).lean();
};
/**
@@ -96,25 +96,29 @@ const updateAgent = async (searchParameter, updateData) => {
*/
const addAgentResourceFile = async ({ agent_id, tool_resource, file_id }) => {
const searchParameter = { id: agent_id };
const agent = await getAgent(searchParameter);
if (!agent) {
const fileIdsPath = `tool_resources.${tool_resource}.file_ids`;
await Agent.updateOne(
{
id: agent_id,
[`${fileIdsPath}`]: { $exists: false },
},
{
$set: {
[`${fileIdsPath}`]: [],
},
},
);
const updateData = { $addToSet: { [fileIdsPath]: file_id } };
const updatedAgent = await updateAgent(searchParameter, updateData);
if (updatedAgent) {
return updatedAgent;
} else {
throw new Error('Agent not found for adding resource file');
}
const tool_resources = agent.tool_resources || {};
if (!tool_resources[tool_resource]) {
tool_resources[tool_resource] = { file_ids: [] };
}
if (!tool_resources[tool_resource].file_ids.includes(file_id)) {
tool_resources[tool_resource].file_ids.push(file_id);
}
const updateData = { tool_resources };
return await updateAgent(searchParameter, updateData);
};
/**
@@ -126,36 +130,52 @@ const addAgentResourceFile = async ({ agent_id, tool_resource, file_id }) => {
*/
const removeAgentResourceFiles = async ({ agent_id, files }) => {
const searchParameter = { id: agent_id };
const agent = await getAgent(searchParameter);
if (!agent) {
throw new Error('Agent not found for removing resource files');
}
const tool_resources = { ...agent.tool_resources } || {};
// associate each tool resource with the respective file ids array
const filesByResource = files.reduce((acc, { tool_resource, file_id }) => {
if (!acc[tool_resource]) {
acc[tool_resource] = new Set();
acc[tool_resource] = [];
}
acc[tool_resource].add(file_id);
acc[tool_resource].push(file_id);
return acc;
}, {});
// build the update aggregation pipeline which removes file ids from the tool resources arrays
// and then deletes any tool resources left empty
const updateData = [];
Object.entries(filesByResource).forEach(([resource, fileIds]) => {
if (tool_resources[resource] && tool_resources[resource].file_ids) {
tool_resources[resource].file_ids = tool_resources[resource].file_ids.filter(
(id) => !fileIds.has(id),
);
const toolResourcePath = `tool_resources.${resource}`;
const fileIdsPath = `${toolResourcePath}.file_ids`;
if (tool_resources[resource].file_ids.length === 0) {
delete tool_resources[resource];
}
}
// file ids removal stage
updateData.push({
$set: {
[fileIdsPath]: {
$filter: {
input: `$${fileIdsPath}`,
cond: { $not: [{ $in: ['$$this', fileIds] }] },
},
},
},
});
// empty tool resource deletion stage
updateData.push({
$set: {
[toolResourcePath]: {
$cond: [{ $eq: [`$${fileIdsPath}`, []] }, '$$REMOVE', `$${toolResourcePath}`],
},
},
});
});
const updateData = { tool_resources };
return await updateAgent(searchParameter, updateData);
// return the updated agent or throw if no agent matches
const updatedAgent = await updateAgent(searchParameter, updateData);
if (updatedAgent) {
return updatedAgent;
} else {
throw new Error('Agent not found for removing resource files');
}
};
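// For illustration, a single removal such as
// removeAgentResourceFiles({ agent_id, files: [{ tool_resource: 'test_tool', file_id: 'abc' }] })
// now issues one pipeline-style update (sketch; field values are illustrative):
//   Agent.findOneAndUpdate({ id: agent_id }, [
//     { $set: { 'tool_resources.test_tool.file_ids': {
//         $filter: { input: '$tool_resources.test_tool.file_ids', cond: { $not: [{ $in: ['$$this', ['abc']] }] } } } } },
//     { $set: { 'tool_resources.test_tool': {
//         $cond: [{ $eq: ['$tool_resources.test_tool.file_ids', []] }, '$$REMOVE', '$tool_resources.test_tool'] } } },
//   ], { new: true, upsert: false });
// so concurrent removals no longer race on a read-modify-write of the whole document.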
/**
@@ -281,6 +301,7 @@ const updateAgentProjects = async ({ user, agentId, projectIds, removeProjectIds
};
module.exports = {
Agent,
getAgent,
loadAgent,
createAgent,

api/models/Agent.spec.js Normal file
View File

@@ -0,0 +1,160 @@
const mongoose = require('mongoose');
const { v4: uuidv4 } = require('uuid');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { Agent, addAgentResourceFile, removeAgentResourceFiles } = require('./Agent');
describe('Agent Resource File Operations', () => {
let mongoServer;
beforeAll(async () => {
mongoServer = await MongoMemoryServer.create();
const mongoUri = mongoServer.getUri();
await mongoose.connect(mongoUri);
});
afterAll(async () => {
await mongoose.disconnect();
await mongoServer.stop();
});
beforeEach(async () => {
await Agent.deleteMany({});
});
const createBasicAgent = async () => {
const agentId = `agent_${uuidv4()}`;
const agent = await Agent.create({
id: agentId,
name: 'Test Agent',
provider: 'test',
model: 'test-model',
author: new mongoose.Types.ObjectId(),
});
return agent;
};
test('should handle concurrent file additions', async () => {
const agent = await createBasicAgent();
const fileIds = Array.from({ length: 10 }, () => uuidv4());
// Concurrent additions
const additionPromises = fileIds.map((fileId) =>
addAgentResourceFile({
agent_id: agent.id,
tool_resource: 'test_tool',
file_id: fileId,
}),
);
await Promise.all(additionPromises);
const updatedAgent = await Agent.findOne({ id: agent.id });
expect(updatedAgent.tool_resources.test_tool.file_ids).toBeDefined();
expect(updatedAgent.tool_resources.test_tool.file_ids).toHaveLength(10);
expect(new Set(updatedAgent.tool_resources.test_tool.file_ids).size).toBe(10);
});
test('should handle concurrent additions and removals', async () => {
const agent = await createBasicAgent();
const initialFileIds = Array.from({ length: 5 }, () => uuidv4());
await Promise.all(
initialFileIds.map((fileId) =>
addAgentResourceFile({
agent_id: agent.id,
tool_resource: 'test_tool',
file_id: fileId,
}),
),
);
const newFileIds = Array.from({ length: 5 }, () => uuidv4());
const operations = [
...newFileIds.map((fileId) =>
addAgentResourceFile({
agent_id: agent.id,
tool_resource: 'test_tool',
file_id: fileId,
}),
),
...initialFileIds.map((fileId) =>
removeAgentResourceFiles({
agent_id: agent.id,
files: [{ tool_resource: 'test_tool', file_id: fileId }],
}),
),
];
await Promise.all(operations);
const updatedAgent = await Agent.findOne({ id: agent.id });
expect(updatedAgent.tool_resources.test_tool.file_ids).toBeDefined();
expect(updatedAgent.tool_resources.test_tool.file_ids).toHaveLength(5);
});
test('should initialize array when adding to non-existent tool resource', async () => {
const agent = await createBasicAgent();
const fileId = uuidv4();
const updatedAgent = await addAgentResourceFile({
agent_id: agent.id,
tool_resource: 'new_tool',
file_id: fileId,
});
expect(updatedAgent.tool_resources.new_tool.file_ids).toBeDefined();
expect(updatedAgent.tool_resources.new_tool.file_ids).toHaveLength(1);
expect(updatedAgent.tool_resources.new_tool.file_ids[0]).toBe(fileId);
});
test('should handle rapid sequential modifications to same tool resource', async () => {
const agent = await createBasicAgent();
const fileId = uuidv4();
for (let i = 0; i < 10; i++) {
await addAgentResourceFile({
agent_id: agent.id,
tool_resource: 'test_tool',
file_id: `${fileId}_${i}`,
});
if (i % 2 === 0) {
await removeAgentResourceFiles({
agent_id: agent.id,
files: [{ tool_resource: 'test_tool', file_id: `${fileId}_${i}` }],
});
}
}
const updatedAgent = await Agent.findOne({ id: agent.id });
expect(updatedAgent.tool_resources.test_tool.file_ids).toBeDefined();
expect(Array.isArray(updatedAgent.tool_resources.test_tool.file_ids)).toBe(true);
});
test('should handle multiple tool resources concurrently', async () => {
const agent = await createBasicAgent();
const toolResources = ['tool1', 'tool2', 'tool3'];
const operations = [];
toolResources.forEach((tool) => {
const fileIds = Array.from({ length: 5 }, () => uuidv4());
fileIds.forEach((fileId) => {
operations.push(
addAgentResourceFile({
agent_id: agent.id,
tool_resource: tool,
file_id: fileId,
}),
);
});
});
await Promise.all(operations);
const updatedAgent = await Agent.findOne({ id: agent.id });
toolResources.forEach((tool) => {
expect(updatedAgent.tool_resources[tool].file_ids).toBeDefined();
expect(updatedAgent.tool_resources[tool].file_ids).toHaveLength(5);
});
});
});

View File

@@ -1,44 +1,41 @@
const { logger } = require('~/config');
// const { Categories } = require('./schema/categories');
const options = [
{
label: '',
value: '',
},
{
label: 'idea',
label: 'com_ui_idea',
value: 'idea',
},
{
label: 'travel',
label: 'com_ui_travel',
value: 'travel',
},
{
label: 'teach_or_explain',
label: 'com_ui_teach_or_explain',
value: 'teach_or_explain',
},
{
label: 'write',
label: 'com_ui_write',
value: 'write',
},
{
label: 'shop',
label: 'com_ui_shop',
value: 'shop',
},
{
label: 'code',
label: 'com_ui_code',
value: 'code',
},
{
label: 'misc',
label: 'com_ui_misc',
value: 'misc',
},
{
label: 'roleplay',
label: 'com_ui_roleplay',
value: 'roleplay',
},
{
label: 'finance',
label: 'com_ui_finance',
value: 'finance',
},
];

View File

@@ -96,10 +96,24 @@ module.exports = {
update.conversationId = newConversationId;
}
if (req.body.isTemporary) {
const expiredAt = new Date();
expiredAt.setDate(expiredAt.getDate() + 30);
update.expiredAt = expiredAt;
} else {
update.expiredAt = null;
}
/** @type {{ $set: Partial<TConversation>; $unset?: Record<keyof TConversation, number> }} */
const updateOperation = { $set: update };
if (metadata && metadata.unsetFields && Object.keys(metadata.unsetFields).length > 0) {
updateOperation.$unset = metadata.unsetFields;
}
/** Note: the resulting Model object is necessary for Meilisearch operations */
const conversation = await Conversation.findOneAndUpdate(
{ conversationId, user: req.user.id },
update,
updateOperation,
{
new: true,
upsert: true,
@@ -143,6 +157,9 @@ module.exports = {
if (Array.isArray(tags) && tags.length > 0) {
query.tags = { $in: tags };
}
query.$and = [{ $or: [{ expiredAt: null }, { expiredAt: { $exists: false } }] }];
try {
const totalConvos = (await Conversation.countDocuments(query)) || 1;
const totalPages = Math.ceil(totalConvos / pageSize);
@@ -172,6 +189,7 @@ module.exports = {
Conversation.findOne({
user,
conversationId: convo.conversationId,
$or: [{ expiredAt: { $exists: false } }, { expiredAt: null }],
}).lean(),
),
);
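The switch above from passing `update` directly to passing `updateOperation` matters because Mongoose treats a bare object as an implicit `$set`, which can only write values; an explicit `$unset` is needed to remove fields in the same write. A minimal standalone illustration, assuming the Conversation model from this file (field names are illustrative):

// Sketch: implicit $set vs. explicit $set/$unset.
async function illustrateUpdateOperators(conversationId) {
  // A bare object is an implicit $set: it can only write values, never remove a key.
  await Conversation.updateOne({ conversationId }, { title: 'New title' });

  // Explicit operators can write some fields and drop others in one atomic update.
  await Conversation.updateOne(
    { conversationId },
    {
      $set: { title: 'New title', expiredAt: null },
      $unset: { files: 1 }, // removes the `files` field from the matched document
    },
  );
}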

View File

@@ -23,7 +23,6 @@ const idSchema = z.string().uuid();
* @param {string} [params.error] - Any error associated with the message.
* @param {boolean} [params.unfinished] - Indicates if the message is unfinished.
* @param {Object[]} [params.files] - An array of files associated with the message.
* @param {boolean} [params.isEdited] - Indicates if the message was edited.
* @param {string} [params.finish_reason] - Reason for finishing the message.
* @param {number} [params.tokenCount] - The number of tokens in the message.
* @param {string} [params.plugin] - Plugin associated with the message.
@@ -53,6 +52,15 @@ async function saveMessage(req, params, metadata) {
user: req.user.id,
messageId: params.newMessageId || params.messageId,
};
if (req?.body?.isTemporary) {
const expiredAt = new Date();
expiredAt.setDate(expiredAt.getDate() + 30);
update.expiredAt = expiredAt;
} else {
update.expiredAt = null;
}
const message = await Message.findOneAndUpdate(
{ messageId: params.messageId, user: req.user.id },
update,
@@ -77,7 +85,7 @@ async function saveMessage(req, params, metadata) {
* @returns {Promise<Object>} The result of the bulk write operation.
* @throws {Error} If there is an error in saving messages in bulk.
*/
async function bulkSaveMessages(messages, overrideTimestamp=false) {
async function bulkSaveMessages(messages, overrideTimestamp = false) {
try {
const bulkOps = messages.map((message) => ({
updateOne: {
@@ -182,7 +190,6 @@ async function updateMessageText(req, { messageId, text }) {
async function updateMessage(req, message, metadata) {
try {
const { messageId, ...update } = message;
update.isEdited = true;
const updatedMessage = await Message.findOneAndUpdate(
{ messageId, user: req.user.id },
update,
@@ -203,7 +210,6 @@ async function updateMessage(req, message, metadata) {
text: updatedMessage.text,
isCreatedByUser: updatedMessage.isCreatedByUser,
tokenCount: updatedMessage.tokenCount,
isEdited: true,
};
} catch (err) {
logger.error('Error updating message:', err);

View File

@@ -100,7 +100,6 @@ describe('Message Operations', () => {
expect.objectContaining({
messageId: 'msg123',
text: 'Hello, world!',
isEdited: true,
}),
);
});

View File

@@ -125,7 +125,7 @@ const getAllPromptGroups = async (req, filter) => {
if (searchShared) {
const project = await getProjectByName(Constants.GLOBAL_PROJECT_NAME, 'promptGroupIds');
if (project && project.promptGroupIds.length > 0) {
if (project && project.promptGroupIds && project.promptGroupIds.length > 0) {
const projectQuery = { _id: { $in: project.promptGroupIds }, ...query };
delete projectQuery.author;
combinedQuery = searchSharedOnly ? projectQuery : { $or: [projectQuery, query] };
@@ -179,7 +179,7 @@ const getPromptGroups = async (req, filter) => {
if (searchShared) {
// const projects = req.user.projects || []; // TODO: handle multiple projects
const project = await getProjectByName(Constants.GLOBAL_PROJECT_NAME, 'promptGroupIds');
if (project && project.promptGroupIds.length > 0) {
if (project && project.promptGroupIds && project.promptGroupIds.length > 0) {
const projectQuery = { _id: { $in: project.promptGroupIds }, ...query };
delete projectQuery.author;
combinedQuery = searchSharedOnly ? projectQuery : { $or: [projectQuery, query] };
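The added `project.promptGroupIds &&` guard protects against project documents saved without that array; on modern Node the same null-safety could equally be written with optional chaining:

// Equivalent null-safe check using optional chaining.
if (project?.promptGroupIds?.length > 0) {
  // ...build the shared-prompts query as above
}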

View File

@@ -6,8 +6,10 @@ const {
removeNullishValues,
agentPermissionsSchema,
promptPermissionsSchema,
runCodePermissionsSchema,
bookmarkPermissionsSchema,
multiConvoPermissionsSchema,
temporaryChatPermissionsSchema,
} = require('librechat-data-provider');
const getLogStores = require('~/cache/getLogStores');
const Role = require('~/models/schema/roleSchema');
@@ -77,6 +79,8 @@ const permissionSchemas = {
[PermissionTypes.PROMPTS]: promptPermissionsSchema,
[PermissionTypes.BOOKMARKS]: bookmarkPermissionsSchema,
[PermissionTypes.MULTI_CONVO]: multiConvoPermissionsSchema,
[PermissionTypes.TEMPORARY_CHAT]: temporaryChatPermissionsSchema,
[PermissionTypes.RUN_CODE]: runCodePermissionsSchema,
};
/**

View File

@@ -1,75 +1,275 @@
const mongoose = require('mongoose');
const signPayload = require('~/server/services/signPayload');
const { hashToken } = require('~/server/utils/crypto');
const sessionSchema = require('./schema/session');
const { logger } = require('~/config');
const Session = mongoose.model('Session', sessionSchema);
const { REFRESH_TOKEN_EXPIRY } = process.env ?? {};
const expires = eval(REFRESH_TOKEN_EXPIRY) ?? 1000 * 60 * 60 * 24 * 7;
const expires = eval(REFRESH_TOKEN_EXPIRY) ?? 1000 * 60 * 60 * 24 * 7; // 7 days default
const sessionSchema = mongoose.Schema({
refreshTokenHash: {
type: String,
required: true,
},
expiration: {
type: Date,
required: true,
expires: 0,
},
user: {
type: mongoose.Schema.Types.ObjectId,
ref: 'User',
required: true,
},
});
/**
* Error class for Session-related errors
*/
class SessionError extends Error {
constructor(message, code = 'SESSION_ERROR') {
super(message);
this.name = 'SessionError';
this.code = code;
}
}
/**
* Creates a new session for a user
* @param {string} userId - The ID of the user
* @param {Object} options - Additional options for session creation
* @param {Date} options.expiration - Custom expiration date
* @returns {Promise<{session: Session, refreshToken: string}>}
* @throws {SessionError}
*/
const createSession = async (userId, options = {}) => {
if (!userId) {
throw new SessionError('User ID is required', 'INVALID_USER_ID');
}
sessionSchema.methods.generateRefreshToken = async function () {
try {
let expiresIn;
if (this.expiration) {
expiresIn = this.expiration.getTime();
} else {
expiresIn = Date.now() + expires;
this.expiration = new Date(expiresIn);
const session = new Session({
user: userId,
expiration: options.expiration || new Date(Date.now() + expires),
});
const refreshToken = await generateRefreshToken(session);
return { session, refreshToken };
} catch (error) {
logger.error('[createSession] Error creating session:', error);
throw new SessionError('Failed to create session', 'CREATE_SESSION_FAILED');
}
};
/**
* Finds a session by various parameters
* @param {Object} params - Search parameters
* @param {string} [params.refreshToken] - The refresh token to search by
* @param {string} [params.userId] - The user ID to search by
* @param {string} [params.sessionId] - The session ID to search by
* @param {Object} [options] - Additional options
* @param {boolean} [options.lean=true] - Whether to return plain objects instead of documents
* @returns {Promise<Session|null>}
* @throws {SessionError}
*/
const findSession = async (params, options = { lean: true }) => {
try {
const query = {};
if (!params.refreshToken && !params.userId && !params.sessionId) {
throw new SessionError('At least one search parameter is required', 'INVALID_SEARCH_PARAMS');
}
if (params.refreshToken) {
const tokenHash = await hashToken(params.refreshToken);
query.refreshTokenHash = tokenHash;
}
if (params.userId) {
query.user = params.userId;
}
if (params.sessionId) {
const sessionId = params.sessionId.sessionId || params.sessionId;
if (!mongoose.Types.ObjectId.isValid(sessionId)) {
throw new SessionError('Invalid session ID format', 'INVALID_SESSION_ID');
}
query._id = sessionId;
}
// Add expiration check to only return valid sessions
query.expiration = { $gt: new Date() };
const sessionQuery = Session.findOne(query);
if (options.lean) {
return await sessionQuery.lean();
}
return await sessionQuery.exec();
} catch (error) {
logger.error('[findSession] Error finding session:', error);
throw new SessionError('Failed to find session', 'FIND_SESSION_FAILED');
}
};
/**
* Updates session expiration
* @param {Session|string} session - The session or session ID to update
* @param {Date} [newExpiration] - Optional new expiration date
* @returns {Promise<Session>}
* @throws {SessionError}
*/
const updateExpiration = async (session, newExpiration) => {
try {
const sessionDoc = typeof session === 'string' ? await Session.findById(session) : session;
if (!sessionDoc) {
throw new SessionError('Session not found', 'SESSION_NOT_FOUND');
}
sessionDoc.expiration = newExpiration || new Date(Date.now() + expires);
return await sessionDoc.save();
} catch (error) {
logger.error('[updateExpiration] Error updating session:', error);
throw new SessionError('Failed to update session expiration', 'UPDATE_EXPIRATION_FAILED');
}
};
/**
* Deletes a session by refresh token or session ID
* @param {Object} params - Delete parameters
* @param {string} [params.refreshToken] - The refresh token of the session to delete
* @param {string} [params.sessionId] - The ID of the session to delete
* @returns {Promise<Object>}
* @throws {SessionError}
*/
const deleteSession = async (params) => {
try {
if (!params.refreshToken && !params.sessionId) {
throw new SessionError(
'Either refreshToken or sessionId is required',
'INVALID_DELETE_PARAMS',
);
}
const query = {};
if (params.refreshToken) {
query.refreshTokenHash = await hashToken(params.refreshToken);
}
if (params.sessionId) {
query._id = params.sessionId;
}
const result = await Session.deleteOne(query);
if (result.deletedCount === 0) {
logger.warn('[deleteSession] No session found to delete');
}
return result;
} catch (error) {
logger.error('[deleteSession] Error deleting session:', error);
throw new SessionError('Failed to delete session', 'DELETE_SESSION_FAILED');
}
};
/**
* Deletes all sessions for a user
* @param {string} userId - The ID of the user
* @param {Object} [options] - Additional options
* @param {boolean} [options.excludeCurrentSession] - Whether to exclude the current session
* @param {string} [options.currentSessionId] - The ID of the current session to exclude
* @returns {Promise<Object>}
* @throws {SessionError}
*/
const deleteAllUserSessions = async (userId, options = {}) => {
try {
if (!userId) {
throw new SessionError('User ID is required', 'INVALID_USER_ID');
}
// Extract userId if it's passed as an object
const userIdString = userId.userId || userId;
if (!mongoose.Types.ObjectId.isValid(userIdString)) {
throw new SessionError('Invalid user ID format', 'INVALID_USER_ID_FORMAT');
}
const query = { user: userIdString };
if (options.excludeCurrentSession && options.currentSessionId) {
query._id = { $ne: options.currentSessionId };
}
const result = await Session.deleteMany(query);
if (result.deletedCount > 0) {
logger.debug(
`[deleteAllUserSessions] Deleted ${result.deletedCount} sessions for user ${userIdString}.`,
);
}
return result;
} catch (error) {
logger.error('[deleteAllUserSessions] Error deleting user sessions:', error);
throw new SessionError('Failed to delete user sessions', 'DELETE_ALL_SESSIONS_FAILED');
}
};
/**
* Generates a refresh token for a session
* @param {Session} session - The session to generate a token for
* @returns {Promise<string>}
* @throws {SessionError}
*/
const generateRefreshToken = async (session) => {
if (!session || !session.user) {
throw new SessionError('Invalid session object', 'INVALID_SESSION');
}
try {
const expiresIn = session.expiration ? session.expiration.getTime() : Date.now() + expires;
if (!session.expiration) {
session.expiration = new Date(expiresIn);
}
const refreshToken = await signPayload({
payload: { id: this.user },
payload: {
id: session.user,
sessionId: session._id,
},
secret: process.env.JWT_REFRESH_SECRET,
expirationTime: Math.floor((expiresIn - Date.now()) / 1000),
});
this.refreshTokenHash = await hashToken(refreshToken);
await this.save();
session.refreshTokenHash = await hashToken(refreshToken);
await session.save();
return refreshToken;
} catch (error) {
logger.error(
'Error generating refresh token. Is a `JWT_REFRESH_SECRET` set in the .env file?\n\n',
error,
);
throw error;
logger.error('[generateRefreshToken] Error generating refresh token:', error);
throw new SessionError('Failed to generate refresh token', 'GENERATE_TOKEN_FAILED');
}
};
sessionSchema.statics.deleteAllUserSessions = async function (userId) {
/**
* Counts active sessions for a user
* @param {string} userId - The ID of the user
* @returns {Promise<number>}
* @throws {SessionError}
*/
const countActiveSessions = async (userId) => {
try {
if (!userId) {
return;
}
const result = await this.deleteMany({ user: userId });
if (result && result?.deletedCount > 0) {
logger.debug(
`[deleteAllUserSessions] Deleted ${result.deletedCount} sessions for user ${userId}.`,
);
throw new SessionError('User ID is required', 'INVALID_USER_ID');
}
return await Session.countDocuments({
user: userId,
expiration: { $gt: new Date() },
});
} catch (error) {
logger.error('[deleteAllUserSessions] Error in deleting user sessions:', error);
throw error;
logger.error('[countActiveSessions] Error counting active sessions:', error);
throw new SessionError('Failed to count active sessions', 'COUNT_SESSIONS_FAILED');
}
};
const Session = mongoose.model('Session', sessionSchema);
module.exports = Session;
module.exports = {
createSession,
findSession,
updateExpiration,
deleteSession,
deleteAllUserSessions,
generateRefreshToken,
countActiveSessions,
SessionError,
};
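With the schema methods and statics replaced by plain exported functions, callers create a session and mint its refresh token in one call. A rough usage sketch, assuming the `~/models/Session` path used elsewhere in this diff and a configured `JWT_REFRESH_SECRET` (error handling elided):

// Sketch: issuing, validating, and rotating refresh tokens with the new API.
const { createSession, findSession, deleteSession } = require('~/models/Session');

async function issueRefreshToken(userId) {
  // Persists the session and returns the signed refresh token in one step.
  const { session, refreshToken } = await createSession(userId);
  return { sessionId: session._id, refreshToken };
}

async function rotateRefreshToken(refreshToken) {
  // findSession hashes the presented token and only matches unexpired sessions.
  const session = await findSession({ refreshToken });
  if (!session) {
    return null; // unknown or expired token
  }
  // One-time use: drop the old session before issuing a replacement.
  await deleteSession({ sessionId: session._id });
  return createSession(session.user);
}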

View File

@@ -1,82 +1,71 @@
const { nanoid } = require('nanoid');
const { Constants } = require('librechat-data-provider');
const { Conversation } = require('~/models/Conversation');
const SharedLink = require('./schema/shareSchema');
const { getMessages } = require('./Message');
const logger = require('~/config/winston');
/**
* Anonymizes a conversation ID
* @returns {string} The anonymized conversation ID
*/
function anonymizeConvoId() {
return `convo_${nanoid()}`;
class ShareServiceError extends Error {
constructor(message, code) {
super(message);
this.name = 'ShareServiceError';
this.code = code;
}
}
/**
* Anonymizes an assistant ID
* @returns {string} The anonymized assistant ID
*/
function anonymizeAssistantId() {
return `a_${nanoid()}`;
}
const memoizedAnonymizeId = (prefix) => {
const memo = new Map();
return (id) => {
if (!memo.has(id)) {
memo.set(id, `${prefix}_${nanoid()}`);
}
return memo.get(id);
};
};
/**
* Anonymizes a message ID
* @param {string} id - The original message ID
* @returns {string} The anonymized message ID
*/
function anonymizeMessageId(id) {
return id === Constants.NO_PARENT ? id : `msg_${nanoid()}`;
}
const anonymizeConvoId = memoizedAnonymizeId('convo');
const anonymizeAssistantId = memoizedAnonymizeId('a');
const anonymizeMessageId = (id) =>
id === Constants.NO_PARENT ? id : memoizedAnonymizeId('msg')(id);
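Memoizing the ID generators means the same original ID always maps to the same anonymized value within the process, so an assistant ID repeated across many messages (or a conversation ID reused in several places) stays consistent after anonymization; previously every call produced a fresh random ID. A tiny illustration (values are made up):

// Sketch: the same input yields the same anonymized output, distinct inputs stay distinct.
const anonymizeAsst = memoizedAnonymizeId('a');
const first = anonymizeAsst('asst_abc123'); // e.g. 'a_V1StGXR8_Z5jdHi6B'
const second = anonymizeAsst('asst_abc123'); // identical to `first`
console.assert(first === second);
console.assert(anonymizeAsst('asst_other') !== first);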
/**
* Anonymizes a conversation object
* @param {object} conversation - The conversation object
* @returns {object} The anonymized conversation object
*/
function anonymizeConvo(conversation) {
if (!conversation) {
return null;
}
const newConvo = { ...conversation };
if (newConvo.assistant_id) {
newConvo.assistant_id = anonymizeAssistantId();
newConvo.assistant_id = anonymizeAssistantId(newConvo.assistant_id);
}
return newConvo;
}
/**
* Anonymizes messages in a conversation
* @param {TMessage[]} messages - The original messages
* @param {string} newConvoId - The new conversation ID
* @returns {TMessage[]} The anonymized messages
*/
function anonymizeMessages(messages, newConvoId) {
if (!Array.isArray(messages)) {
return [];
}
const idMap = new Map();
return messages.map((message) => {
const newMessageId = anonymizeMessageId(message.messageId);
idMap.set(message.messageId, newMessageId);
const anonymizedMessage = Object.assign(message, {
return {
...message,
messageId: newMessageId,
parentMessageId:
idMap.get(message.parentMessageId) || anonymizeMessageId(message.parentMessageId),
conversationId: newConvoId,
});
if (anonymizedMessage.model && anonymizedMessage.model.startsWith('asst_')) {
anonymizedMessage.model = anonymizeAssistantId();
}
return anonymizedMessage;
model: message.model?.startsWith('asst_')
? anonymizeAssistantId(message.model)
: message.model,
};
});
}
/**
* Retrieves shared messages for a given share ID
* @param {string} shareId - The share ID
* @returns {Promise<object|null>} The shared conversation data or null if not found
*/
async function getSharedMessages(shareId) {
try {
const share = await SharedLink.findOne({ shareId })
const share = await SharedLink.findOne({ shareId, isPublic: true })
.populate({
path: 'messages',
select: '-_id -__v -user',
@@ -84,165 +73,264 @@ async function getSharedMessages(shareId) {
.select('-_id -__v -user')
.lean();
if (!share || !share.conversationId || !share.isPublic) {
if (!share?.conversationId || !share.isPublic) {
return null;
}
const newConvoId = anonymizeConvoId();
return Object.assign(share, {
const newConvoId = anonymizeConvoId(share.conversationId);
const result = {
...share,
conversationId: newConvoId,
messages: anonymizeMessages(share.messages, newConvoId),
});
};
return result;
} catch (error) {
logger.error('[getShare] Error getting share link', error);
throw new Error('Error getting share link');
logger.error('[getShare] Error getting share link', {
error: error.message,
shareId,
});
throw new ShareServiceError('Error getting share link', 'SHARE_FETCH_ERROR');
}
}
/**
* Retrieves shared links for a user
* @param {string} user - The user ID
* @param {number} [pageNumber=1] - The page number
* @param {number} [pageSize=25] - The page size
* @param {boolean} [isPublic=true] - Whether to retrieve public links only
* @returns {Promise<object>} The shared links and pagination data
*/
async function getSharedLinks(user, pageNumber = 1, pageSize = 25, isPublic = true) {
const query = { user, isPublic };
async function getSharedLinks(user, pageParam, pageSize, isPublic, sortBy, sortDirection, search) {
try {
const [totalConvos, sharedLinks] = await Promise.all([
SharedLink.countDocuments(query),
SharedLink.find(query)
.sort({ updatedAt: -1 })
.skip((pageNumber - 1) * pageSize)
.limit(pageSize)
.select('-_id -__v -user')
.lean(),
]);
const query = { user, isPublic };
const totalPages = Math.ceil((totalConvos || 1) / pageSize);
if (pageParam) {
if (sortDirection === 'desc') {
query[sortBy] = { $lt: pageParam };
} else {
query[sortBy] = { $gt: pageParam };
}
}
if (search && search.trim()) {
try {
const searchResults = await Conversation.meiliSearch(search);
if (!searchResults?.hits?.length) {
return {
links: [],
nextCursor: undefined,
hasNextPage: false,
};
}
const conversationIds = searchResults.hits.map((hit) => hit.conversationId);
query['conversationId'] = { $in: conversationIds };
} catch (searchError) {
logger.error('[getSharedLinks] Meilisearch error', {
error: searchError.message,
user,
});
return {
links: [],
nextCursor: undefined,
hasNextPage: false,
};
}
}
const sort = {};
sort[sortBy] = sortDirection === 'desc' ? -1 : 1;
if (Array.isArray(query.conversationId)) {
query.conversationId = { $in: query.conversationId };
}
const sharedLinks = await SharedLink.find(query)
.sort(sort)
.limit(pageSize + 1)
.select('-__v -user')
.lean();
const hasNextPage = sharedLinks.length > pageSize;
const links = sharedLinks.slice(0, pageSize);
const nextCursor = hasNextPage ? links[links.length - 1][sortBy] : undefined;
return {
sharedLinks,
pages: totalPages,
pageNumber,
pageSize,
links: links.map((link) => ({
shareId: link.shareId,
title: link?.title || 'Untitled',
isPublic: link.isPublic,
createdAt: link.createdAt,
conversationId: link.conversationId,
})),
nextCursor,
hasNextPage,
};
} catch (error) {
logger.error('[getShareByPage] Error getting shares', error);
throw new Error('Error getting shares');
}
}
/**
* Creates a new shared link
* @param {string} user - The user ID
* @param {object} shareData - The share data
* @param {string} shareData.conversationId - The conversation ID
* @returns {Promise<object>} The created shared link
*/
async function createSharedLink(user, { conversationId, ...shareData }) {
try {
const share = await SharedLink.findOne({ conversationId }).select('-_id -__v -user').lean();
if (share) {
const newConvoId = anonymizeConvoId();
const sharedConvo = anonymizeConvo(share);
return Object.assign(sharedConvo, {
conversationId: newConvoId,
messages: anonymizeMessages(share.messages, newConvoId),
});
}
const shareId = nanoid();
const messages = await getMessages({ conversationId });
const update = { ...shareData, shareId, messages, user };
const newShare = await SharedLink.findOneAndUpdate({ conversationId, user }, update, {
new: true,
upsert: true,
}).lean();
const newConvoId = anonymizeConvoId();
const sharedConvo = anonymizeConvo(newShare);
return Object.assign(sharedConvo, {
conversationId: newConvoId,
messages: anonymizeMessages(newShare.messages, newConvoId),
logger.error('[getSharedLinks] Error getting shares', {
error: error.message,
user,
});
} catch (error) {
logger.error('[createSharedLink] Error creating shared link', error);
throw new Error('Error creating shared link');
throw new ShareServiceError('Error getting shares', 'SHARES_FETCH_ERROR');
}
}
/**
* Updates an existing shared link
* @param {string} user - The user ID
* @param {object} shareData - The share data to update
* @param {string} shareData.conversationId - The conversation ID
* @returns {Promise<object>} The updated shared link
*/
async function updateSharedLink(user, { conversationId, ...shareData }) {
try {
const share = await SharedLink.findOne({ conversationId }).select('-_id -__v -user').lean();
if (!share) {
return { message: 'Share not found' };
}
const messages = await getMessages({ conversationId });
const update = { ...shareData, messages, user };
const updatedShare = await SharedLink.findOneAndUpdate({ conversationId, user }, update, {
new: true,
upsert: false,
}).lean();
const newConvoId = anonymizeConvoId();
const sharedConvo = anonymizeConvo(updatedShare);
return Object.assign(sharedConvo, {
conversationId: newConvoId,
messages: anonymizeMessages(updatedShare.messages, newConvoId),
});
} catch (error) {
logger.error('[updateSharedLink] Error updating shared link', error);
throw new Error('Error updating shared link');
}
}
/**
* Deletes a shared link
* @param {string} user - The user ID
* @param {object} params - The deletion parameters
* @param {string} params.shareId - The share ID to delete
* @returns {Promise<object>} The result of the deletion
*/
async function deleteSharedLink(user, { shareId }) {
try {
const result = await SharedLink.findOneAndDelete({ shareId, user });
return result ? { message: 'Share deleted successfully' } : { message: 'Share not found' };
} catch (error) {
logger.error('[deleteSharedLink] Error deleting shared link', error);
throw new Error('Error deleting shared link');
}
}
/**
* Deletes all shared links for a specific user
* @param {string} user - The user ID
* @returns {Promise<object>} The result of the deletion
*/
async function deleteAllSharedLinks(user) {
try {
const result = await SharedLink.deleteMany({ user });
return {
message: 'All shared links have been deleted successfully',
message: 'All shared links deleted successfully',
deletedCount: result.deletedCount,
};
} catch (error) {
logger.error('[deleteAllSharedLinks] Error deleting shared links', error);
throw new Error('Error deleting shared links');
logger.error('[deleteAllSharedLinks] Error deleting shared links', {
error: error.message,
user,
});
throw new ShareServiceError('Error deleting shared links', 'BULK_DELETE_ERROR');
}
}
async function createSharedLink(user, conversationId) {
if (!user || !conversationId) {
throw new ShareServiceError('Missing required parameters', 'INVALID_PARAMS');
}
try {
const [existingShare, conversationMessages] = await Promise.all([
SharedLink.findOne({ conversationId, isPublic: true }).select('-_id -__v -user').lean(),
getMessages({ conversationId }),
]);
if (existingShare && existingShare.isPublic) {
throw new ShareServiceError('Share already exists', 'SHARE_EXISTS');
} else if (existingShare) {
await SharedLink.deleteOne({ conversationId });
}
const conversation = await Conversation.findOne({ conversationId }).lean();
const title = conversation?.title || 'Untitled';
const shareId = nanoid();
await SharedLink.create({
shareId,
conversationId,
messages: conversationMessages,
title,
user,
});
return { shareId, conversationId };
} catch (error) {
logger.error('[createSharedLink] Error creating shared link', {
error: error.message,
user,
conversationId,
});
throw new ShareServiceError('Error creating shared link', 'SHARE_CREATE_ERROR');
}
}
async function getSharedLink(user, conversationId) {
if (!user || !conversationId) {
throw new ShareServiceError('Missing required parameters', 'INVALID_PARAMS');
}
try {
const share = await SharedLink.findOne({ conversationId, user, isPublic: true })
.select('shareId -_id')
.lean();
if (!share) {
return { shareId: null, success: false };
}
return { shareId: share.shareId, success: true };
} catch (error) {
logger.error('[getSharedLink] Error getting shared link', {
error: error.message,
user,
conversationId,
});
throw new ShareServiceError('Error getting shared link', 'SHARE_FETCH_ERROR');
}
}
async function updateSharedLink(user, shareId) {
if (!user || !shareId) {
throw new ShareServiceError('Missing required parameters', 'INVALID_PARAMS');
}
try {
const share = await SharedLink.findOne({ shareId }).select('-_id -__v -user').lean();
if (!share) {
throw new ShareServiceError('Share not found', 'SHARE_NOT_FOUND');
}
const [updatedMessages] = await Promise.all([
getMessages({ conversationId: share.conversationId }),
]);
const newShareId = nanoid();
const update = {
messages: updatedMessages,
user,
shareId: newShareId,
};
const updatedShare = await SharedLink.findOneAndUpdate({ shareId, user }, update, {
new: true,
upsert: false,
runValidators: true,
}).lean();
if (!updatedShare) {
throw new ShareServiceError('Share update failed', 'SHARE_UPDATE_ERROR');
}
anonymizeConvo(updatedShare);
return { shareId: newShareId, conversationId: updatedShare.conversationId };
} catch (error) {
logger.error('[updateSharedLink] Error updating shared link', {
error: error.message,
user,
shareId,
});
throw new ShareServiceError(
error.code === 'SHARE_UPDATE_ERROR' ? error.message : 'Error updating shared link',
error.code || 'SHARE_UPDATE_ERROR',
);
}
}
async function deleteSharedLink(user, shareId) {
if (!user || !shareId) {
throw new ShareServiceError('Missing required parameters', 'INVALID_PARAMS');
}
try {
const result = await SharedLink.findOneAndDelete({ shareId, user }).lean();
if (!result) {
return null;
}
return {
success: true,
shareId,
message: 'Share deleted successfully',
};
} catch (error) {
logger.error('[deleteSharedLink] Error deleting shared link', {
error: error.message,
user,
shareId,
});
throw new ShareServiceError('Error deleting shared link', 'SHARE_DELETE_ERROR');
}
}
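Taken together, the rewritten service exposes a small share-link lifecycle keyed by `shareId` instead of returning full anonymized payloads from every mutation. A rough end-to-end usage sketch, assuming the functions are required from this module (error handling elided; note that `updateSharedLink` rotates the share ID):

// Sketch: typical share-link lifecycle from a controller's point of view.
const {
  createSharedLink,
  getSharedLink,
  updateSharedLink,
  deleteSharedLink,
  getSharedMessages,
} = require('~/models/Share');

async function shareFlow(userId, conversationId) {
  // 1. Create a public link (throws SHARE_EXISTS if one is already live).
  const { shareId } = await createSharedLink(userId, conversationId);

  // 2. Anyone holding the link fetches the anonymized conversation.
  const shared = await getSharedMessages(shareId);

  // 3. Refresh the snapshot; this returns a brand-new shareId.
  const { shareId: rotatedId } = await updateSharedLink(userId, shareId);

  // 4. Look up the current link for the conversation, then revoke it.
  const { shareId: currentId } = await getSharedLink(userId, conversationId);
  await deleteSharedLink(userId, currentId);
  return { shared, rotatedId };
}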
module.exports = {
SharedLink,
getSharedLink,
getSharedLinks,
createSharedLink,
updateSharedLink,

View File

@@ -1,5 +1,6 @@
const tokenSchema = require('./schema/tokenSchema');
const mongoose = require('mongoose');
const { encryptV2 } = require('~/server/utils/crypto');
const tokenSchema = require('./schema/tokenSchema');
const { logger } = require('~/config');
/**
@@ -7,6 +8,39 @@ const { logger } = require('~/config');
* @type {mongoose.Model}
*/
const Token = mongoose.model('Token', tokenSchema);
/**
* Fixes the indexes for the Token collection from legacy TTL indexes to the new expiresAt index.
*/
async function fixIndexes() {
try {
if (
process.env.NODE_ENV === 'CI' ||
process.env.NODE_ENV === 'development' ||
process.env.NODE_ENV === 'test'
) {
return;
}
const indexes = await Token.collection.indexes();
logger.debug('Existing Token Indexes:', JSON.stringify(indexes, null, 2));
const unwantedTTLIndexes = indexes.filter(
(index) => index.key.createdAt === 1 && index.expireAfterSeconds !== undefined,
);
if (unwantedTTLIndexes.length === 0) {
logger.debug('No unwanted Token indexes found.');
return;
}
for (const index of unwantedTTLIndexes) {
logger.debug(`Dropping unwanted Token index: ${index.name}`);
await Token.collection.dropIndex(index.name);
logger.debug(`Dropped Token index: ${index.name}`);
}
logger.debug('Token index cleanup completed successfully.');
} catch (error) {
logger.error('An error occurred while fixing Token indexes:', error);
}
}
fixIndexes();
/**
* Creates a new Token instance.
@@ -29,8 +63,7 @@ async function createToken(tokenData) {
expiresAt,
};
const newToken = new Token(newTokenData);
return await newToken.save();
return await Token.create(newTokenData);
} catch (error) {
logger.debug('An error occurred while creating token:', error);
throw error;
@@ -42,7 +75,8 @@ async function createToken(tokenData) {
* @param {Object} query - The query to match against.
* @param {mongoose.Types.ObjectId|String} query.userId - The ID of the user.
* @param {String} query.token - The token value.
* @param {String} query.email - The email of the user.
* @param {String} [query.email] - The email of the user.
* @param {String} [query.identifier] - Unique, alternative identifier for the token.
* @returns {Promise<Object|null>} The matched Token document, or null if not found.
* @throws Will throw an error if the find operation fails.
*/
@@ -59,6 +93,9 @@ async function findToken(query) {
if (query.email) {
conditions.push({ email: query.email });
}
if (query.identifier) {
conditions.push({ identifier: query.identifier });
}
const token = await Token.findOne({
$and: conditions,
@@ -76,6 +113,8 @@ async function findToken(query) {
* @param {Object} query - The query to match against.
* @param {mongoose.Types.ObjectId|String} query.userId - The ID of the user.
* @param {String} query.token - The token value.
* @param {String} [query.email] - The email of the user.
* @param {String} [query.identifier] - Unique, alternative identifier for the token.
* @param {Object} updateData - The data to update the Token with.
* @returns {Promise<mongoose.Document|null>} The updated Token document, or null if not found.
* @throws Will throw an error if the update operation fails.
@@ -94,14 +133,20 @@ async function updateToken(query, updateData) {
* @param {Object} query - The query to match against.
* @param {mongoose.Types.ObjectId|String} query.userId - The ID of the user.
* @param {String} query.token - The token value.
* @param {String} query.email - The email of the user.
* @param {String} [query.email] - The email of the user.
* @param {String} [query.identifier] - Unique, alternative identifier for the token.
* @returns {Promise<Object>} The result of the delete operation.
* @throws Will throw an error if the delete operation fails.
*/
async function deleteTokens(query) {
try {
return await Token.deleteMany({
$or: [{ userId: query.userId }, { token: query.token }, { email: query.email }],
$or: [
{ userId: query.userId },
{ token: query.token },
{ email: query.email },
{ identifier: query.identifier },
],
});
} catch (error) {
logger.debug('An error occurred while deleting tokens:', error);
@@ -109,9 +154,46 @@ async function deleteTokens(query) {
}
}
/**
* Handles the OAuth token by creating or updating the token.
* @param {object} fields
* @param {string} fields.userId - The user's ID.
* @param {string} fields.token - The full token to store.
* @param {string} fields.identifier - Unique, alternative identifier for the token.
* @param {number} fields.expiresIn - The number of seconds until the token expires.
* @param {object} fields.metadata - Additional metadata to store with the token.
* @param {string} [fields.type="oauth"] - The type of token. Default is 'oauth'.
*/
async function handleOAuthToken({
token,
userId,
identifier,
expiresIn,
metadata,
type = 'oauth',
}) {
const encryptedToken = await encryptV2(token);
const tokenData = {
type,
userId,
metadata,
identifier,
token: encryptedToken,
expiresIn: parseInt(expiresIn, 10) || 3600,
};
const existingToken = await findToken({ userId, identifier });
if (existingToken) {
return await updateToken({ identifier }, tokenData);
} else {
return await createToken(tokenData);
}
}
module.exports = {
createToken,
findToken,
createToken,
updateToken,
deleteTokens,
handleOAuthToken,
};
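`handleOAuthToken` is effectively an encrypt-then-upsert helper keyed on `identifier`: repeat calls for the same identifier update the stored token instead of creating duplicates. A short usage sketch for an OAuth callback that has just exchanged an authorization code (the identifier format and module path are illustrative assumptions):

// Sketch: persisting a provider access token after an OAuth exchange.
const { handleOAuthToken } = require('~/models/Token');

async function storeProviderToken(userId, provider, tokenResponse) {
  return handleOAuthToken({
    userId,
    token: tokenResponse.access_token, // encrypted with encryptV2 before storage
    identifier: `${provider}:${userId}`, // illustrative unique key; reuse updates in place
    expiresIn: tokenResponse.expires_in, // seconds; falls back to 3600 if unparsable
    metadata: { scope: tokenResponse.scope },
  });
}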

View File

@@ -27,6 +27,9 @@ transactionSchema.methods.calculateTokenValue = function () {
*/
transactionSchema.statics.create = async function (txData) {
const Transaction = this;
if (txData.rawAmount != null && isNaN(txData.rawAmount)) {
return;
}
const transaction = new Transaction(txData);
transaction.endpointTokenConfig = txData.endpointTokenConfig;

View File

@@ -1,5 +1,6 @@
const mongoose = require('mongoose');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { Transaction } = require('./Transaction');
const Balance = require('./Balance');
const { spendTokens, spendStructuredTokens } = require('./spendTokens');
const { getMultiplier, getCacheMultiplier } = require('./tx');
@@ -346,3 +347,28 @@ describe('Structured Token Spending Tests', () => {
expect(result.completion.completion).toBeCloseTo(-50 * 15 * 1.15, 0); // Assuming multiplier is 15 and cancelRate is 1.15
});
});
describe('NaN Handling Tests', () => {
test('should skip transaction creation when rawAmount is NaN', async () => {
const userId = new mongoose.Types.ObjectId();
const initialBalance = 10000000;
await Balance.create({ user: userId, tokenCredits: initialBalance });
const model = 'gpt-3.5-turbo';
const txData = {
user: userId,
conversationId: 'test-conversation-id',
model,
context: 'test',
endpointTokenConfig: null,
rawAmount: NaN,
tokenType: 'prompt',
};
const result = await Transaction.create(txData);
expect(result).toBeUndefined();
const balance = await Balance.findOne({ user: userId });
expect(balance.tokenCredits).toBe(initialBalance);
});
});

View File

@@ -26,10 +26,18 @@ const {
deleteMessagesSince,
deleteMessages,
} = require('./Message');
const {
createSession,
findSession,
updateExpiration,
deleteSession,
deleteAllUserSessions,
generateRefreshToken,
countActiveSessions,
} = require('./Session');
const { getConvoTitle, getConvo, saveConvo, deleteConvos } = require('./Conversation');
const { getPreset, getPresets, savePreset, deletePresets } = require('./Preset');
const { createToken, findToken, updateToken, deleteTokens } = require('./Token');
const Session = require('./Session');
const Balance = require('./Balance');
const User = require('./User');
const Key = require('./Key');
@@ -75,8 +83,15 @@ module.exports = {
updateToken,
deleteTokens,
createSession,
findSession,
updateExpiration,
deleteSession,
deleteAllUserSessions,
generateRefreshToken,
countActiveSessions,
User,
Key,
Session,
Balance,
};

View File

@@ -35,6 +35,9 @@ const agentSchema = mongoose.Schema(
model_parameters: {
type: Object,
},
artifacts: {
type: String,
},
access_level: {
type: Number,
},

View File

@@ -20,8 +20,6 @@ const convoSchema = mongoose.Schema(
index: true,
},
messages: [{ type: mongoose.Schema.Types.ObjectId, ref: 'Message' }],
// google only
examples: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined },
agentOptions: {
type: mongoose.Schema.Types.Mixed,
},
@@ -29,22 +27,6 @@ const convoSchema = mongoose.Schema(
agent_id: {
type: String,
},
// for bingAI only
bingConversationId: {
type: String,
},
jailbreakConversationId: {
type: String,
},
conversationSignature: {
type: String,
},
clientId: {
type: String,
},
invocationId: {
type: Number,
},
tags: {
type: [String],
default: [],
@@ -53,6 +35,9 @@ const convoSchema = mongoose.Schema(
files: {
type: [String],
},
expiredAt: {
type: Date,
},
},
{ timestamps: true },
);
@@ -61,11 +46,13 @@ if (process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY) {
convoSchema.plugin(mongoMeili, {
host: process.env.MEILI_HOST,
apiKey: process.env.MEILI_MASTER_KEY,
indexName: 'convos', // Will get created automatically if it doesn't exist already
/** Note: Will get created automatically if it doesn't exist already */
indexName: 'convos',
primaryKey: 'conversationId',
});
}
convoSchema.index({ expiredAt: 1 }, { expireAfterSeconds: 0 });
convoSchema.index({ createdAt: 1, updatedAt: 1 });
convoSchema.index({ conversationId: 1, user: 1 }, { unique: true });
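The new `expiredAt` index with `expireAfterSeconds: 0` is what actually deletes temporary conversations: MongoDB's TTL monitor removes any document whose `expiredAt` date has passed, while documents where the field is null or absent are never expired. A standalone sketch of the same pattern on a throwaway schema:

// Sketch: per-document TTL expiry driven by a date field.
const mongoose = require('mongoose');

const noteSchema = new mongoose.Schema(
  {
    text: String,
    // When set, the TTL monitor deletes the document once this date is in the past;
    // leaving it null or unset keeps the document forever.
    expiredAt: { type: Date },
  },
  { timestamps: true },
);

// expireAfterSeconds: 0 means "expire exactly at the stored date".
noteSchema.index({ expiredAt: 1 }, { expireAfterSeconds: 0 });

const Note = mongoose.model('Note', noteSchema);

async function createTemporaryNote(text) {
  const expiredAt = new Date(Date.now() + 30 * 24 * 60 * 60 * 1000); // 30 days out
  return Note.create({ text, expiredAt });
}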

View File

@@ -1,5 +1,7 @@
const mongoose = require('mongoose');
const conversationPreset = {
// endpoint: [azureOpenAI, openAI, bingAI, anthropic, chatGPTBrowser]
// endpoint: [azureOpenAI, openAI, anthropic, chatGPTBrowser]
endpoint: {
type: String,
default: null,
@@ -24,6 +26,7 @@ const conversationPreset = {
required: false,
},
// for google only
examples: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined },
modelLabel: {
type: String,
required: false,
@@ -61,19 +64,6 @@ const conversationPreset = {
type: Number,
required: false,
},
// for bingai only
jailbreak: {
type: Boolean,
},
context: {
type: String,
},
systemMessage: {
type: String,
},
toneStyle: {
type: String,
},
file_ids: { type: [{ type: String }], default: undefined },
// deprecated
resendImages: {
@@ -83,6 +73,12 @@ const conversationPreset = {
promptCache: {
type: Boolean,
},
thinking: {
type: Boolean,
},
thinkingBudget: {
type: Number,
},
system: {
type: String,
},
@@ -130,64 +126,12 @@ const conversationPreset = {
max_tokens: {
type: Number,
},
};
const agentOptions = {
model: {
type: String,
required: false,
},
// for azureOpenAI, openAI only
chatGptLabel: {
type: String,
required: false,
},
modelLabel: {
type: String,
required: false,
},
promptPrefix: {
type: String,
required: false,
},
temperature: {
type: Number,
required: false,
},
top_p: {
type: Number,
required: false,
},
// for google only
topP: {
type: Number,
required: false,
},
topK: {
type: Number,
required: false,
},
maxOutputTokens: {
type: Number,
required: false,
},
presence_penalty: {
type: Number,
required: false,
},
frequency_penalty: {
type: Number,
required: false,
},
context: {
type: String,
},
systemMessage: {
/** omni models only */
reasoning_effort: {
type: String,
},
};
module.exports = {
conversationPreset,
agentOptions,
};

View File

@@ -16,7 +16,6 @@ const keySchema = mongoose.Schema({
},
expiresAt: {
type: Date,
expires: 0,
},
});

View File

@@ -62,10 +62,6 @@ const messageSchema = mongoose.Schema(
required: true,
default: false,
},
isEdited: {
type: Boolean,
default: false,
},
unfinished: {
type: Boolean,
default: false,
@@ -138,6 +134,9 @@ const messageSchema = mongoose.Schema(
default: undefined,
},
*/
expiredAt: {
type: Date,
},
},
{ timestamps: true },
);
@@ -150,7 +149,7 @@ if (process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY) {
primaryKey: 'messageId',
});
}
messageSchema.index({ expiredAt: 1 }, { expireAfterSeconds: 0 });
messageSchema.index({ createdAt: 1 });
messageSchema.index({ messageId: 1, user: 1 }, { unique: true });

View File

@@ -23,8 +23,6 @@ const presetSchema = mongoose.Schema(
order: {
type: Number,
},
// google only
examples: [{ type: mongoose.Schema.Types.Mixed }],
...conversationPreset,
agentOptions: {
type: mongoose.Schema.Types.Mixed,

View File

@@ -48,6 +48,18 @@ const roleSchema = new mongoose.Schema({
default: true,
},
},
[PermissionTypes.TEMPORARY_CHAT]: {
[Permissions.USE]: {
type: Boolean,
default: true,
},
},
[PermissionTypes.RUN_CODE]: {
[Permissions.USE]: {
type: Boolean,
default: true,
},
},
});
const Role = mongoose.model('Role', roleSchema);
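Both new permission types default to enabled for every role. A small sketch of how a permission check might read given these schema fields; the actual middleware in the codebase may differ:

// Sketch: gating a request on the new TEMPORARY_CHAT permission.
const { PermissionTypes, Permissions } = require('librechat-data-provider');

function canUseTemporaryChat(role) {
  // Schema defaults make this true unless an admin has explicitly disabled it.
  return role?.[PermissionTypes.TEMPORARY_CHAT]?.[Permissions.USE] === true;
}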

View File

@@ -0,0 +1,20 @@
const mongoose = require('mongoose');
const sessionSchema = mongoose.Schema({
refreshTokenHash: {
type: String,
required: true,
},
expiration: {
type: Date,
required: true,
expires: 0,
},
user: {
type: mongoose.Schema.Types.ObjectId,
ref: 'User',
required: true,
},
});
module.exports = sessionSchema;

View File

@@ -20,14 +20,6 @@ const shareSchema = mongoose.Schema(
index: true,
},
isPublic: {
type: Boolean,
default: false,
},
isVisible: {
type: Boolean,
default: false,
},
isAnonymous: {
type: Boolean,
default: true,
},

View File

@@ -10,6 +10,10 @@ const tokenSchema = new Schema({
email: {
type: String,
},
type: {
type: String,
},
identifier: {
type: String,
},
token: {
type: String,
required: true,
@@ -23,6 +27,10 @@ const tokenSchema = new Schema({
type: Date,
required: true,
},
metadata: {
type: Map,
of: Schema.Types.Mixed,
},
});
tokenSchema.index({ expiresAt: 1 }, { expireAfterSeconds: 0 });

View File

@@ -23,6 +23,7 @@ const { SystemRoles } = require('librechat-data-provider');
* @property {string} [ldapId] - Optional LDAP ID for the user
* @property {string} [githubId] - Optional GitHub ID for the user
* @property {string} [discordId] - Optional Discord ID for the user
* @property {string} [appleId] - Optional Apple ID for the user
* @property {Array} [plugins=[]] - List of plugins used by the user
* @property {Array.<MongoSession>} [refreshToken] - List of sessions with refresh tokens
* @property {Date} [expiresAt] - Optional expiration date of the file
@@ -38,6 +39,12 @@ const Session = mongoose.Schema({
},
});
const backupCodeSchema = mongoose.Schema({
codeHash: { type: String, required: true },
used: { type: Boolean, default: false },
usedAt: { type: Date, default: null },
});
/** @type {MongooseSchema<MongoUser>} */
const userSchema = mongoose.Schema(
{
@@ -111,9 +118,19 @@ const userSchema = mongoose.Schema(
unique: true,
sparse: true,
},
appleId: {
type: String,
unique: true,
sparse: true,
},
plugins: {
type: Array,
default: [],
},
totpSecret: {
type: String,
},
backupCodes: {
type: [backupCodeSchema],
},
refreshToken: {
type: [Session],

View File

@@ -75,8 +75,9 @@ const tokenValues = Object.assign(
'4k': { prompt: 1.5, completion: 2 },
'16k': { prompt: 3, completion: 4 },
'gpt-3.5-turbo-1106': { prompt: 1, completion: 2 },
'o3-mini': { prompt: 1.1, completion: 4.4 },
'o1-mini': { prompt: 1.1, completion: 4.4 },
'o1-preview': { prompt: 15, completion: 60 },
'o1-mini': { prompt: 3, completion: 12 },
o1: { prompt: 15, completion: 60 },
'gpt-4o-mini': { prompt: 0.15, completion: 0.6 },
'gpt-4o': { prompt: 2.5, completion: 10 },
@@ -87,6 +88,8 @@ const tokenValues = Object.assign(
'claude-3-sonnet': { prompt: 3, completion: 15 },
'claude-3-5-sonnet': { prompt: 3, completion: 15 },
'claude-3.5-sonnet': { prompt: 3, completion: 15 },
'claude-3-7-sonnet': { prompt: 3, completion: 15 },
'claude-3.7-sonnet': { prompt: 3, completion: 15 },
'claude-3-5-haiku': { prompt: 0.8, completion: 4 },
'claude-3.5-haiku': { prompt: 0.8, completion: 4 },
'claude-3-haiku': { prompt: 0.25, completion: 1.25 },
@@ -96,12 +99,27 @@ const tokenValues = Object.assign(
'claude-': { prompt: 0.8, completion: 2.4 },
'command-r-plus': { prompt: 3, completion: 15 },
'command-r': { prompt: 0.5, completion: 1.5 },
'deepseek-reasoner': { prompt: 0.55, completion: 2.19 },
deepseek: { prompt: 0.14, completion: 0.28 },
/* cohere doesn't have rates for the older command models,
so this was from https://artificialanalysis.ai/models/command-light/providers */
command: { prompt: 0.38, completion: 0.38 },
'gemini-2.0-flash-lite': { prompt: 0.075, completion: 0.3 },
'gemini-2.0-flash': { prompt: 0.1, completion: 0.7 },
'gemini-2.0': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
'gemini-1.5': { prompt: 7, completion: 21 }, // May 2nd, 2024 pricing
gemini: { prompt: 0.5, completion: 1.5 }, // May 2nd, 2024 pricing
'gemini-1.5-flash-8b': { prompt: 0.075, completion: 0.3 },
'gemini-1.5-flash': { prompt: 0.15, completion: 0.6 },
'gemini-1.5': { prompt: 2.5, completion: 10 },
'gemini-pro-vision': { prompt: 0.5, completion: 1.5 },
gemini: { prompt: 0.5, completion: 1.5 },
'grok-2-vision-1212': { prompt: 2.0, completion: 10.0 },
'grok-2-vision-latest': { prompt: 2.0, completion: 10.0 },
'grok-2-vision': { prompt: 2.0, completion: 10.0 },
'grok-vision-beta': { prompt: 5.0, completion: 15.0 },
'grok-2-1212': { prompt: 2.0, completion: 10.0 },
'grok-2-latest': { prompt: 2.0, completion: 10.0 },
'grok-2': { prompt: 2.0, completion: 10.0 },
'grok-beta': { prompt: 5.0, completion: 15.0 },
},
bedrockValues,
);
@@ -113,6 +131,8 @@ const tokenValues = Object.assign(
* @type {Object.<string, {write: number, read: number }>}
*/
const cacheTokenValues = {
'claude-3.7-sonnet': { write: 3.75, read: 0.3 },
'claude-3-7-sonnet': { write: 3.75, read: 0.3 },
'claude-3.5-sonnet': { write: 3.75, read: 0.3 },
'claude-3-5-sonnet': { write: 3.75, read: 0.3 },
'claude-3.5-haiku': { write: 1, read: 0.08 },
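The values in both tables track the providers' published per-million-token prices, and `getMultiplier` / `getCacheMultiplier` resolve a model name to the closest key. A back-of-the-envelope sketch of estimating a single exchange's cost from these rates (this mirrors the lookups exercised in the tests below, not the exact Transaction accounting):

// Sketch: rough cost estimate for one prompt/completion pair using the tables above.
const { getMultiplier } = require('~/models/tx'); // assumed module path

function estimateCostUSD({ model, promptTokens, completionTokens }) {
  const promptRate = getMultiplier({ model, tokenType: 'prompt' }); // per 1M prompt tokens
  const completionRate = getMultiplier({ model, tokenType: 'completion' }); // per 1M completion tokens
  return (promptTokens * promptRate + completionTokens * completionRate) / 1e6;
}

// e.g. estimateCostUSD({ model: 'claude-3-7-sonnet', promptTokens: 1200, completionTokens: 800 })
// -> (1200 * 3 + 800 * 15) / 1e6 ≈ $0.0156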

View File

@@ -80,6 +80,20 @@ describe('getValueKey', () => {
expect(getValueKey('chatgpt-4o-latest-0718')).toBe('gpt-4o');
});
it('should return "claude-3-7-sonnet" for model type of "claude-3-7-sonnet-"', () => {
expect(getValueKey('claude-3-7-sonnet-20240620')).toBe('claude-3-7-sonnet');
expect(getValueKey('anthropic/claude-3-7-sonnet')).toBe('claude-3-7-sonnet');
expect(getValueKey('claude-3-7-sonnet-turbo')).toBe('claude-3-7-sonnet');
expect(getValueKey('claude-3-7-sonnet-0125')).toBe('claude-3-7-sonnet');
});
it('should return "claude-3.7-sonnet" for model type of "claude-3.7-sonnet-"', () => {
expect(getValueKey('claude-3.7-sonnet-20240620')).toBe('claude-3.7-sonnet');
expect(getValueKey('anthropic/claude-3.7-sonnet')).toBe('claude-3.7-sonnet');
expect(getValueKey('claude-3.7-sonnet-turbo')).toBe('claude-3.7-sonnet');
expect(getValueKey('claude-3.7-sonnet-0125')).toBe('claude-3.7-sonnet');
});
it('should return "claude-3-5-sonnet" for model type of "claude-3-5-sonnet-"', () => {
expect(getValueKey('claude-3-5-sonnet-20240620')).toBe('claude-3-5-sonnet');
expect(getValueKey('anthropic/claude-3-5-sonnet')).toBe('claude-3-5-sonnet');
@@ -263,6 +277,37 @@ describe('AWS Bedrock Model Tests', () => {
});
});
describe('Deepseek Model Tests', () => {
const deepseekModels = ['deepseek-chat', 'deepseek-coder', 'deepseek-reasoner'];
it('should return the correct prompt multipliers for all models', () => {
const results = deepseekModels.map((model) => {
const valueKey = getValueKey(model);
const multiplier = getMultiplier({ valueKey, tokenType: 'prompt' });
return tokenValues[valueKey].prompt && multiplier === tokenValues[valueKey].prompt;
});
expect(results.every(Boolean)).toBe(true);
});
it('should return the correct completion multipliers for all models', () => {
const results = deepseekModels.map((model) => {
const valueKey = getValueKey(model);
const multiplier = getMultiplier({ valueKey, tokenType: 'completion' });
return tokenValues[valueKey].completion && multiplier === tokenValues[valueKey].completion;
});
expect(results.every(Boolean)).toBe(true);
});
it('should return the correct prompt multipliers for reasoning model', () => {
const model = 'deepseek-reasoner';
const valueKey = getValueKey(model);
expect(valueKey).toBe(model);
const multiplier = getMultiplier({ valueKey, tokenType: 'prompt' });
const result = tokenValues[valueKey].prompt && multiplier === tokenValues[valueKey].prompt;
expect(result).toBe(true);
});
});
describe('getCacheMultiplier', () => {
it('should return the correct cache multiplier for a given valueKey and cacheType', () => {
expect(getCacheMultiplier({ valueKey: 'claude-3-5-sonnet', cacheType: 'write' })).toBe(
@@ -349,3 +394,108 @@ describe('getCacheMultiplier', () => {
).toBe(0.03);
});
});
describe('Google Model Tests', () => {
const googleModels = [
'gemini-2.0-flash-lite-preview-02-05',
'gemini-2.0-flash-001',
'gemini-2.0-flash-exp',
'gemini-2.0-pro-exp-02-05',
'gemini-1.5-flash-8b',
'gemini-1.5-flash-thinking',
'gemini-1.5-pro-latest',
'gemini-1.5-pro-preview-0409',
'gemini-pro-vision',
'gemini-1.0',
'gemini-pro',
];
it('should return the correct prompt and completion rates for all models', () => {
const results = googleModels.map((model) => {
const valueKey = getValueKey(model, EModelEndpoint.google);
const promptRate = getMultiplier({
model,
tokenType: 'prompt',
endpoint: EModelEndpoint.google,
});
const completionRate = getMultiplier({
model,
tokenType: 'completion',
endpoint: EModelEndpoint.google,
});
return { model, valueKey, promptRate, completionRate };
});
results.forEach(({ valueKey, promptRate, completionRate }) => {
expect(promptRate).toBe(tokenValues[valueKey].prompt);
expect(completionRate).toBe(tokenValues[valueKey].completion);
});
});
it('should map to the correct model keys', () => {
const expected = {
'gemini-2.0-flash-lite-preview-02-05': 'gemini-2.0-flash-lite',
'gemini-2.0-flash-001': 'gemini-2.0-flash',
'gemini-2.0-flash-exp': 'gemini-2.0-flash',
'gemini-2.0-pro-exp-02-05': 'gemini-2.0',
'gemini-1.5-flash-8b': 'gemini-1.5-flash-8b',
'gemini-1.5-flash-thinking': 'gemini-1.5-flash',
'gemini-1.5-pro-latest': 'gemini-1.5',
'gemini-1.5-pro-preview-0409': 'gemini-1.5',
'gemini-pro-vision': 'gemini-pro-vision',
'gemini-1.0': 'gemini',
'gemini-pro': 'gemini',
};
Object.entries(expected).forEach(([model, expectedKey]) => {
const valueKey = getValueKey(model, EModelEndpoint.google);
expect(valueKey).toBe(expectedKey);
});
});
it('should handle model names with different formats', () => {
const testCases = [
{ input: 'google/gemini-pro', expected: 'gemini' },
{ input: 'gemini-pro/google', expected: 'gemini' },
{ input: 'google/gemini-2.0-flash-lite', expected: 'gemini-2.0-flash-lite' },
];
testCases.forEach(({ input, expected }) => {
const valueKey = getValueKey(input, EModelEndpoint.google);
expect(valueKey).toBe(expected);
expect(
getMultiplier({ model: input, tokenType: 'prompt', endpoint: EModelEndpoint.google }),
).toBe(tokenValues[expected].prompt);
expect(
getMultiplier({ model: input, tokenType: 'completion', endpoint: EModelEndpoint.google }),
).toBe(tokenValues[expected].completion);
});
});
});
describe('Grok Model Tests - Pricing', () => {
describe('getMultiplier', () => {
test('should return correct prompt and completion rates for Grok vision models', () => {
const models = ['grok-2-vision-1212', 'grok-2-vision', 'grok-2-vision-latest'];
models.forEach((model) => {
expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(2.0);
expect(getMultiplier({ model, tokenType: 'completion' })).toBe(10.0);
});
});
test('should return correct prompt and completion rates for Grok text models', () => {
const models = ['grok-2-1212', 'grok-2', 'grok-2-latest'];
models.forEach((model) => {
expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(2.0);
expect(getMultiplier({ model, tokenType: 'completion' })).toBe(10.0);
});
});
test('should return correct prompt and completion rates for Grok beta models', () => {
expect(getMultiplier({ model: 'grok-vision-beta', tokenType: 'prompt' })).toBe(5.0);
expect(getMultiplier({ model: 'grok-vision-beta', tokenType: 'completion' })).toBe(15.0);
expect(getMultiplier({ model: 'grok-beta', tokenType: 'prompt' })).toBe(5.0);
expect(getMultiplier({ model: 'grok-beta', tokenType: 'completion' })).toBe(15.0);
});
});
});

View File

@@ -1,6 +1,6 @@
{
"name": "@librechat/backend",
"version": "v0.7.6",
"version": "v0.7.7-rc1",
"description": "",
"scripts": {
"start": "echo 'please run this from the root directory'",
@@ -34,20 +34,21 @@
},
"homepage": "https://librechat.ai",
"dependencies": {
"@anthropic-ai/sdk": "^0.32.1",
"@anthropic-ai/sdk": "^0.37.0",
"@azure/search-documents": "^12.0.0",
"@google/generative-ai": "^0.21.0",
"@googleapis/youtube": "^20.0.0",
"@keyv/mongo": "^2.1.8",
"@keyv/redis": "^2.8.1",
"@langchain/community": "^0.3.14",
"@langchain/core": "^0.3.18",
"@langchain/google-genai": "^0.1.4",
"@langchain/google-vertexai": "^0.1.4",
"@langchain/core": "^0.3.40",
"@langchain/google-genai": "^0.1.9",
"@langchain/google-vertexai": "^0.2.0",
"@langchain/textsplitters": "^0.1.0",
"@librechat/agents": "^1.8.8",
"axios": "^1.7.7",
"@librechat/agents": "^2.1.3",
"@waylaidwanderer/fetch-event-source": "^3.0.1",
"axios": "1.7.8",
"bcryptjs": "^2.4.3",
"cheerio": "^1.0.0-rc.12",
"cohere-ai": "^7.9.1",
"compression": "^1.7.4",
"connect-redis": "^7.1.0",
@@ -56,15 +57,17 @@
"cors": "^2.8.5",
"dedent": "^1.5.3",
"dotenv": "^16.0.3",
"eventsource": "^3.0.2",
"express": "^4.21.2",
"express-mongo-sanitize": "^2.2.0",
"express-rate-limit": "^7.4.1",
"express-session": "^1.18.1",
"express-static-gzip": "^2.2.0",
"file-type": "^18.7.0",
"firebase": "^11.0.2",
"googleapis": "^126.0.1",
"handlebars": "^4.7.7",
"html": "^1.0.0",
"https-proxy-agent": "^7.0.6",
"ioredis": "^5.3.2",
"js-yaml": "^4.1.0",
"jsonwebtoken": "^9.0.0",
@@ -76,19 +79,19 @@
"librechat-mcp": "*",
"lodash": "^4.17.21",
"meilisearch": "^0.38.0",
"memorystore": "^1.6.7",
"mime": "^3.0.0",
"module-alias": "^2.2.3",
"mongoose": "^8.8.3",
"mongoose": "^8.9.5",
"multer": "^1.4.5-lts.1",
"nanoid": "^3.3.7",
"nodejs-gpt": "^1.37.4",
"nodemailer": "^6.9.15",
"ollama": "^0.5.0",
"openai": "^4.47.1",
"openai-chat-tokens": "^0.2.8",
"openid-client": "^5.4.2",
"passport": "^0.6.0",
"passport-custom": "^1.1.1",
"passport-apple": "^2.0.2",
"passport-discord": "^0.1.4",
"passport-facebook": "^3.0.0",
"passport-github2": "^0.1.12",
@@ -96,19 +99,19 @@
"passport-jwt": "^4.0.1",
"passport-ldapauth": "^3.0.1",
"passport-local": "^1.0.0",
"pino": "^8.12.1",
"sharp": "^0.32.6",
"tiktoken": "^1.0.15",
"traverse": "^0.6.7",
"ua-parser-js": "^1.0.36",
"winston": "^3.11.0",
"winston-daily-rotate-file": "^4.7.1",
"youtube-transcript": "^1.2.1",
"zod": "^3.22.4"
},
"devDependencies": {
"jest": "^29.7.0",
"mongodb-memory-server": "^10.0.0",
"nodemon": "^3.0.1",
"supertest": "^6.3.3"
"mongodb-memory-server": "^10.1.3",
"nodemon": "^3.0.3",
"supertest": "^7.0.0"
}
}

View File

@@ -1,8 +1,6 @@
const throttle = require('lodash/throttle');
const { getResponseSender, Constants, CacheKeys, Time } = require('librechat-data-provider');
const { getResponseSender, Constants } = require('librechat-data-provider');
const { createAbortController, handleAbortError } = require('~/server/middleware');
const { sendMessage, createOnProgress } = require('~/server/utils');
const { getLogStores } = require('~/cache');
const { saveMessage } = require('~/models');
const { logger } = require('~/config');
@@ -57,33 +55,9 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
try {
const { client } = await initializeClient({ req, res, endpointOption });
const messageCache = getLogStores(CacheKeys.MESSAGES);
const { onProgress: progressCallback, getPartialText } = createOnProgress({
onProgress: throttle(
({ text: partialText }) => {
/*
const unfinished = endpointOption.endpoint === EModelEndpoint.google ? false : true;
messageCache.set(responseMessageId, {
messageId: responseMessageId,
sender,
conversationId,
parentMessageId: overrideParentMessageId ?? userMessageId,
text: partialText,
model: client.modelOptions.model,
unfinished,
error: false,
user,
}, Time.FIVE_MINUTES);
*/
const { onProgress: progressCallback, getPartialText } = createOnProgress();
messageCache.set(responseMessageId, partialText, Time.FIVE_MINUTES);
},
3000,
{ trailing: false },
),
});
getText = getPartialText;
getText = client.getStreamText != null ? client.getStreamText.bind(client) : getPartialText;
const getAbortData = () => ({
sender,
@@ -91,7 +65,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
userMessagePromise,
messageId: responseMessageId,
parentMessageId: overrideParentMessageId ?? userMessageId,
text: getPartialText(),
text: getText(),
userMessage,
promptTokens,
});
@@ -176,11 +150,13 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
} catch (error) {
const partialText = getText && getText();
handleAbortError(res, req, error, {
sender,
partialText,
conversationId,
sender,
messageId: responseMessageId,
parentMessageId: userMessageId ?? parentMessageId,
parentMessageId: overrideParentMessageId ?? userMessageId ?? parentMessageId,
}).catch((err) => {
logger.error('[AskController] Error in `handleAbortError`', err);
});
}
};
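A note on the stream-text fallback introduced above: the controller now prefers the client's own accumulated stream text when the client exposes getStreamText, and only falls back to the partial-text getter from createOnProgress otherwise. A minimal sketch of that pattern, reusing the names from this diff (the client object stands in for whatever initializeClient returns):

const { createOnProgress } = require('~/server/utils');

// Prefer the client's own stream accumulator; otherwise track partial text locally.
function makeGetText(client) {
  const { getPartialText } = createOnProgress();
  return client.getStreamText != null
    ? client.getStreamText.bind(client) // client maintains its own streamed text
    : getPartialText;                   // fall back to the progress tracker
}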

View File

@@ -6,8 +6,7 @@ const {
setAuthTokens,
requestPasswordReset,
} = require('~/server/services/AuthService');
const { hashToken } = require('~/server/utils/crypto');
const { Session, getUserById } = require('~/models');
const { findSession, getUserById, deleteAllUserSessions } = require('~/models');
const { logger } = require('~/config');
const registrationController = async (req, res) => {
@@ -45,6 +44,7 @@ const resetPasswordController = async (req, res) => {
if (resetPasswordService instanceof Error) {
return res.status(400).json(resetPasswordService);
} else {
await deleteAllUserSessions({ userId: req.body.userId });
return res.status(200).json(resetPasswordService);
}
} catch (e) {
@@ -61,7 +61,7 @@ const refreshController = async (req, res) => {
try {
const payload = jwt.verify(refreshToken, process.env.JWT_REFRESH_SECRET);
const user = await getUserById(payload.id, '-password -__v');
const user = await getUserById(payload.id, '-password -__v -totpSecret');
if (!user) {
return res.status(401).redirect('/login');
}
@@ -73,11 +73,9 @@ const refreshController = async (req, res) => {
return res.status(200).send({ token, user });
}
// Hash the refresh token
const hashedToken = await hashToken(refreshToken);
// Find the session with the hashed refresh token
const session = await Session.findOne({ user: userId, refreshTokenHash: hashedToken });
const session = await findSession({ userId: userId, refreshToken: refreshToken });
if (session && session.expiration > new Date()) {
const token = await setAuthTokens(userId, res, session._id);
res.status(200).send({ token, user });

View File

@@ -1,8 +1,6 @@
const throttle = require('lodash/throttle');
const { getResponseSender, CacheKeys, Time } = require('librechat-data-provider');
const { getResponseSender } = require('librechat-data-provider');
const { createAbortController, handleAbortError } = require('~/server/middleware');
const { sendMessage, createOnProgress } = require('~/server/utils');
const { getLogStores } = require('~/cache');
const { saveMessage } = require('~/models');
const { logger } = require('~/config');
@@ -53,62 +51,44 @@ const EditController = async (req, res, next, initializeClient) => {
}
};
const messageCache = getLogStores(CacheKeys.MESSAGES);
const { onProgress: progressCallback, getPartialText } = createOnProgress({
generation,
onProgress: throttle(
({ text: partialText }) => {
/*
const unfinished = endpointOption.endpoint === EModelEndpoint.google ? false : true;
{
messageId: responseMessageId,
sender,
conversationId,
parentMessageId: overrideParentMessageId ?? userMessageId,
text: partialText,
model: endpointOption.modelOptions.model,
unfinished,
isEdited: true,
error: false,
user,
} */
messageCache.set(responseMessageId, partialText, Time.FIVE_MINUTES);
},
3000,
{ trailing: false },
),
});
const getAbortData = () => ({
conversationId,
userMessagePromise,
messageId: responseMessageId,
sender,
parentMessageId: overrideParentMessageId ?? userMessageId,
text: getPartialText(),
userMessage,
promptTokens,
});
const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData);
res.on('close', () => {
logger.debug('[EditController] Request closed');
if (!abortController) {
return;
} else if (abortController.signal.aborted) {
return;
} else if (abortController.requestCompleted) {
return;
}
abortController.abort();
logger.debug('[EditController] Request aborted on close');
});
let getText;
try {
const { client } = await initializeClient({ req, res, endpointOption });
getText = client.getStreamText != null ? client.getStreamText.bind(client) : getPartialText;
const getAbortData = () => ({
conversationId,
userMessagePromise,
messageId: responseMessageId,
sender,
parentMessageId: overrideParentMessageId ?? userMessageId,
text: getText(),
userMessage,
promptTokens,
});
const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData);
res.on('close', () => {
logger.debug('[EditController] Request closed');
if (!abortController) {
return;
} else if (abortController.signal.aborted) {
return;
} else if (abortController.requestCompleted) {
return;
}
abortController.abort();
logger.debug('[EditController] Request aborted on close');
});
let response = await client.sendMessage(text, {
user,
generation,
@@ -153,13 +133,15 @@ const EditController = async (req, res, next, initializeClient) => {
);
}
} catch (error) {
const partialText = getPartialText();
const partialText = getText();
handleAbortError(res, req, error, {
sender,
partialText,
conversationId,
sender,
messageId: responseMessageId,
parentMessageId: userMessageId ?? parentMessageId,
parentMessageId: overrideParentMessageId ?? userMessageId ?? parentMessageId,
}).catch((err) => {
logger.error('[EditController] Error in `handleAbortError`', err);
});
}
};

View File

@@ -1,7 +1,7 @@
const { promises: fs } = require('fs');
const { CacheKeys, AuthType } = require('librechat-data-provider');
const { addOpenAPISpecs } = require('~/app/clients/tools/util/addOpenAPISpecs');
const { getCustomConfig } = require('~/server/services/Config');
const { availableTools } = require('~/app/clients/tools');
const { getMCPManager } = require('~/config');
const { getLogStores } = require('~/cache');
@@ -59,10 +59,9 @@ const getAvailablePluginsController = async (req, res) => {
/** @type {{ filteredTools: string[], includedTools: string[] }} */
const { filteredTools = [], includedTools = [] } = req.app.locals;
const pluginManifest = await fs.readFile(req.app.locals.paths.pluginManifest, 'utf8');
const jsonData = JSON.parse(pluginManifest);
const pluginManifest = availableTools;
const uniquePlugins = filterUniquePlugins(jsonData);
const uniquePlugins = filterUniquePlugins(pluginManifest);
let authenticatedPlugins = [];
for (const plugin of uniquePlugins) {
authenticatedPlugins.push(
@@ -106,17 +105,15 @@ const getAvailableTools = async (req, res) => {
return;
}
const pluginManifest = await fs.readFile(req.app.locals.paths.pluginManifest, 'utf8');
const jsonData = JSON.parse(pluginManifest);
const pluginManifest = availableTools;
const customConfig = await getCustomConfig();
if (customConfig?.mcpServers != null) {
const mcpManager = await getMCPManager();
await mcpManager.loadManifestTools(jsonData);
await mcpManager.loadManifestTools(pluginManifest);
}
/** @type {TPlugin[]} */
const uniquePlugins = filterUniquePlugins(jsonData);
const uniquePlugins = filterUniquePlugins(pluginManifest);
const authenticatedPlugins = uniquePlugins.map((plugin) => {
if (checkPluginAuth(plugin)) {
@@ -126,8 +123,12 @@ const getAvailableTools = async (req, res) => {
}
});
const toolDefinitions = req.app.locals.availableTools;
const tools = authenticatedPlugins.filter(
(plugin) => req.app.locals.availableTools[plugin.pluginKey] !== undefined,
(plugin) =>
toolDefinitions[plugin.pluginKey] !== undefined ||
(plugin.toolkit === true &&
Object.keys(toolDefinitions).some((key) => key.startsWith(`${plugin.pluginKey}_`))),
);
await cache.set(CacheKeys.TOOLS, tools);
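A note on the widened filter above: a plugin is now kept either when its pluginKey maps directly to a loaded tool definition, or when it is a toolkit whose member tools are registered under keys prefixed with the pluginKey and an underscore. An illustrative check with hypothetical keys:

const toolDefinitions = { youtube_search: {}, youtube_transcript: {} }; // hypothetical
const plugin = { pluginKey: 'youtube', toolkit: true };                 // hypothetical
const kept =
  toolDefinitions[plugin.pluginKey] !== undefined ||
  (plugin.toolkit === true &&
    Object.keys(toolDefinitions).some((key) => key.startsWith(`${plugin.pluginKey}_`)));
// kept === true: 'youtube_search' starts with 'youtube_'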

View File

@@ -0,0 +1,119 @@
const {
verifyTOTP,
verifyBackupCode,
generateTOTPSecret,
generateBackupCodes,
getTOTPSecret,
} = require('~/server/services/twoFactorService');
const { updateUser, getUserById } = require('~/models');
const { logger } = require('~/config');
const { encryptV2 } = require('~/server/utils/crypto');
const enable2FAController = async (req, res) => {
const safeAppTitle = (process.env.APP_TITLE || 'LibreChat').replace(/\s+/g, '');
try {
const userId = req.user.id;
const secret = generateTOTPSecret();
const { plainCodes, codeObjects } = await generateBackupCodes();
const encryptedSecret = await encryptV2(secret);
const user = await updateUser(userId, { totpSecret: encryptedSecret, backupCodes: codeObjects });
const otpauthUrl = `otpauth://totp/${safeAppTitle}:${user.email}?secret=${secret}&issuer=${safeAppTitle}`;
res.status(200).json({
otpauthUrl,
backupCodes: plainCodes,
});
} catch (err) {
logger.error('[enable2FAController]', err);
res.status(500).json({ message: err.message });
}
};
const verify2FAController = async (req, res) => {
try {
const userId = req.user.id;
const { token, backupCode } = req.body;
const user = await getUserById(userId);
if (!user || !user.totpSecret) {
return res.status(400).json({ message: '2FA not initiated' });
}
// Retrieve the plain TOTP secret using getTOTPSecret.
const secret = await getTOTPSecret(user.totpSecret);
if (token && (await verifyTOTP(secret, token))) {
return res.status(200).json();
} else if (backupCode) {
const verified = await verifyBackupCode({ user, backupCode });
if (verified) {
return res.status(200).json();
}
}
return res.status(400).json({ message: 'Invalid token.' });
} catch (err) {
logger.error('[verify2FAController]', err);
res.status(500).json({ message: err.message });
}
};
const confirm2FAController = async (req, res) => {
try {
const userId = req.user.id;
const { token } = req.body;
const user = await getUserById(userId);
if (!user || !user.totpSecret) {
return res.status(400).json({ message: '2FA not initiated' });
}
// Retrieve the plain TOTP secret using getTOTPSecret.
const secret = await getTOTPSecret(user.totpSecret);
if (await verifyTOTP(secret, token)) {
return res.status(200).json();
}
return res.status(400).json({ message: 'Invalid token.' });
} catch (err) {
logger.error('[confirm2FAController]', err);
res.status(500).json({ message: err.message });
}
};
const disable2FAController = async (req, res) => {
try {
const userId = req.user.id;
await updateUser(userId, { totpSecret: null, backupCodes: [] });
res.status(200).json();
} catch (err) {
logger.error('[disable2FAController]', err);
res.status(500).json({ message: err.message });
}
};
const regenerateBackupCodesController = async (req, res) => {
try {
const userId = req.user.id;
const { plainCodes, codeObjects } = await generateBackupCodes();
await updateUser(userId, { backupCodes: codeObjects });
res.status(200).json({
backupCodes: plainCodes,
backupCodesHash: codeObjects,
});
} catch (err) {
logger.error('[regenerateBackupCodesController]', err);
res.status(500).json({ message: err.message });
}
};
module.exports = {
enable2FAController,
verify2FAController,
confirm2FAController,
disable2FAController,
regenerateBackupCodesController,
};
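For reference, the otpauth URL assembled in enable2FAController follows the standard TOTP Key URI format; with illustrative values (not real credentials) it expands as follows:

const safeAppTitle = 'Libre Chat'.replace(/\s+/g, ''); // -> 'LibreChat'
const email = 'user@example.com';                      // hypothetical account
const secret = 'JBSWY3DPEHPK3PXP';                     // hypothetical base32 secret
const otpauthUrl = `otpauth://totp/${safeAppTitle}:${email}?secret=${secret}&issuer=${safeAppTitle}`;
// otpauth://totp/LibreChat:user@example.com?secret=JBSWY3DPEHPK3PXP&issuer=LibreChat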

View File

@@ -1,5 +1,4 @@
const {
Session,
Balance,
getFiles,
deleteFiles,
@@ -7,6 +6,7 @@ const {
deletePresets,
deleteMessages,
deleteUserById,
deleteAllUserSessions,
} = require('~/models');
const User = require('~/models/User');
const { updateUserPluginAuth, deleteUserPluginAuth } = require('~/server/services/PluginService');
@@ -19,7 +19,9 @@ const { Transaction } = require('~/models/Transaction');
const { logger } = require('~/config');
const getUserController = async (req, res) => {
res.status(200).send(req.user);
const userData = req.user.toObject != null ? req.user.toObject() : { ...req.user };
delete userData.totpSecret;
res.status(200).send(userData);
};
const getTermsStatusController = async (req, res) => {
@@ -112,7 +114,7 @@ const deleteUserController = async (req, res) => {
try {
await deleteMessages({ user: user.id }); // delete user messages
await Session.deleteMany({ user: user.id }); // delete user sessions
await deleteAllUserSessions({ userId: user.id }); // delete user sessions
await Transaction.deleteMany({ user: user.id }); // delete user transactions
await deleteUserKey({ userId: user.id, all: true }); // delete user keys
await Balance.deleteMany({ user: user._id }); // delete user balances

View File

@@ -1,14 +1,17 @@
const { Tools, StepTypes, imageGenTools, FileContext } = require('librechat-data-provider');
const {
EnvVar,
Providers,
GraphEvents,
getMessageId,
ToolEndHandler,
handleToolCalls,
ChatModelStreamHandler,
} = require('@librechat/agents');
const { processCodeOutput } = require('~/server/services/Files/Code/process');
const { saveBase64Image } = require('~/server/services/Files/process');
const { loadAuthValues } = require('~/app/clients/tools/util');
const { logger } = require('~/config');
const { logger, sendEvent } = require('~/config');
/** @typedef {import('@librechat/agents').Graph} Graph */
/** @typedef {import('@librechat/agents').EventHandler} EventHandler */
@@ -19,20 +22,6 @@ const { logger } = require('~/config');
/** @typedef {import('@librechat/agents').ContentAggregatorResult['aggregateContent']} ContentAggregator */
/** @typedef {import('@librechat/agents').GraphEvents} GraphEvents */
/**
* Sends message data in Server Sent Events format.
* @param {ServerResponse} res - The server response.
* @param {{ data: string | Record<string, unknown>, event?: string }} event - The message event.
* @param {string} event.event - The type of event.
* @param {string} event.data - The message to be sent.
*/
const sendEvent = (res, event) => {
if (typeof event.data === 'string' && event.data.length === 0) {
return;
}
res.write(`event: message\ndata: ${JSON.stringify(event)}\n\n`);
};
class ModelEndHandler {
/**
* @param {Array<UsageMetadata>} collectedUsage
@@ -57,13 +46,54 @@ class ModelEndHandler {
return;
}
const usage = data?.output?.usage_metadata;
if (metadata?.model) {
usage.model = metadata.model;
}
try {
if (metadata.provider === Providers.GOOGLE || graph.clientOptions?.disableStreaming) {
handleToolCalls(data?.output?.tool_calls, metadata, graph);
}
const usage = data?.output?.usage_metadata;
if (!usage) {
return;
}
if (metadata?.model) {
usage.model = metadata.model;
}
if (usage) {
this.collectedUsage.push(usage);
if (!graph.clientOptions?.disableStreaming) {
return;
}
if (!data.output.content) {
return;
}
const stepKey = graph.getStepKey(metadata);
const message_id = getMessageId(stepKey, graph) ?? '';
if (message_id) {
graph.dispatchRunStep(stepKey, {
type: StepTypes.MESSAGE_CREATION,
message_creation: {
message_id,
},
});
}
const stepId = graph.getStepIdByKey(stepKey);
const content = data.output.content;
if (typeof content === 'string') {
graph.dispatchMessageDelta(stepId, {
content: [
{
type: 'text',
text: content,
},
],
});
} else if (content.every((c) => c.type?.startsWith('text'))) {
graph.dispatchMessageDelta(stepId, {
content,
});
}
} catch (error) {
logger.error('Error handling model end event:', error);
}
}
}
@@ -169,6 +199,22 @@ function getDefaultHandlers({ res, aggregateContent, toolEndCallback, collectedU
aggregateContent({ event, data });
},
},
[GraphEvents.ON_REASONING_DELTA]: {
/**
* Handle ON_REASONING_DELTA event.
* @param {string} event - The event name.
* @param {StreamEventData} data - The event data.
* @param {GraphRunnableConfig['configurable']} [metadata] The runnable metadata.
*/
handle: (event, data, metadata) => {
if (metadata?.last_agent_index === metadata?.agent_index) {
sendEvent(res, { event, data });
} else if (!metadata?.hide_sequential_outputs) {
sendEvent(res, { event, data });
}
aggregateContent({ event, data });
},
},
};
return handlers;
@@ -311,7 +357,6 @@ function createToolEndCallback({ req, res, artifactPromises }) {
}
module.exports = {
sendEvent,
getDefaultHandlers,
createToolEndCallback,
};

View File

@@ -20,13 +20,9 @@ const {
bedrockOutputParser,
removeNullishValues,
} = require('librechat-data-provider');
const {
extractBaseURL,
// constructAzureURL,
// genAzureChatCompletion,
} = require('~/utils');
const {
formatMessage,
addCacheControl,
formatAgentMessages,
formatContentStrings,
createContextHandlers,
@@ -40,6 +36,7 @@ const { createRun } = require('./run');
const { logger } = require('~/config');
/** @typedef {import('@librechat/agents').MessageContentComplex} MessageContentComplex */
/** @typedef {import('@langchain/core/runnables').RunnableConfig} RunnableConfig */
const providerParsers = {
[EModelEndpoint.openAI]: openAISchema,
@@ -59,6 +56,9 @@ const noSystemModelRegex = [/\bo1\b/gi];
class AgentClient extends BaseClient {
constructor(options = {}) {
super(null, options);
/** The current client class
* @type {string} */
this.clientName = EModelEndpoint.agents;
/** @type {'discard' | 'summarize'} */
this.contextStrategy = 'discard';
@@ -90,6 +90,14 @@ class AgentClient extends BaseClient {
this.options = Object.assign({ endpoint: options.endpoint }, clientOptions);
/** @type {string} */
this.model = this.options.agent.model_parameters.model;
/** The key for the usage object's input tokens
* @type {string} */
this.inputTokensKey = 'input_tokens';
/** The key for the usage object's output tokens
* @type {string} */
this.outputTokensKey = 'output_tokens';
/** @type {UsageMetadata} */
this.usage;
}
/**
@@ -192,6 +200,7 @@ class AgentClient extends BaseClient {
resendFiles: this.options.resendFiles,
imageDetail: this.options.imageDetail,
spec: this.options.spec,
iconURL: this.options.iconURL,
},
// TODO: PARSE OPTIONS BY PROVIDER, MAY CONTAIN SENSITIVE DATA
runOptions,
@@ -327,16 +336,18 @@ class AgentClient extends BaseClient {
this.options.agent.instructions = systemContent;
}
/** @type {Record<string, number> | undefined} */
let tokenCountMap;
if (this.contextStrategy) {
({ payload, promptTokens, messages } = await this.handleContextStrategy({
({ payload, promptTokens, tokenCountMap, messages } = await this.handleContextStrategy({
orderedMessages,
formattedMessages,
/* prefer usage_metadata from final message */
buildTokenMap: false,
}));
}
const result = {
tokenCountMap,
prompt: payload,
promptTokens,
messages,
@@ -366,8 +377,26 @@ class AgentClient extends BaseClient {
* @param {UsageMetadata[]} [params.collectedUsage=this.collectedUsage]
*/
async recordCollectedUsage({ model, context = 'message', collectedUsage = this.collectedUsage }) {
for (const usage of collectedUsage) {
await spendTokens(
if (!collectedUsage || !collectedUsage.length) {
return;
}
const input_tokens = collectedUsage[0]?.input_tokens || 0;
let output_tokens = 0;
let previousTokens = input_tokens; // Start with original input
for (let i = 0; i < collectedUsage.length; i++) {
const usage = collectedUsage[i];
if (i > 0) {
// Count new tokens generated (input_tokens minus previous accumulated tokens)
output_tokens += (Number(usage.input_tokens) || 0) - previousTokens;
}
// Add this message's output tokens
output_tokens += Number(usage.output_tokens) || 0;
// Update previousTokens to include this message's output
previousTokens += Number(usage.output_tokens) || 0;
spendTokens(
{
context,
conversationId: this.conversationId,
@@ -376,8 +405,66 @@ class AgentClient extends BaseClient {
model: usage.model ?? model ?? this.model ?? this.options.agent.model_parameters.model,
},
{ promptTokens: usage.input_tokens, completionTokens: usage.output_tokens },
);
).catch((err) => {
logger.error(
'[api/server/controllers/agents/client.js #recordCollectedUsage] Error spending tokens',
err,
);
});
}
this.usage = {
input_tokens,
output_tokens,
};
}
/**
* Get stream usage as returned by this client's API response.
* @returns {UsageMetadata} The stream usage object.
*/
getStreamUsage() {
return this.usage;
}
/**
* @param {TMessage} responseMessage
* @returns {number}
*/
getTokenCountForResponse({ content }) {
return this.getTokenCountForMessage({
role: 'assistant',
content,
});
}
/**
* Calculates the correct token count for the current user message based on the token count map and API usage.
* Edge case: If the calculation results in a negative value, it returns the original estimate.
* If revisiting a conversation with a chat history entirely composed of token estimates,
* the cumulative token count going forward should become more accurate as the conversation progresses.
* @param {Object} params - The parameters for the calculation.
* @param {Record<string, number>} params.tokenCountMap - A map of message IDs to their token counts.
* @param {string} params.currentMessageId - The ID of the current message to calculate.
* @param {OpenAIUsageMetadata} params.usage - The usage object returned by the API.
* @returns {number} The correct token count for the current user message.
*/
calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage }) {
const originalEstimate = tokenCountMap[currentMessageId] || 0;
if (!usage || typeof usage[this.inputTokensKey] !== 'number') {
return originalEstimate;
}
tokenCountMap[currentMessageId] = 0;
const totalTokensFromMap = Object.values(tokenCountMap).reduce((sum, count) => {
const numCount = Number(count);
return sum + (isNaN(numCount) ? 0 : numCount);
}, 0);
const totalInputTokens = usage[this.inputTokensKey] ?? 0;
const currentMessageTokens = totalInputTokens - totalTokensFromMap;
return currentMessageTokens > 0 ? currentMessageTokens : originalEstimate;
}
async chatCompletion({ payload, abortController = null }) {
@@ -386,19 +473,6 @@ class AgentClient extends BaseClient {
abortController = new AbortController();
}
const baseURL = extractBaseURL(this.completionsUrl);
logger.debug('[api/server/controllers/agents/client.js] chatCompletion', {
baseURL,
payload,
});
// if (this.useOpenRouter) {
// opts.defaultHeaders = {
// 'HTTP-Referer': 'https://librechat.ai',
// 'X-Title': 'LibreChat',
// };
// }
// if (this.options.headers) {
// opts.defaultHeaders = { ...opts.defaultHeaders, ...this.options.headers };
// }
@@ -488,12 +562,14 @@ class AgentClient extends BaseClient {
// });
// }
/** @type {Partial<RunnableConfig> & { version: 'v1' | 'v2'; run_id?: string; streamMode: string }} */
const config = {
configurable: {
thread_id: this.conversationId,
last_agent_index: this.agentConfigs?.size ?? 0,
hide_sequential_outputs: this.options.agent.hide_sequential_outputs,
},
recursionLimit: this.options.req.app.locals[EModelEndpoint.agents]?.recursionLimit,
signal: abortController.signal,
streamMode: 'values',
version: 'v2',
@@ -514,7 +590,7 @@ class AgentClient extends BaseClient {
* @param {number} [i]
* @param {TMessageContentParts[]} [contentData]
*/
const runAgent = async (agent, messages, i = 0, contentData = []) => {
const runAgent = async (agent, _messages, i = 0, contentData = []) => {
config.configurable.model = agent.model_parameters.model;
if (i > 0) {
this.model = agent.model_parameters.model;
@@ -533,7 +609,7 @@ class AgentClient extends BaseClient {
let systemContent = [
systemMessage,
agent.instructions ?? '',
i !== 0 ? agent.additional_instructions ?? '' : '',
i !== 0 ? (agent.additional_instructions ?? '') : '',
]
.join('\n')
.trim();
@@ -547,12 +623,21 @@ class AgentClient extends BaseClient {
}
if (noSystemMessages === true && systemContent?.length) {
let latestMessage = messages.pop().content;
let latestMessage = _messages.pop().content;
if (typeof latestMessage !== 'string') {
latestMessage = latestMessage[0].text;
}
latestMessage = [systemContent, latestMessage].join('\n');
messages.push(new HumanMessage(latestMessage));
_messages.push(new HumanMessage(latestMessage));
}
let messages = _messages;
if (
agent.model_parameters?.clientOptions?.defaultHeaders?.['anthropic-beta']?.includes(
'prompt-caching',
)
) {
messages = addCacheControl(messages);
}
run = await createRun({
@@ -672,12 +757,14 @@ class AgentClient extends BaseClient {
);
});
this.recordCollectedUsage({ context: 'message' }).catch((err) => {
try {
await this.recordCollectedUsage({ context: 'message' });
} catch (err) {
logger.error(
'[api/server/controllers/agents/client.js #chatCompletion] Error recording collected usage',
err,
);
});
}
} catch (err) {
if (!abortController.signal.aborted) {
logger.error(
@@ -763,8 +850,11 @@ class AgentClient extends BaseClient {
}
}
/** Silent method, as `recordCollectedUsage` is used instead */
async recordTokenUsage() {}
getEncoding() {
return this.model?.includes('gpt-4o') ? 'o200k_base' : 'cl100k_base';
return 'o200k_base';
}
/**

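A worked example of the cumulative accounting that replaces the per-usage spendTokens loop above. The assumption, stated in the diff's own comments, is that each later call's input_tokens already includes everything counted so far, so only the growth over previousTokens counts as new output; the numbers here are illustrative:

const collectedUsage = [
  { input_tokens: 100, output_tokens: 20 }, // first model call
  { input_tokens: 125, output_tokens: 30 }, // follow-up call with a larger context
];
const input_tokens = collectedUsage[0].input_tokens; // 100: the original prompt
let output_tokens = 0;
let previousTokens = input_tokens;
for (let i = 0; i < collectedUsage.length; i++) {
  const usage = collectedUsage[i];
  if (i > 0) {
    output_tokens += usage.input_tokens - previousTokens; // 125 - 120 = 5 new context tokens
  }
  output_tokens += usage.output_tokens;  // +20, then +30
  previousTokens += usage.output_tokens; // 120, then 150
}
// this.usage becomes { input_tokens: 100, output_tokens: 55 }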
View File

@@ -142,7 +142,9 @@ const AgentController = async (req, res, next, initializeClient, addTitle) => {
conversationId,
sender,
messageId: responseMessageId,
parentMessageId: userMessageId ?? parentMessageId,
parentMessageId: overrideParentMessageId ?? userMessageId ?? parentMessageId,
}).catch((err) => {
logger.error('[api/server/controllers/agents/request] Error in `handleAbortError`', err);
});
}
};

View File

@@ -1,5 +1,5 @@
const { Run, Providers } = require('@librechat/agents');
const { providerEndpointMap } = require('librechat-data-provider');
const { providerEndpointMap, KnownEndpoints } = require('librechat-data-provider');
/**
* @typedef {import('@librechat/agents').t} t
@@ -7,6 +7,7 @@ const { providerEndpointMap } = require('librechat-data-provider');
* @typedef {import('@librechat/agents').StreamEventData} StreamEventData
* @typedef {import('@librechat/agents').EventHandler} EventHandler
* @typedef {import('@librechat/agents').GraphEvents} GraphEvents
* @typedef {import('@librechat/agents').LLMConfig} LLMConfig
* @typedef {import('@librechat/agents').IState} IState
*/
@@ -32,6 +33,7 @@ async function createRun({
streamUsage = true,
}) {
const provider = providerEndpointMap[agent.provider] ?? agent.provider;
/** @type {LLMConfig} */
const llmConfig = Object.assign(
{
provider,
@@ -41,10 +43,21 @@ async function createRun({
agent.model_parameters,
);
/** @type {'reasoning_content' | 'reasoning'} */
let reasoningKey;
if (llmConfig.configuration?.baseURL?.includes(KnownEndpoints.openrouter)) {
reasoningKey = 'reasoning';
}
if (/o1(?!-(?:mini|preview)).*$/.test(llmConfig.model)) {
llmConfig.streaming = false;
llmConfig.disableStreaming = true;
}
/** @type {StandardGraphConfig} */
const graphConfig = {
signal,
llmConfig,
reasoningKey,
tools: agent.tools,
instructions: agent.instructions,
additional_instructions: agent.additional_instructions,

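Two behaviors worth noting in the createRun change above: reasoningKey switches to 'reasoning' only when the configured baseURL points at OpenRouter, and the o1 guard disables streaming for full o1 models while leaving the -mini and -preview variants untouched. A quick, illustrative check of that regex (model names are examples only):

const noStreamRegex = /o1(?!-(?:mini|preview)).*$/;
noStreamRegex.test('o1');            // true  -> streaming disabled
noStreamRegex.test('o1-2024-12-17'); // true  -> streaming disabled
noStreamRegex.test('o1-mini');       // false -> streaming stays on
noStreamRegex.test('o1-preview');    // false -> streaming stays on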
View File

@@ -397,16 +397,6 @@ const chatV2 = async (req, res) => {
response = streamRunManager;
response.text = streamRunManager.intermediateText;
const messageCache = getLogStores(CacheKeys.MESSAGES);
messageCache.set(
responseMessageId,
{
complete: true,
text: response.text,
},
Time.FIVE_MINUTES,
);
};
await processRun();

Some files were not shown because too many files have changed in this diff.