Compare commits

...

122 Commits

Author SHA1 Message Date
Marco Beretta
c27e8566fb Merge branch 'main' into re-add-download-audio 2024-07-30 05:36:43 -04:00
Danny Avila
4ffdefc2a8 🔧 refactor: update userSchema default value for role 2024-07-30 01:48:20 -04:00
Marco Beretta
2d5f704695 🎨 style: bookmarks UI update (#3479)
* style: bookmarks update; style(Files): minor update

* style: update conversation bookmarks

* style: fix w TableCell
2024-07-29 19:25:36 -04:00
Oliver Faust
3fd25920d4 🛠️ fix(ldap): LDAP_LOGIN_USES_USERNAME config (#3472) 2024-07-29 14:32:35 -04:00
Yuichi Oneda
d03f8285db 🔧 refactor: Enable Scrolling Chat Message with Page keys (#3350) 2024-07-29 10:46:36 -04:00
Yuichi Oneda
e565e0faab 🔖 feat: Conversation Bookmarks (#3344)
* feat: add tags property in Conversation model

* feat: add ConversationTag model

* feat: add the tags parameter to getConvosByPage

* feat: add API route to ConversationTag

* feat: add types of ConversationTag

* feat: add data access functions for conversation tags

* feat: add Bookmark table component

* feat: Add an action to bookmark

* feat: add Bookmark nav component

* fix: failed test

* refactor: made 'Saved' tag a constant

* feat: add new bookmark to current conversation

* chore: Add comment

* fix: delete tag from conversations when it's deleted

* fix: Update the query cache when the tag title is changed.

* chore: fix typo

* refactor: add description of rebuilding bookmarks

* chore: remove unused variables

* fix: position when adding a new bookmark

* refactor: add comment, rename a function

* refactor: add a unique constraint in ConversationTag

* chore: add localizations
2024-07-29 10:45:59 -04:00
Jacob Colyvan
d4d56281e3 🗨️ fix: respect line breaks in prompt variables (#3459)
* fix: always display new lines in PromptEditor

* fix: allow multiline inputs in prompt template
2024-07-27 18:02:34 -04:00
Arthur Barrett
14eff23b57 🔧 fix: Ensure ShareView document title is configured (#3336)
* 🔧 fix: ensure ShareView document title is configured

* chore: import order

---------

Co-authored-by: Danny Avila <danacordially@gmail.com>
2024-07-27 15:47:26 -04:00
Ravi Katiyar
18fd8f1416 🔒 feat: add option to disable TLS for LDAP authentication (#3247)
* feat: add ldap tls config

* Update ldapStrategy.js

* LDAP_TLS_REJECT_UNAUTHORIZED optional

---------

Co-authored-by: Danny Avila <danacordially@gmail.com>
Co-authored-by: Danny Avila <danny@librechat.ai>
2024-07-27 15:46:39 -04:00
Danny Avila
ba9cb71245 🛂 feat: Allow LDAP login via username (#3463)
* Allow LDAP login via username

This patch adds the option to login via username instead of using an
email address since the latter may not be unique or may change.

For example, our organization has two main domains and users have a long
and a short form of their mail address. This makes it hard for users to
identify what their primary email address is and causes a lot of
confusion. Using their username instead makes it much easier.

Using a username will also make it easier in the future to not need a
separate bind user to get user attributes. So, this is also a bit of
prep work for that.

* Update config.js

* feat: Enable LDAP login via username

This commit enables the option to login via username instead of using an email address for LDAP authentication. This change is necessary because email addresses may not be unique or may change, causing confusion for users. By using usernames, it becomes easier for users to identify their primary email address. Additionally, this change prepares for future improvements by eliminating the need for a separate bind user to retrieve user attributes.

Co-authored-by: Danny Avila <danny@librechat.ai>

* chore: jsdocs

* chore: import order

* ci: add ldap config tests

---------

Co-authored-by: Lars Kiesow <lkiesow@uos.de>
2024-07-27 15:42:18 -04:00
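
For reference, the option surfaces as a single environment variable (it also appears, commented out, in the .env.example diff at the bottom of this comparison). A minimal sketch of enabling it, with an illustrative value:

```env
# Authenticate LDAP users by their username attribute instead of their mail attribute
LDAP_LOGIN_USES_USERNAME=true
```
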
Danny Avila
e5dfa06e6c 🎚️ chore: Update Logging Levels for Conversations and Messages (#3415) 2024-07-21 13:54:47 -04:00
Marco Beretta
0bd59c0efe 🎨 style: update Assistants builder (#3397)
* style: update Assistant builder

* fix(Eng): re-introduce old file_search info message

* feat: new OGDialogTemplate; style: improved tools + actions dialogs

* style: fix alignment issue for delete tool dialog

* chore: import order

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2024-07-21 13:46:43 -04:00
Marco Beretta
344297021f 🔧 fix: STT/TTS external checks pt. 2 (#3410) 2024-07-21 17:38:04 +02:00
Marco Beretta
620973436c 🔧 fix: STT/TTS external checks (#3409)
* fix: STT/TTS external

* remove unnecessary console log
2024-07-20 23:14:01 +02:00
Danny Avila
422d1a2c91 🤖 feat: new Anthropic Default Settings / Increased Output Tokens for 3.5-Sonnet (#3407)
* chore: bump data-provider

* feat: Add anthropicSettings to endpointSettings

The commit adds the `anthropicSettings` object to the `endpointSettings` in the `schemas.ts` file. This allows for the configuration of settings specific to the `anthropic` model endpoint.

* chore: adjust maxoutputtokens localization

* feat: Update AnthropicClient to use anthropicSettings for default model options and increased output beta header

* ci: new anthropic tests
2024-07-20 08:53:16 -04:00
Danny Avila
2ad097647c fix: Wait for Initial Message Save & Correct Latest Message (#3399)
* chore: assistants, unsupported assistant, better logging

* chore: remove unnecessary logger in validateAssistant middleware

* fix: resolve initial conversation save/promise before saving response

* chore: Import and organize dependencies in Speech component

* fix: conversation statefulness
- Latest Message (at index 0) should not be reset if existing convo
- add debugging context for clearAllLatestMessages
- Added logging concerning latest Message updates (dev mode only)
- update latest message Set logic, also checks for change in conversation Id
- consolidated latest message helpers to client/src/utils/messages.ts
2024-07-20 01:51:59 -04:00
Danny Avila
9e7615f832 ⚙️ fix: Plugin Message Handling Errors (#3392)
- Add unique index for messageId and user in messageSchema
- use `updateMessage` for updating the plugins message?
- add better logging for updateMessage
- prevents dupe_key or getKeyIndex error
2024-07-19 08:06:05 -04:00
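
A minimal sketch of the compound unique index this commit describes, assuming a standard Mongoose schema (the field list is abbreviated and not the actual model file):

```js
const mongoose = require('mongoose');

// Abbreviated stand-in for messageSchema; the real schema defines many more fields
const messageSchema = new mongoose.Schema({
  messageId: { type: String, required: true },
  user: { type: String, required: true },
  text: String,
});

// Compound unique index on (messageId, user): inserting a second document with the
// same pair now raises a duplicate-key error instead of silently creating a duplicate,
// which is why updates go through `updateMessage` rather than re-inserting.
messageSchema.index({ messageId: 1, user: 1 }, { unique: true });

module.exports = mongoose.model('Message', messageSchema);
```
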
Marco Beretta
ee4dd1b2e9 🚀 feat: gpt-4o-mini (#3384)
* feat: `gpt-4o-mini`

* feat: retrieval

* fix: Update order of model token values for 'gpt-4o' and 'gpt-4o-mini'

* fix: Update order of model token values for 'gpt-4o' and 'gpt-4o-mini'

* fix: Update order of model token values for 'gpt-4o' and 'gpt-4o-mini'

* fix: add jsdoc

* fix: Update order of model token values for 'gpt-4o' and 'gpt-4o-mini'

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2024-07-19 07:59:07 -04:00
Danny Avila
f6125ccd59 🌿 fix: Message Route Improvements pt. 2 (#3379)
* fix: edge case with debugTraverse function in parsers.js

* chore: Update error message in audio stream processing

* refactor: Add ONE_MINUTE and THIRTY_SECONDS options to Time enum

* fix: logging
2024-07-18 13:56:19 -04:00
Danny Avila
1acd47a0f6 🌿 fix: Message Route Improvements (#3378)
* fix: do not throw errors for invalid convo id, simply return undefined

* feat: Add error handling and logging to message route definitions

* feat: Refactor Message.js to improve code organization and readability

* fix: Return undefined instead of throwing error for invalid conversation ID in Message.spec.js
2024-07-18 10:07:10 -04:00
Marco Beretta
511f1336da Merge branch 'main' into re-add-download-audio 2024-07-17 18:38:28 +02:00
Danny Avila
5d40d0a37a ⚙️ feat: Adjust Rate of Stream Progress (#3244)
* chore: bump data-provider and add MESSAGES CacheKey

* refactor: avoid saving messages while streaming, save partial text to cache instead

* fix(ci): processChunks

* chore: logging aborted request to debug

* feat: set stream rate for token processing

* chore: specify default stream rate

* fix(ci): Update AppService.js to use optional chaining for endpointLocals assignment

* refactor: abstract the error handler

* feat: streamRate for assistants; refactor: update default rate for token

* refactor: update error handling in assistants/errors.js

* refactor: update error handling in assistants/errors.js
2024-07-17 10:47:17 -04:00
Danny Avila
1c282d1517 🛣️ fix: Add validation middleware to message route definitions (#3365) 2024-07-17 10:27:07 -04:00
Marco Beretta
73dbf3eb20 🌐 feat: disable external engine if not configured (#3313)
* feat: disable external engine if not configured

* remove comment
2024-07-17 10:08:43 -04:00
Marco Beretta
237a0de8b6 🔄 feat: chat direction (LTR-RTL) (#3260)
* feat: chat direction

* fix: FileRow

* feat: smooth trigger transition
2024-07-17 10:08:13 -04:00
Marco Beretta
d5782ac66c feat: auto send text slider (#3312)
* feat: convert main component to float

* feat: convert the remaining components

* feat: use `recoilState` instead of `recoilValue`

* feat: replaced `AutoSendTextSwitch` to `AutoSendTextSelector`

* feat: use `autoSendText` in the `useSpeechToTextExternal` hook

* fix: `autoSendText` timeout
2024-07-17 10:07:11 -04:00
Danny Avila
d5d188eebf 🔐 fix: Enhance Message & Image Access Security (#3363)
* chore: slight refactor

* fix: prevent message updates unless explicitly owned

* refactor: rethrow errors, update deleteMessagesSince (not used), add basic tests

* fix: Add path normalization and validation to image request middleware

* fix: image validation path security
2024-07-17 09:51:03 -04:00
Marco Beretta
25ea3b8e98 revert lint 2 2024-07-16 17:18:15 +02:00
Marco Beretta
b1ec67ea42 revert lint 2024-07-16 17:17:43 +02:00
Marco Beretta
f1bb5fa4c5 revert back translation changes 2024-07-15 18:54:44 +02:00
Marco Beretta
44d8596872 fix: removed title button 2024-07-15 18:48:21 +02:00
Marco Beretta
32d84c85ea Merge branch 'main' into re-add-download-audio 2024-07-15 18:37:04 +02:00
Benedikt Nordhoff
0a1d38e318 🔧 fix: Make assistant tool filtering compatible with plugin filtering (#3325)
includedTools expects the pluginKey/tool.name for filtering included tools, which is incompatible with the previous implementation in loadAndFormatTools used for loading assistant tools.

This change uses both filename and tool.name for filtering tools in ToolService.loadAndFormatTools in order to make it compatible with the old implementation and the behaviour of plugin filtering.

Co-authored-by: Benedikt Nordhoff <benedikt.nordhoff@codecentric.de>
2024-07-12 08:26:35 -04:00
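
A hedged sketch of the compatibility rule described above: a tool passes the includedTools filter if either its source filename or its tool.name is listed (the helper name and argument shape are assumptions, not the actual ToolService code):

```js
// Hypothetical standalone helper; the real logic lives in ToolService.loadAndFormatTools
function isToolIncluded({ filename, toolName }, includedTools = []) {
  if (includedTools.length === 0) {
    // No filter configured: keep every tool
    return true;
  }
  // Match on either identifier so assistant tools (keyed by filename) and
  // plugins (keyed by pluginKey / tool.name) are filtered consistently
  return includedTools.includes(filename) || includedTools.includes(toolName);
}

// Usage sketch with placeholder names
console.log(isToolIncluded({ filename: 'weather.js', toolName: 'weather' }, ['weather'])); // true
console.log(isToolIncluded({ filename: 'weather.js', toolName: 'weather' }, ['calculator'])); // false
```
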
Danny Avila
5ef71a7a36 🪙 fix: Streaming Response Token Issue (#3323)
* chore: use NEW_CONVO constant

* fix: token object assign issue
2024-07-10 23:41:21 -04:00
Peter Dave Hello
326069d7a6 🌏 i18n: Improve Traditional Chinese translation (#3311) 2024-07-10 16:54:12 -04:00
ylioja
785430daf5 🌍 i18n: Added Finnish Translations (#3316) 2024-07-10 16:53:45 -04:00
Anirudh
03fe361917 🔧 fix+chore: Resolve Overflow in Settings Modal & Upgrade to Headless UI 2.0 (#2661)
* fix: dropdown overflow

* fix: make dropdown work on mobile

* feat: update headlessui to 2.0 and use portal

* feat: rewrite modal using headlessui

* fix: applying of maxHeight

* fix: optimize backdrop for dark mode

* fix: rendering dropdown width

* feat: match small screen layout to radix-ui dialog

* revert: mobile modifications

* fix: modal animations

* fix: z-index

* chore: Migrate from HeadlessUI 1.0 to 2.0

* fix: h2 nesting

* fix: use lighter border for PopoverButtons

* feat: Move modal to the top if using a small screen

* fix: mobile position

* fix: frontend tests

* feat: use row layout in mobile instead of col

* fix: remove config path from tsconfig

* fix: fix dropdown tests (gpt4o ftw!)

* feat: Upgrade to latest headlessui version

* fix:test1

* fix: ThemeSelector test

* fix: re-add speech tab

* style: use pl and pr-3

* fix: speech tab dropdowns

* style: use maxHeight for language

* feat: convert DropdownNoState to v2.0

* fix: use v2 params for voiceDropdown

* style: reduce maxHeight for VoiceDropdown and set fixed width

* chore: rebuild package-lock

* style(fix): copy over the same styles for the settingsTab

* style(fix): use -top-1 for speech tabs

* style(fix): use max-w-[400px]

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2024-07-10 16:45:58 -04:00
Marco Beretta
b34a4ddac1 🌩️ feat: cloud-based browser voices (#3297)
* initial voice support

* feat: local voices; feat: switch cloud-based voices

* feat: apply voice to hook
2024-07-10 16:44:12 -04:00
Mert Doğruca
7d5b03dd98 🗨️ fix: Reset prompt groups query when changing filters (#3294) 2024-07-10 16:43:02 -04:00
Marco Beretta
f959ee302c 🗣️ fix: get speechTab config; feature: not overriding variables selected by user (#3282)
* fix(Speech): speechTab settings update

* fix: get speech config; refactor: moved everything to types and removed file types; feature: not overriding variables selected by user
2024-07-10 16:38:36 -04:00
Marco Beretta
cd00df69bb 👤 feat: zoom and rotate avatar (#3264) 2024-07-10 16:38:02 -04:00
Marco Beretta
a05e2c1dcc 🗣️ feat: Azure OpenAI speech (#2985)
* feat: Azure STT

* feat: Azure TTS

* refactor: use enums

* fix: frontend tests

* fix(config): wrong key provider
2024-07-10 16:33:06 -04:00
Danny Avila
87bdbda10a 🗣️ fix: update API endpoint for fetching audio in StreamAudio (#3307) 2024-07-09 09:16:16 -04:00
GaelMartins0
605a8ae8c9 🌍 i18n: Update French Translations (#3240)
* French Translation Update

* French Translation Update
2024-07-09 08:32:01 -04:00
eniyiweb
a724635998 🌍 i18n: Update Turkish Translations (#3232)
* Language translation:Turkish translation update

* 🌍: Turkish Translation - Update

* Additional Turkish translations

* Additional Turkish translations

* Added mongo-express

* New translations added

* Update docker-compose.yml

* Turkish language update

* Turkish Translation - Update

* Turkish Translation - Update
2024-07-09 08:30:52 -04:00
Marco Beretta
6c306a662c 🔊 docs(librechat.example): update version (#3287)
* Update librechat.example.yaml

* Update config.ts
2024-07-07 19:00:45 +02:00
Marco Beretta
55f8d9910e ⚒️ fix(speechToText): OpenAI Provider (#3283) 2024-07-07 00:32:19 +02:00
Marco Beretta
7edb54889b 🔊 docs(librechat.example): update speech (#3281) 2024-07-06 16:38:58 -04:00
Marco Beretta
71d9e841b1 📝 docs: update password reset warning link (#3274) 2024-07-06 16:38:40 -04:00
Marco Beretta
e76777d298 💊 fix: OpenID proxy support for downloading profile pictures (#3263)
Related to #3261

Add proxy support to `downloadImage` function in `openidStrategy.js`

* Import `HttpsProxyAgent` from `https-proxy-agent`.
* Add `agent` property to the fetch options in `downloadImage` function if `process.env.PROXY` is set.
* Update the `fetch` call in `downloadImage` function to use the proxy agent if available.
2024-07-05 10:23:06 -04:00
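
A minimal sketch of the change described above, assuming node-fetch (v2-style CommonJS) and the named HttpsProxyAgent export from https-proxy-agent; the actual downloadImage in openidStrategy.js handles more (auth headers, buffering, error logging):

```js
const fetch = require('node-fetch');
const { HttpsProxyAgent } = require('https-proxy-agent');

async function downloadImage(url) {
  const options = { method: 'GET' };
  if (process.env.PROXY) {
    // Route the request through the configured HTTP(S) proxy
    options.agent = new HttpsProxyAgent(process.env.PROXY);
  }
  const response = await fetch(url, options);
  if (!response.ok) {
    throw new Error(`Failed to download profile picture: ${response.status}`);
  }
  return response.buffer();
}
```
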
Marco Beretta
1edbfdbce2 🔑 feat: infinite key expiry (#3252) 2024-07-05 10:15:09 -04:00
Marco Beretta
1aad315de6 🎤 feat: add custom speech config, browser TTS/STT features, and dynamic speech tab settings (#2921)
* feat: update useTextToSpeech and useSpeechToText hooks to support external audio endpoints

This commit updates the useTextToSpeech and useSpeechToText hooks in the Input directory to support external audio endpoints. It introduces the useGetExternalTextToSpeech and useGetExternalSpeechToText hooks, which determine whether the audio endpoints should be set to 'browser' or 'external' based on the value of the endpointTTS and endpointSTT Recoil states. The useTextToSpeech and useSpeechToText hooks now use these new hooks to determine whether to use external audio endpoints

* feat: add userSelect style to ConversationModeSwitch label

* fix: remove unused updateTokenWebsocket function and import

The updateTokenWebsocket function and its import are no longer used in the OpenAIClient module. This commit removes the function and import to clean up the codebase

* feat: support external audio endpoints in useTextToSpeech and useSpeechToText hooks

This commit updates the useTextToSpeech and useSpeechToText hooks in the Input directory to support external audio endpoints. It introduces the useGetExternalTextToSpeech and useGetExternalSpeechToText hooks, which determine whether the audio endpoints should be set to 'browser' or 'external' based on the value of the endpointTTS and endpointSTT Recoil states. The useTextToSpeech and useSpeechToText hooks now use these new hooks to determine whether to use external audio endpoints

* feat: update AutomaticPlayback component to AutomaticPlaybackSwitch; tests: added AutomaticPlaybackSwitch.spec

This commit renames the AutomaticPlayback component to AutomaticPlaybackSwitch in the Speech directory. The new name better reflects the purpose of the component and aligns with the naming convention used in the codebase.

* feat: update useSpeechToText hook to include interimTranscript

This commit updates the useSpeechToText hook in the client/src/components/Chat/Input/AudioRecorder.tsx file to include the interimTranscript state. This allows for real-time display of the speech-to-text transcription while the user is still speaking. The interimTranscript is now used to update the text area value during recording.

* feat: Add customConfigSpeech API endpoint for retrieving custom speech configuration

This commit adds a new customConfigSpeech API endpoint responsible for retrieving the custom speech configuration.

* feat: update store var  and ; fix: getCustomConfigSpeech

* fix: client tests, removed unused import

* feat: Update useCustomConfigSpeechQuery to return an array of custom speech configurations

This commit modifies the useCustomConfigSpeechQuery function in the client/src/data-provider/queries.ts file to return an array of custom speech configurations instead of a single object. This change allows for better handling and manipulation of the data in the application

* feat: Update useCustomConfigSpeechQuery to return an array of custom speech configurations

* refactor: Update variable name in speechTab schema

* refactor: removed unused and nested code

* fix: using recoilState

* refactor: Update Speech component to use useCallback for setting settings

* fix: test

* fix: tests

* feature: ensure that the settings don't change after modifying them through the UI

* remove comment

* fix: Handle error gracefully in getCustomConfigSpeech and getVoices endpoints

* fix: Handle error

* fix: backend tests

* fix: invalid custom config logging

* chore: add back custom config info logging

* chore: revert loadCustomConfig spec

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2024-07-05 10:13:34 -04:00
Danny Avila
5d985746cb 🛠️ fix: Tool Filtering in PluginsClient (#3266)
* feat(plugins): implement tool filtering in PluginsClient

Add functionality to filter tools based on filteredTools and includedTools
arrays in the request's app locals. This allows for dynamic tool selection
on a per-request basis, enhancing the flexibility of the plugin system.

* test(plugins): add unit tests for tool filtering in PluginsClient

Introduce comprehensive test suite for the new tool filtering feature
in PluginsClient. Cover scenarios including filtering out tools,
including specific tools, prioritization of includedTools over
filteredTools, and behavior when no filters are provided.

* chore: Remove unused legacy Conversation component and update imports
2024-07-04 10:34:28 -04:00
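
A hedged sketch of the per-request filtering rules, based only on the scenarios the tests name (array names come from the commit text; the wiring through req.app.locals and PluginsClient itself is not shown):

```js
// Hypothetical standalone version of the described behavior
function applyToolFilters(tools, { filteredTools = [], includedTools = [] } = {}) {
  if (includedTools.length > 0) {
    // includedTools takes priority: keep only explicitly included tools
    return tools.filter((tool) => includedTools.includes(tool.pluginKey));
  }
  if (filteredTools.length > 0) {
    // Otherwise drop any tool that was explicitly filtered out
    return tools.filter((tool) => !filteredTools.includes(tool.pluginKey));
  }
  // No filters provided: return the tool list unchanged
  return tools;
}
```
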
Marco Beretta
04654014b2 📝 docs(README): update Railway referral code (#3242) 2024-07-03 13:37:56 +02:00
Danny Avila
456793772b 📂 fix: Add version selection for Assistants Endpoint uploads (#3236) 2024-06-29 21:56:03 -04:00
Danny Avila
a87d4e0b75 🛠️ fix: Update Conversation and Message Models to Return Objects Instead of Using Lean() (#3230) 2024-06-28 21:57:53 -04:00
Danny Avila
a2fd975cd5 🚤 refactor: Optimize Request Lifecycle Speeds (#3222)
* refactor: optimize backend operations for client requests

* fix: message styling

* refactor: Improve handleKeyUp logic in StreamRunManager.js and handleText.js

* refactor: Improve handleKeyUp logic in StreamRunManager.js and handleText.js

* fix: clear new convo messages on clear all convos

* fix: forgot to pass userId to getConvo

* refactor: update getPartialText to send basePayload.text
2024-06-28 08:44:47 -04:00
Danny Avila
83619de158 🗨️ feat: Prompt Slash Commands (#3219)
* chore: Update prompt description placeholder text

* fix: promptsPathPattern to not include new

* feat: command input and styling change for prompt views

* fix: intended validation

* feat: prompts slash command

* chore: localizations and fix add command during creation

* refactor(PromptsCommand): better label

* feat: update `allPrompGroups` cache on all promptGroups mutations

* refactor: ensure assistants builder is first within sidepanel

* refactor: allow defining emailVerified via create-user script
2024-06-27 17:34:48 -04:00
Arthur Barrett
b8f2bee3fc 📱fix: set initial nav visibility for small screens (#3208)
* fix: hide nav on small screens by default

* test: add spec for Nav component
2024-06-27 10:56:32 -04:00
Ghaith AlHallak
81292bb4dd 🔡 fix: Rendering of Bidirectional Text (#3195)
The fix has been applied only to key components where the rendering issue is significant
2024-06-27 10:56:12 -04:00
Danny Avila
ed5ee1f86f 🔧 refactor: Reduce Complexity of Initial Load Effect & Message Styling (#3213)
* move shared conditions and early bail to reduce cognitive complexity, improve readability

* refactor: make condition as close to the original as possible

* chore: adjust comment in chat route

* style: fix original styling of non-multi messages

* refactor: separate messagerender logic from 'Message'

---------

Co-authored-by: RehaS <beratson@gmail.com>
2024-06-27 10:48:41 -04:00
Danny Avila
791b0139bc 🎨 style: Improve Styling (#3205)
* style: add scrollbar-gutter to prevent layout shift

* style: better styling for simple/advanced tab and remove border-r on smaller screens

* style: better description styling

* style: make sure single response Messages style is the same as pre-multi-stream response feature
2024-06-25 14:28:05 -04:00
Danny Avila
156c52e293 🌿 feat: Multi-response Streaming (#3191)
* chore: comment back handlePlusCommand

* chore: ignore .git dir

* refactor: pass newConversation to `useSelectMention`

refactor: pass newConversation to Mention component

refactor: useChatFunctions for modular use of `ask` and `regenerate`

refactor: set latest message only for the first index in useChatFunctions

refactor: pass setLatestMessage to useChatFunctions

refactor: Pass setSubmission to useChatFunctions for submission handling

refactor: consolidate event handlers to separate hook from useSSE

WIP: additional response handlers

feat: responsive added convo, clears on new chat/navigating to chat, assistants excluded

feat: Add conversationByKeySelector to select any conversation by index

WIP: handle second submission with messages paired to root

* style: surface-primary-contrast

* refactor: remove unnecessary console.log statement in useChatFunctions

* refactor: Consolidate imports in ChatForm and Input hooks

* refactor: compositional usage of useSSE for multiple streams

* WIP: set latest 'multi' message

* WIP: first pass, added response streaming

* pass: performant multi-message stream

* fix: styling and message render

* second pass: modular, performant multi-stream

* fix: align parentMessageId of multiMessage

* refactor: move resetting latestMultiMessage

* chore: update footer text in Chat component

* fix: stop button styling

* fix: handle abortMessage request for multi-response

* clear messages but bug with latest message reset present

* fix: add delay for additional message generation

* fix: access LAST_CONVO_SETUP by index

* style: add div to prevent layout shift before hover buttons render

* chore: Update Message component styling for card messages

* chore: move hook use order

* fix: abort middleware using unsent field from req.body

* feat: support multi-response stream from initial message

* refactor: buildTree function to improve readability and remove unused code

* feat: add logger for frontend dev

* refactor: use depth to track if message is really last in its branch

* fix(buildTree): default export

* fix: share parent message Id and avoid duplication error for multi-response streams

* fix: prevent addedConvo reset to response convo

* feat: allow setting multi message as latest message to control which to respond to

* chore: wrap setSiblingIdxRev with useCallback

* chore: styling and allow editing messages

* style: styling fixes

* feat: Add "AddMultiConvo" component to Chat Header

* feat: prevent clearing added convos on endpoint, preset, mention, or modelSpec switch

* fix: message styling fixes, mainly related to code blocks

* fix: stop button visibility logic

* fix: Handle edge case in abortMiddleware for non-existent `abortControllers`

* refactor: optimize/memoize icons

* chore(GoogleClient): change info to debug logs

* style: active message styling

* style: prevent layout shift due to placeholder row

* chore: remove unused code

* fix: Update BaseClient to handle optional request body properties

* fix(ci): `onStart` now accepts 2 args, the 2nd being responseMessageId

* chore: bump data-provider
2024-06-25 03:02:38 -04:00
KiGamji
eef894e608 🌏 i18n: Improve clarity of English translation (#3154)
* 🌏 i18n: Improve clarity of English translation

* 🔧 fix(useCategories): replace i18n string to `com_ui_select_a_category`

* 🔨 refactor: avoid using placeholder strings where possible

This commit simplifies the internationalization approach for English language strings by removing the placeholder ones where they are used only once. This makes proper localization possible for Russian language, and possibly others.

Also renamed `com_ui_text_prompt` to `com_ui_prompt_text` to match the alphabetical order.

* 🎨 style(CreatePromptForm): add missing margin-top to the submit button
2024-06-24 13:47:20 -04:00
berat reha sönmez
e2867eecc9 🧹 chore: clean commented code (#3160) 2024-06-23 18:13:01 -04:00
Danny Avila
dd563e0796 📋 refactor: Prevent RTF Paste to Clipboard Only Plain Text (#3179) 2024-06-23 18:07:28 -04:00
Yuichi Oneda
c99cf1b4b1 🚅 chore: Added an Example of Nginx gzip Settings (#3173) 2024-06-23 13:49:00 -04:00
Matthew Unrath
b5081bfe86 🤖 feat: Add titling to Google client (#2983)
* feat: Add titling to Google client

* feat: Add titling to Google client

* PR feedback changes
2024-06-22 11:42:51 -04:00
Danny Avila
aac01df80c 🧹 chore: remove unnecessary try/catch when creating users (#3153) 2024-06-21 15:14:18 -04:00
Danny Avila
24467dd626 ⬆️ refactor: Improve Text Commands (#3152)
* refactor(useMentions): separate usage of `useSelectMention`

* refactor: separate handleKeyUp logic from useTextarea

* fix(Mention): cleanup blur timer

* refactor(handleKeyUp): improve command handling, prevent unintended re-trigger

* chore: remove console log

* chore: temporarily comment plus command
2024-06-21 12:34:28 -04:00
Danny Avila
b2b469bd3d 🌐 fix(actions): Correct URL Formation for Subdomains in createURL (#3149) 2024-06-21 11:07:45 -04:00
enz-PedroGruvhagen
cec2e57ee9 📝 chore: Update .env.example (#3142)
Added claude-3-5-sonnet-20240620 to the bunch so it is available in the UI
2024-06-21 10:15:28 -04:00
Yuichi Oneda
a8c874267f 🚀 feat(LDAP): Add Flexible Configuration Options (#3124)
* chore: add detailed logs

* feat: added a variable to specify which attributes to be stored

* chore: Add new optional variables

* refactor: change BIND_DN as an option

* chore: revert commits that fail testing

* refactor: use ldapid to retrieve users

* chore: remove unused variable

* chore: reverting unintended changes

* fix: return 404 if authentication fails, in accordance with requireLocalAuth.

* fix: handling when ldap settings do not exist

* chore: remove unnecessary check
2024-06-21 10:14:53 -04:00
Kurt Seifried
a53312bbd4 💻 feat: added env updater script (#3107) 2024-06-21 10:14:18 -04:00
Ikko Eltociear Ashimine
ab74685476 🐛 fix: Update resetConvo.ts (#3105)
Reseting -> Resetting
2024-06-21 10:13:21 -04:00
Yuichi Oneda
015215b790 🇯🇵 Fix: Incorrect Japanese Translation (#3119) 2024-06-21 10:13:02 -04:00
Yuichi Oneda
4e4de88faa 🔧 fix(Shared Links): Handling Shared Link Errors (#3118)
* refactor: add error handling in Share model

* refactor: add error handing to API routers

* refactor: display error message when API response is an error

* chore: remove unnecessary JSON.stringify

* chore: revert unintended changes

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2024-06-21 10:12:37 -04:00
Yuichi Oneda
3172381bad 🔧 fix(useTextArea): Incorrect New Line while Composing Japanese Text (#3103) 2024-06-21 09:59:31 -04:00
Marco Beretta
54b1095239 🎨 style: fix inconsistent HoverButtons and correct style issue in Continue button (#3100)
* style: fix inconsistent HoverButtons and fixed style bug in `continue` button

* quick fix; forgot to re-implement the logic

* feat: convo switch buttons
2024-06-21 09:58:38 -04:00
Marco Beretta
0424f8fe55 🎨 style: settings tab update (#3088)
* style: settings UI update

* style: update UI

* style: update button style

* fix: scroll settings on mobile

* feat: `?` for settings
2024-06-21 09:58:04 -04:00
Peter Dave Hello
4319c62e66 🌏 i18n: Traditional Chinese language code to zh-TW (#3143) 2024-06-21 08:54:00 -04:00
Peter Dave Hello
d3a0b862db 🌏 i18n: Improve Traditional Chinese translation (#3144) 2024-06-21 08:53:18 -04:00
Danny Avila
5d8793c5d1 🗨️ refactor: Improve Prompts Query (#3138) 2024-06-20 22:21:17 -04:00
Danny Avila
54db67449a 🧠 feat: claude-3-5-sonnet (#3135)
* 🧠 feat: claude-3-5-sonnet

* chore: bump data-provider
2024-06-20 20:48:15 -04:00
Danny Avila
0cd3c83328 🗨️ feat: Prompts (#3131)
* 🗨️ feat: Prompts (#7)

* WIP: MERGE prompts/frontend (#1)

* added schema for prompt and promptgroup, added model methods for prompts, added routes for prompts

* * updated promptGroup Schema

* updated model methods for prompts (get, add, delete)

* slight fixes in prompt routes

* * Created Files Management components

* Created Vector Stores components

* Added file management route in the routes folder

* Completed UI for Files list, Completed UI for vector stores list, Completed UI for upload file modal, Completed UI for preview file, Completed UI for preview vector store

* Fixed style and UI fixes for file dashboard, file list and vector stores list

* added responsiveness classes for vector store page

* fixed responsiveness of file page, dashboard page, and main page

* fixed styling and responsiveness issues on dashboard page, file list page and vector store page

* added queries and mutations for prompts and promptGroups, added relevant endpoints in data-provider, added relevant components prompts, added and updated relevant APIs

* added types on mutation queries data service, updated prompt attributes

* feature: Prompts and prompt groups management, added relevant APIs, added types for data service/queries/mutations, added relevant mutation and queries

* chore: typing clarifications

* added drop down on prompts mgmt dashboard

* Fixes: fixed version switching issue on tags update or labels update, added cross button on create prompt group, fixed list update on prompt group renaming, added CSV upload button

* Feature: Added oneliner and category attributes in prompt group, added schema for categories, added schema methods and route for categories

* chore: typing and lint issues

* chore: more type and linter fixes

* chore: linting

* chore: prompt controller and backend typing example; MOVE TO CONTROLLER DIRECTORY

* chore: more type fixes

* style: prompt name changes

* chore: more type changes, and stateful prompt name change without flickering

* fix: Return result of savePrompt in patchPrompt API endpoint

* fix: navigation prompt queries; refactor: name 'prompt-groups' to just 'groups'

* refactor: fetch prompt groups rewrite

* refactor(prompts): query/mutation statefulness

* refactor: remove `isActive` field

* refactor: remove labels, consolidate logic

* style: width, layout shift

* refactor: improve hover toggle behavior and styling

* refactor: add useParams hook to PromptListItem for dynamic rendering and add timeout ref for blur timeout

* chore: hide upload button

* refactor: import Button component from correct location in PromptSidePanel

* style: prompt editor styling

* style: fix more layout shifts

* style: container scroll

* refactor: Rename CreatePrompt component to CreatePromptForm

* refactor: use react-hook-form

* refactor: Add Prompts components and routes to Dashboard

* style: skeletons for loading

* fix: optimize makePromptProduction

* refactor: consolidate variables

* feat: create prompt form validation

* refactor: Consolidate variables and update mutation hooks

* style: minor touchups

* chore: Update lucide-react npm dependency to version 0.394.0 and npm audit fix

* refactor: add a new icon for the Prompts heading.

* style: Update PromptsView heading to use h1 instead of h2 and other minor margin issues

* chore: wording

* refactor: Update PromptsView heading to use h1 instead of h2, consolidate variables, and add new icons

* refactor: Prompts Button for Mobile

* feature: added category field in prompt group, added relevant API and static data on BE to support FE UI for category in prompt group

* chore: template for prompt cards

---------

Co-authored-by: Fawadpot <contactfawada@gmail.com>

* WIP: Prompts/frontend Continued (#2)

* chore: loading style, remove unused component

* feat: Add CategorySelector component for prompt group category selection

* feat: add categories to create prompt

* feat: prompt versions styling

* feat: optimistic updates for prompt production state

* refactor: optimize form state and show if prompt field is dirty with cross icon, also other styling changes

* chore: remove unused code and localizations

* fix: light mode styling

* WIP: SidePanel Prompts

* refactor: move to groups directory

* refactor: rename GroupsSidePanel to GroupSidePanel and update imports

* style: ListCard

* refactor: isProduction changes

* refactor: infinite query with productionPrompt

* refactor: optimize snippets and prompts, and styling

* refactor: Update getSnippet function to accept a length parameter

* chore: localizations

* feat: prompts navigation to chat and vice versa

* fix: create prompt

* feat: remember last selected category for creating prompts

* fix(promptGroups): fix pagination and add usePromptGroupsNav hook

* Prompts/frontend 3 (#3)

* fix: stateful issues with prompt groups

* style: improved layout

* refactor: improve variable naming in Eng.ts

* refactor: theme selector styling improvements

* added prompt cards on chat new page, with dark mode, added API to fetch random prompts, added types for useQuery

Slightly improved usePromptGroupNav logic to fetch updated result for pageSize, updated prompt cards view with darkmode and responsiveness

fixed page size option buttons styling to match the theme

added dark mode on create prompt page and prompt edit/preview page

* WIP: Prompts/frontend (#4)

* fix: optimize and fix paginated query

* fix: remove unique constraint on names

* refactor: button links and styling

* style: menu border light mode

* feat: Add Auto-Send Switch component for prompts groups

* refactor(ChatView): use form context for submission text

* chore: clear convo state on navigation to dashboard routes

* chore: save prompt edit name on tab, remove console log

* feat: basic prompt submission

* refactor: move Auto-Send Switch

* style(ListCard): border styling

* feat: Add function to detect variables in text

* feat: Add OriginalDialog component to UI library

* chore(ui): Update SelectDropDown options list class to use text-xs size

* refactor: submitMessage hook now includes submitPrompt, make compatible with document query selector

* WIP: Variable Dialog

* feat: variable submission working for both auto-send and non-autosend

* feat: dashboard breadcrumbs and prompts/chat navigation

* refactor: dashboard breadcrumb and dashboard link to chat navigation

* refactor: Update VariableDialog and VariableForm styles

* Prompts: Admin features (#5)

* fix: link issue

* fix: usePromptGroupsNav add missing dep.

* style: dashbreadcrumb and sidepanel text color

* temp fix: remove refetch on pageNumber change

* fix: handle multiple variable replacement

* WIP: create project schema and add project groups to fetch

* feat: Add functionality to add prompt group IDs to a project

* feat: Add caching for startup config in config route

* chore: remove prompt landing

* style: Update Skeleton component with additional background styling

* chore: styling and types

* WIP: SharePrompt first draft

* feat(SharePrompt): form validation

* feat: shared global indicators

* refactor: prompt details

* refactor: change NoPromptGroup directory

* feat: preview prompt

* feat: remove/add global prompts, add rbac-related enums

* refactor: manage prompts location

* WIP: first draft admin settings for prompts

* feat: SystemRoles enum

* refactor: update PromptDetails component styling

* style: ellipsis custom class for showing more preview text

* WIP: initial role schema and initialization

* style: improved margins for single unordered lists

* fix: use custom chat form context to prevent re-renders from FormProvider

* feat: Role mutations for Prompt Permissions

* feat: fetch user role

* feat: update AdminSettings form default values from user role values

* refactor: rename PromptPermissions to Permissions for general definitions

* feat: initial role checks

* feat: Add optional `bodyProps` parameter to generateCheckAccess middleware

* refactor: UI access checks

* Prompts: delete (#6)

* Fixed delete prompt version API, fixed types and logic for prompt version deletion, updated prompt delete mutation logic

* chore: Update return type of deletePrompt function in Prompt.js

---------

Co-authored-by: Fawadpot <contactfawada@gmail.com>

* chore: Update package-lock.json version to 0.7.4-rc1 and fast-xml-parser to 4.4.0

* feat: toast for saving admin settings, add timer no-access navigation

* feat: always make prod

* feat: Add localization to category labels in CategorySelector component

* feat: Update category label localization in CategorySelector component

* fix: Enable making prompt production in Prompt API

---------

Co-authored-by: Fawadpot <contactfawada@gmail.com>

* feat: Add helper fn for dark mode detection in ThemeProvider

* style: surface-primary definition

* fix(useHasAccess): utilize user.role and not just USER role

* fix: empty category and role fetch

* refactor: increase max height of options list and use label if no localization is found

* fix: update CategorySelector to handle empty category value and improve localization

* refactor: move prompts to own store/reactquery modules, add in filter WIP

* refactor: Rename AutoSendSwitch to AutoSendPrompt

* style: theming commit

* style: fix slight coloring issue for convos in dark mode

* style: better composition for prompts side panel

* style: remove gray-750 and make it gray-850

* chore: adjust theming

* feat: filter all prompt groups and properly remove prompts from projects

* refactor: optimize delete prompt groups further

* chore: localization

* feat: Add uniqueProperty filtering to normalizeData function

* WIP: filter prompts

* chore: Update FilterPrompts component to include User icon in FilterItem

* feat(FilterPrompts): set categories

* feat: more system filters and show selected category icon

* style: always make prod, flips switch to avoid mis-clicks

* style: ui/ux loading/no prompts

* chore: style FilterPrompts ChatView

* fix: handle missing role edge case

* style: special variables

* feat: special variables

* refactor: improve replaceSpecialVars function in prompts.ts

* feat: simple/advanced editor modes

* chore: bump versions

* feat: localizations and hide production button on simple mode

* fix: error connecting layout shift

* fix: prompts CRUD for admins

* fix: secure single group fetch

* style: sidepanel styling

* style(PromptName): bring edit button closer to name

* style: mobile prompts header

* style: mobile prompts header continued

* style: align send prompts switch right

* feat: description

* Update special variables description in Eng.ts

* feat: update/create/preview oneliner

* fix: allow empty oneliner update

* style: loading improvement and always make selected prompt Production if simple mode

* fix: production index set and remove unused props

* fix(ci): mock initializeRoles

* fix: address #3128

* fix: address #3128

* feat: add deletion confirmation dialog

* fix: mobile UI issues

* style: prompt library UI update

* style: focus, logical tab order

* style: Refactor SelectDropDown component to improve code readability and maintainability

* chore: bump data-provider

* chore: fix labels

* refactor: confirm delete prompt version

---------

Co-authored-by: Marco Beretta <81851188+berry-13@users.noreply.github.com>
2024-06-20 20:24:32 -04:00
Marco Beretta
d839e4661c feat: download audio file support 2024-06-18 16:24:37 +02:00
Danny Avila
302b28fc9b v0.7.4-rc1 (#3099)
* fix(openIdStrategy): return user object on new user creation

*  v0.7.4-rc1
2024-06-17 12:47:28 -04:00
Marco Beretta
dad25bd297 📝 docs: update README (#3093) 2024-06-17 07:51:13 -04:00
Marco Beretta
a338decf90 ✉️ fix: email address encoding in verification link (#3085)
Related to #3084

Implements URL encoding for email addresses in verification links and decodes them upon verification.

- **Encode email addresses** in `sendVerificationEmail` and `resendVerificationEmail` functions using `encodeURIComponent` to ensure special characters like `+` are correctly handled in the verification link.
- **Decode email addresses** in the `verifyEmail` function using `decodeURIComponent` to accurately retrieve and validate the email address from the verification link against the database.


---

For more details, open the [Copilot Workspace session](https://copilot-workspace.githubnext.com/danny-avila/LibreChat/issues/3084?shareId=9c32df30-4156-4082-a3eb-fff54eaba5b3).
2024-06-16 16:05:53 -04:00
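
A minimal sketch of the encode/decode round trip described above (function names and query parameters are illustrative, not the exact LibreChat routes):

```js
// Without encoding, a '+' in the address would be read back as a space from the query string
function buildVerificationLink(baseUrl, email, token) {
  return `${baseUrl}/verify?email=${encodeURIComponent(email)}&token=${token}`;
}

// Restore the original address before validating it against the database
function decodeVerificationEmail(rawQueryValue) {
  return decodeURIComponent(rawQueryValue);
}

const link = buildVerificationLink('https://chat.example.com', 'user+test@example.com', 'abc123');
const rawEmail = link.split('email=')[1].split('&')[0]; // 'user%2Btest%40example.com'
console.log(decodeVerificationEmail(rawEmail)); // 'user+test@example.com'
```
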
Danny Avila
2cf5228021 🕑 fix: Add Suspense to Connection Error Messages (#3074) 2024-06-15 16:16:06 -04:00
Danny Avila
0294cfc881 v0.7.3 (#3067)
* refactor: revert BaseClient to use node-fetch instead of undici

* chore: bump version

* chore: update npm dependencies to latest versions

* chore: fix custom footer
2024-06-15 12:17:10 -04:00
Yuichi Oneda
8d8b17e7ed 🚀 feat: Add Option to Disable Shared Links (#2986)
* feat: add option to disable shared links

* chore: update languages

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2024-06-15 11:12:03 -04:00
Danny Avila
04502e9525 👤 fix: Create User with timestamps (#3070)
* 👤 fix: Create User with timestamps

* chore: fix lint script to ignore venv

* chore: linting
2024-06-15 10:36:49 -04:00
btribonde
bcaa7d5d29 🛤️ feat: Proxy Support for OpenID Login (#3051)
https://github.com/danny-avila/LibreChat/issues/3041
2024-06-15 09:41:34 -04:00
Marco Beretta
c288b458b6 📝 docs: update README's features (#3011)
* Update README.md

* Update README.md

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2024-06-15 09:36:05 -04:00
Neelesh Kumar
447bbcb8ca 📝 chore: Update .env.example with Custom Endpoint API Key examples (#2970)
Add env variables for cohere and databricks
2024-06-15 09:31:39 -04:00
Marco Beretta
68bf7ac7c0 🪲 fix(useTextarea): enterToSend bugs (#2988)
Co-authored-by: Danny Avila <danny@librechat.ai>
2024-06-15 09:30:19 -04:00
Denis Palnitsky
97d12d03d1 📊 feat: Google tag manager integration (#2469)
* Google tag manager integration

* change location of react-gtm-module package

* refactor: move react-gtm-module usage from Chat/Footer to useAppStartup hook

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2024-06-15 08:09:18 -04:00
Danny Avila
4416f69a9b 🚀 refactor: Use Undici Instead of Node-Fetch, prevent Event Close, add Index (#3052)
* feat: Add index to conversationId field in messageSchema

* refactor: prevent immediate event close on error

* refactor: use undici instead of node-fetch in non-Bun environment
2024-06-13 13:38:15 -04:00
Yuichi Oneda
29e71e98ad ✍️ feat: Automatic Save and Restore for Chat (#2942)
* feat: added "Save draft locally" to Message settings

* feat: add hook to save chat input as draft every second

* fix: use filepath if the file does not have a preview prop

* fix: do not delete temporary files when navigating to a new chat

* chore: translations

* chore: import order

* chore: import order

---------

Co-authored-by: Danny Avila <danacordially@gmail.com>
Co-authored-by: Danny Avila <danny@librechat.ai>
2024-06-13 09:52:30 -04:00
Jakub Mieszczak
e9bbf39618 📝 feat: Markdown support for Custom Footer links (#2960)
* feat: Add markdown support for custom footer links

* fix: Update Footer component with revised ReactMarkdown props

* fix: Adjusted footer links and pipe styles for consistent appearance

* refactor: remove unused footer.tsx file
2024-06-13 09:51:28 -04:00
Danny Avila
08b8ae120e 🤖 fix(Assistants): Ensure Required Actions always have Outputs (#3031) 2024-06-10 22:01:52 -04:00
Danny Avila
803fd63121 📋 fix(handlePaste): Remove Custom rich-text handling (#3025)
* fix: rich text edge case

* fix(useTextarea): removing custom handling fixes RTF slow loading, default behavior with form prefers plain-text
2024-06-10 14:36:51 -04:00
Yuichi Oneda
ef76cc195e 🎨 style(Textarea): Message Edit Textarea Styling (#3009) 2024-06-10 13:01:07 -04:00
Danny Avila
2e559137ae ℹ️ refactor: Remove use of Agenda for Conversation Imports (#3024)
* chore: remove agenda and npm audit fix

* refactor: import conversations without agenda

* chore: update package-lock.json and data-provider version to 0.6.7

* fix: import conversations

* chore: client npm audit fix
2024-06-10 13:00:34 -04:00
Danny Avila
92232afaca 📧 fix: Cancel Signup if Email Issuance Fails (#3010)
* fix: user.id assignment in jwtStrategy.js

* refactor(sendEmail): pass params as object, await email sending to propagate errors and restrict registration flow

* fix(Conversations): handle missing updatedAt field

* refactor: use `processDeleteRequest` when deleting user account for user file deletion

* refactor: delete orphaned files when deleting user account

* fix: remove unnecessary 404 status code in server/index.js
2024-06-08 06:51:29 -04:00
Danny Avila
084cf266a2 🗃️ fix: revise RAG prompt (#3006) 2024-06-07 20:51:43 -04:00
Danny Avila
baf0848021 📧 fix: LDAP login after User verification changes (#3003) 2024-06-07 17:43:36 -04:00
Danny Avila
1da92111aa 🚀 refactor: Remove Local Login Redundancies (#3002) 2024-06-07 16:45:31 -04:00
Danny Avila
35f8053f45 📧 fix: Ensure User Verification for Instances without Email Service (#2998) 2024-06-07 15:43:43 -04:00
Marco Beretta
ee673d682e 📧 feat: email verification (#2344)
* feat: verification email

* chore: email verification invalid; localize: update

* fix: redirect to login when signup: fix: save emailVerified correctly

* docs: update ALLOW_UNVERIFIED_EMAIL_LOGIN; fix: don't accept login only when ALLOW_UNVERIFIED_EMAIL_LOGIN = true

* fix: user needs to be authenticated

* style: update

* fix: registration success message and redirect logic

* refactor: use `isEnabled` in ALLOW_UNVERIFIED_EMAIL_LOGIN

* refactor: move checkEmailConfig to server/utils

* refactor: use req as param for verifyEmail function

* chore: jsdoc

* chore: remove console log

* refactor: rename `createNewUser` to `createSocialUser`

* refactor: update typing and add expiresAt field to userSchema

* refactor: begin use of user methods over direct model access for User

* refactor: initial email verification rewrite

* chore: typing

* refactor: registration flow rewrite

* chore: remove help center text

* refactor: update getUser to getUserById and add findUser methods. general fixes from recent changes

* refactor: Update updateUser method to remove expiresAt field and use $set and $unset operations, createUser now returns Id only

* refactor: Update openidStrategy to use optional chaining for avatar check, move saveBuffer init to buffer condition

* refactor: logout on deleteUser mutatation

* refactor: Update openidStrategy login success message format

* refactor: Add emailVerified field to Discord and Facebook profile details

* refactor: move limiters to separate middleware dir

* refactor: Add limiters for email verification and password reset

* refactor: Remove getUserController and update routes and controllers accordingly

* refactor: Update getUserById method to exclude password and version fields

* refactor: move verification to user route, add resend verification option

* refactor: Improve email verification process and resend option

* refactor: remove more direct model access of User and remove unused code

* refactor: replace user authentication methods and token generation

* fix: add user.id to jwt user

* refactor: Update AuthContext to include setError function, add resend link to Login Form, make registration redirect shorter

* fix(updateUserPluginsService): ensure userPlugins variable is defined

* refactor: Delete all shared links for a specific user

* fix: remove use of direct User.save() in handleExistingUser

* fix(importLibreChatConvo): handle missing createdAt field in messages

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2024-06-07 15:06:47 -04:00
Marco Beretta
b7fef6958b 🔒refactor: social login and remove direct user model access in strategies (#2946)
* refactor: checking `ALLOW_SOCIAL_REGISTRATION` with `isEnabled`

* feat: Add findUserByEmail function to UserService

This commit adds a new function, findUserByEmail, to the UserService module. This function retrieves a user document from the database based on the provided email. It returns the user document if found, otherwise it returns null. If there is a problem during user retrieval, an error is thrown.

* refactor: add socialLogin to remove repetitive code
2024-06-06 13:23:11 -04:00
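
An illustrative sketch of the helper described above, assuming a Mongoose-backed User model (the real UserService and user schema differ in detail):

```js
const mongoose = require('mongoose');

// Minimal stand-in model for illustration; the actual user schema lives elsewhere in the api workspace
const User = mongoose.models.User || mongoose.model('User', new mongoose.Schema({ email: String }));

/**
 * Retrieves a user document by email.
 * @param {string} email - The email address to search for.
 * @returns {Promise<Object|null>} The user document, or null if no user matches.
 */
async function findUserByEmail(email) {
  try {
    return await User.findOne({ email }).lean();
  } catch (error) {
    throw new Error(`Error finding user by email: ${error.message}`);
  }
}

module.exports = { findUserByEmail };
```
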
Marco Beretta
5452d4c20c 🔒 feat: password reset disable option; fix: account email error message (#2327)
* feat: password reset disable option; fix: account email leak

* fix(LoginSpec): typo

* test: fixed LoginForm test

* fix: disable password reset when undefined

* refactor: use a helper function

* fix: tests

* feat: Remove unused error message in password reset process

* chore: Update password reset email message

* refactor: only allow password reset if explicitly allowed

* feat: Add password reset email service configuration check

The code changes in `checks.js` add a new function `checkPasswordReset()` that checks if the email service is configured when password reset is enabled. If the email service is not configured, a warning message is logged. This change ensures secure password reset functionality by prompting the user to configure the email service.

Co-authored-by: Berry-13 <root@Berry>
Co-authored-by: Danny Avila <messagedaniel@protonmail.com>
Co-authored-by: Danny Avila <danny@librechat.ai>

* chore: remove import order rules

* refactor: simplify password reset logic and align against Observable Response Discrepancy

* chore: make password reset warning more prominent

* chore(AuthService): better logging for password resets, refactor requestPasswordReset to use req object, fix sendEmail error when email config is not present

* refactor: fix styling of password reset email message

* chore: add missing type for passwordResetEnabled, TStartupConfig

* fix(LoginForm): prevent login form flickering

* fix(ci): Update login form to use mocked startupConfig for rendering correctly

* refactor: Improve password reset UI, applies DRY

* chore: Add logging to password reset validation middleware

* chore(CONTRIBUTING): Update import order conventions

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
Co-authored-by: Berry-13 <root@Berry>
Co-authored-by: Danny Avila <messagedaniel@protonmail.com>
2024-06-06 11:39:36 -04:00
Marco Beretta
a7f5b57272 🚫👤feat: delete user from UI (#1526)
* initial commit

* fix: UserController bugs; fix: lint errors

* fix: delete files

* language support

* style(DeleteAccount): update to the latest style

* style: fix after merge main

* chore: Add canDeleteAccount middleware for user deletion endpoint

* chore: renamed to ALLOW_ACCOUNT_DELETION

* fix(canDeleteAccount): use uppercase admin role

* chore: imports order

* chore: Enable account deletion by default if omitted/commented out

* chore: Add logging for user account deletion

* chore: Bump data-provider package version to 0.6.6

* chore: Import Transaction model in UserController

* chore: Update CONFIG_VERSION to 1.1.4

* chore: Update user account deletion logging

* chore: Refactor user account deletion logic

---------

Co-authored-by: Berry-13 <root@Berry>
Co-authored-by: Danny Avila <messagedaniel@protonmail.com>
Co-authored-by: Danny Avila <danny@librechat.ai>
2024-06-05 19:35:12 -04:00
Marco Beretta
f69b317171 🔧 fix(useMentions): handle empty assistant lists (#2966)
The useMentions hook in the client/src/hooks/Input/useMentions.ts file has been updated to handle cases where the assistant lists for the endpoints 'assistants' and 'azureAssistants' are empty. This change ensures that the hook does not throw an error when attempting to access assistantListMap[EModelEndpoint.assistants] or assistantListMap[EModelEndpoint.azureAssistants]. Instead, it defaults to an empty array for these cases.
2024-06-05 14:56:15 -04:00
Yuichi Oneda
4469ba72fc 🧹 chore(.eslintrc.js): Update Import Order of The React Types (#2964) 2024-06-05 14:55:42 -04:00
Yuichi Oneda
0e3e45e77d 🔧 chore: Add import/order Eslint Rule (#2928)
* chore: add import order eslint rule

* refactor: apply 'import/order' rule
2024-06-04 08:56:26 -04:00
Marco Beretta
9f0c1914a5 🎂 fix: birthday icon (#2950)
* fix: tooltip and birthday icon

* chore: update conditional render

---------

Co-authored-by: Danny Avila <danny@librechat.ai>
2024-06-04 08:55:41 -04:00
Ventz Petkov
37ae484fbc 🚅 docs: Updated Example for LiteLLM ports and Volume mount (#2941)
* Added necessary "ports" section for it to work by default
* Added (commented out) example GCP Vertex volume mount for auth config and for ENV variable.
2024-06-01 08:51:18 -04:00
Danny Avila
8939d8af37 🦙 feat: Add LLama 3 System Context Length (#2938) 2024-05-31 12:16:08 -04:00
Danny Avila
f9a0166352 🔄 refactor(EditPresetDialog): Update Model on Endpoint Change (#2936)
* refactor(EditPresetDialog): dynamically update current editable preset model on endpoint change

* feat: Add null check for models in EditPresetDialog

* chore(AlertDialogPortal): typing

* fix(EditPresetDialog): prevent Unknown endpoint edge case for custom endpoints
2024-05-31 11:43:14 -04:00
Danny Avila
248dfb8b5b 🐛 fix: Resolve Preset Button Disappearing in Mobile View (#2935)
* refactor: Update import paths for ExportAndShareMenu component and add localization

* fix: mobile view for export/share button
2024-05-31 08:46:09 -04:00
573 changed files with 27507 additions and 8567 deletions

.env.example

@@ -64,6 +64,8 @@ PROXY=
# ANYSCALE_API_KEY=
# APIPIE_API_KEY=
# COHERE_API_KEY=
# DATABRICKS_API_KEY=
# FIREWORKS_API_KEY=
# GROQ_API_KEY=
# HUGGINGFACE_TOKEN=
@@ -78,7 +80,7 @@ PROXY=
#============#
ANTHROPIC_API_KEY=user_provided
# ANTHROPIC_MODELS=claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_MODELS=claude-3-5-sonnet-20240620,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_REVERSE_PROXY=
#============#
@@ -121,6 +123,8 @@ GOOGLE_KEY=user_provided
# Vertex AI
# GOOGLE_MODELS=gemini-1.5-flash-preview-0514,gemini-1.5-pro-preview-0514,gemini-1.0-pro-vision-001,gemini-1.0-pro-002,gemini-1.0-pro-001,gemini-pro-vision,gemini-1.0-pro
# GOOGLE_TITLE_MODEL=gemini-pro
# Google Gemini Safety Settings
# NOTE (Vertex AI): You do not have access to the BLOCK_NONE setting by default.
# To use this restricted HarmBlockThreshold setting, you will need to either:
@@ -140,7 +144,7 @@ GOOGLE_KEY=user_provided
#============#
OPENAI_API_KEY=user_provided
# OPENAI_MODELS=gpt-4o,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
# OPENAI_MODELS=gpt-4o,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
DEBUG_OPENAI=false
@@ -162,7 +166,7 @@ DEBUG_OPENAI=false
ASSISTANTS_API_KEY=user_provided
# ASSISTANTS_BASE_URL=
# ASSISTANTS_MODELS=gpt-4o,gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview
# ASSISTANTS_MODELS=gpt-4o,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview
#==========================#
# Azure Assistants API #
@@ -184,7 +188,7 @@ ASSISTANTS_API_KEY=user_provided
# Plugins #
#============#
# PLUGIN_MODELS=gpt-4o,gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613
# PLUGIN_MODELS=gpt-4o,gpt-4o-mini,gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613
DEBUG_PLUGINS=true
@@ -319,6 +323,9 @@ ALLOW_EMAIL_LOGIN=true
ALLOW_REGISTRATION=true
ALLOW_SOCIAL_LOGIN=false
ALLOW_SOCIAL_REGISTRATION=false
ALLOW_PASSWORD_RESET=false
# ALLOW_ACCOUNT_DELETION=true # note: enabled by default if omitted/commented out
ALLOW_UNVERIFIED_EMAIL_LOGIN=true
SESSION_EXPIRY=1000 * 60 * 15
REFRESH_TOKEN_EXPIRY=(1000 * 60 * 60 * 24) * 7
@@ -367,6 +374,11 @@ LDAP_BIND_CREDENTIALS=
LDAP_USER_SEARCH_BASE=
LDAP_SEARCH_FILTER=mail={{username}}
LDAP_CA_CERT_PATH=
# LDAP_TLS_REJECT_UNAUTHORIZED=
# LDAP_LOGIN_USES_USERNAME=true
# LDAP_ID=
# LDAP_USERNAME=
# LDAP_FULL_NAME=
#========================#
# Email Password Reset #
@@ -394,6 +406,13 @@ FIREBASE_STORAGE_BUCKET=
FIREBASE_MESSAGING_SENDER_ID=
FIREBASE_APP_ID=
#========================#
# Shared Links #
#========================#
ALLOW_SHARED_LINKS=true
ALLOW_SHARED_LINKS_PUBLIC=true
#===================================================#
# UI #
#===================================================#
@@ -404,6 +423,9 @@ HELP_AND_FAQ_URL=https://librechat.ai
# SHOW_BIRTHDAY_ICON=true
# Google tag manager id
#ANALYTICS_GTM_ID=user provided google tag manager id
#==================================================#
# Others #
#==================================================#

View File

@@ -126,6 +126,18 @@ Apply the following naming conventions to branches, labels, and other Git-relate
- **Current Stance**: At present, this backend transition is of lower priority and might not be pursued.
## 7. Module Import Conventions
- `npm` packages first
  - from shortest line (top) to longest (bottom)
- Followed by TypeScript types (pertains to the data-provider and client workspaces)
  - longest line (top) to shortest (bottom)
  - types from the package come first
- Lastly, local imports
  - longest line (top) to shortest (bottom)
  - imports with the alias `~` are treated the same as relative imports with respect to line length (see the illustrative sketch after this list)
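A short example of these ordering rules, using hypothetical module names (the packages, types, and paths below are placeholders rather than a prescription):

```typescript
// Hypothetical imports, ordered per the conventions above.

// 1) npm packages: shortest line (top) to longest (bottom)
import axios from 'axios';
import { useMemo, useState } from 'react';
import { useQuery, useQueryClient } from '@tanstack/react-query';

// 2) TypeScript types: longest (top) to shortest (bottom); types from the package come first
import type { TConversation, TPreset } from 'librechat-data-provider';
import type { MentionOption } from '~/common';

// 3) local imports: longest (top) to shortest (bottom); '~' aliases count as relative imports
import { useLocalize, useCombobox } from '~/hooks';
import { removeCharIfLast } from '~/utils';
import store from '~/store';
```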
---

2 .gitignore vendored
View File

@@ -11,6 +11,7 @@ logs
pids
*.pid
*.seed
.git
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
@@ -45,6 +46,7 @@ api/node_modules/
client/node_modules/
bower_components/
*.d.ts
!vite-env.d.ts
# Floobits
.floo

View File

@@ -1,4 +1,4 @@
# v0.7.2
# v0.7.3
# Base node image
FROM node:20-alpine AS node

View File

@@ -1,4 +1,4 @@
# v0.7.2
# v0.7.3
# Build API, Client and Data Provider
FROM node:20-alpine AS base

View File

@@ -27,7 +27,7 @@
</p>
<p align="center">
<a href="https://railway.app/template/b5k2mn?referralCode=myKrVZ">
<a href="https://railway.app/template/b5k2mn?referralCode=HI9hWz">
<img src="https://railway.app/button.svg" alt="Deploy on Railway" height="30">
</a>
<a href="https://zeabur.com/templates/0X2ZY8">
@@ -50,7 +50,7 @@
- 🔄 Edit, Resubmit, and Continue Messages with Conversation branching
- 🌿 Fork Messages & Conversations for Advanced Context control
- 💬 Multimodal Chat:
- Upload and analyze images with Claude 3, GPT-4 (including `gpt-4o`), and Gemini Vision 📸
- Upload and analyze images with Claude 3, GPT-4 (including `gpt-4o` and `gpt-4o-mini`), and Gemini Vision 📸
- Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, & Google. 🗃️
- Advanced Agents with Files, Code Interpreter, Tools, and API Actions 🔦
- Available through the [OpenAI Assistants API](https://platform.openai.com/docs/assistants/overview) 🌤️
@@ -58,9 +58,13 @@
- 🌎 Multilingual UI:
- English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro,
- Русский, 日本語, Svenska, 한국어, Tiếng Việt, 繁體中文, العربية, Türkçe, Nederlands, עברית
- 🎨 Customizable Dropdown & Interface: Adapts to both power users and newcomers.
- 🎨 Customizable Dropdown & Interface: Adapts to both power users and newcomers
- 📧 Verify your email to ensure secure access
- 🗣️ Chat hands-free with Speech-to-Text and Text-to-Speech magic
- Automatically send and play Audio
- Supports OpenAI, Azure OpenAI, and Elevenlabs
- 📥 Import Conversations from LibreChat, ChatGPT, Chatbot UI
- 📤 Export conversations as screenshots, markdown, text, json.
- 📤 Export conversations as screenshots, markdown, text, json
- 🔍 Search all messages/conversations
- 🔌 Plugins, including web access, image generation with DALL-E-3 and more
- 👥 Multi-User, Secure Authentication with Moderation and Token spend tools
@@ -77,7 +81,7 @@ LibreChat brings together the future of assistant AIs with the revolutionary tec
With LibreChat, you no longer need to opt for ChatGPT Plus and can instead use free or pay-per-call APIs. We welcome contributions, cloning, and forking to enhance the capabilities of this advanced chatbot platform.
[![Watch the video](https://img.youtube.com/vi/YLVUW5UP9N0/maxresdefault.jpg)](https://www.youtube.com/watch?v=YLVUW5UP9N0)
[![Watch the video](https://img.youtube.com/vi/bSVHEbVPNl4/maxresdefault.jpg)](https://www.youtube.com/watch?v=bSVHEbVPNl4)
Click on the thumbnail to open the video☝
---

View File

@@ -2,8 +2,10 @@ const Anthropic = require('@anthropic-ai/sdk');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
getResponseSender,
Constants,
EModelEndpoint,
anthropicSettings,
getResponseSender,
validateVisionModel,
} = require('librechat-data-provider');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
@@ -16,6 +18,7 @@ const {
} = require('./prompts');
const spendTokens = require('~/models/spendTokens');
const { getModelMaxTokens } = require('~/utils');
const { sleep } = require('~/server/utils');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
@@ -29,6 +32,8 @@ function delayBeforeRetry(attempts, baseDelay = 1000) {
return new Promise((resolve) => setTimeout(resolve, baseDelay * attempts));
}
const { legacy } = anthropicSettings;
class AnthropicClient extends BaseClient {
constructor(apiKey, options = {}) {
super(apiKey, options);
@@ -61,15 +66,20 @@ class AnthropicClient extends BaseClient {
const modelOptions = this.options.modelOptions || {};
this.modelOptions = {
...modelOptions,
// set some good defaults (check for undefined in some cases because they may be 0)
model: modelOptions.model || 'claude-1',
temperature: typeof modelOptions.temperature === 'undefined' ? 1 : modelOptions.temperature, // 0 - 1, 1 is default
topP: typeof modelOptions.topP === 'undefined' ? 0.7 : modelOptions.topP, // 0 - 1, default: 0.7
topK: typeof modelOptions.topK === 'undefined' ? 40 : modelOptions.topK, // 1-40, default: 40
stop: modelOptions.stop, // no stop method for now
model: modelOptions.model || anthropicSettings.model.default,
};
this.isClaude3 = this.modelOptions.model.includes('claude-3');
this.isLegacyOutput = !this.modelOptions.model.includes('claude-3-5-sonnet');
if (
this.isLegacyOutput &&
this.modelOptions.maxOutputTokens &&
this.modelOptions.maxOutputTokens > legacy.maxOutputTokens.default
) {
this.modelOptions.maxOutputTokens = legacy.maxOutputTokens.default;
}
this.useMessages = this.isClaude3 || !!this.options.attachments;
this.defaultVisionModel = this.options.visionModel ?? 'claude-3-sonnet-20240229';
@@ -119,10 +129,11 @@ class AnthropicClient extends BaseClient {
/**
* Get the initialized Anthropic client.
* @param {Partial<Anthropic.ClientOptions>} requestOptions - The options for the client.
* @returns {Anthropic} The Anthropic client instance.
*/
getClient() {
/** @type {Anthropic.default.RequestOptions} */
getClient(requestOptions) {
/** @type {Anthropic.ClientOptions} */
const options = {
fetch: this.fetch,
apiKey: this.apiKey,
@@ -136,6 +147,12 @@ class AnthropicClient extends BaseClient {
options.baseURL = this.options.reverseProxyUrl;
}
if (requestOptions?.model && requestOptions.model.includes('claude-3-5-sonnet')) {
options.defaultHeaders = {
'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15',
};
}
return new Anthropic(options);
}
@@ -556,8 +573,6 @@ class AnthropicClient extends BaseClient {
}
logger.debug('modelOptions', { modelOptions });
const client = this.getClient();
const metadata = {
user_id: this.user,
};
@@ -585,7 +600,7 @@ class AnthropicClient extends BaseClient {
if (this.useMessages) {
requestOptions.messages = payload;
requestOptions.max_tokens = maxOutputTokens || 1500;
requestOptions.max_tokens = maxOutputTokens || legacy.maxOutputTokens.default;
} else {
requestOptions.prompt = payload;
requestOptions.max_tokens_to_sample = maxOutputTokens || 1500;
@@ -605,12 +620,14 @@ class AnthropicClient extends BaseClient {
};
const maxRetries = 3;
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
async function processResponse() {
let attempts = 0;
while (attempts < maxRetries) {
let response;
try {
const client = this.getClient(requestOptions);
response = await this.createResponse(client, requestOptions);
signal.addEventListener('abort', () => {
@@ -627,6 +644,8 @@ class AnthropicClient extends BaseClient {
} else if (completion.completion) {
handleChunk(completion.completion);
}
await sleep(streamRate);
}
// Successful processing, exit loop
@@ -737,7 +756,11 @@ class AnthropicClient extends BaseClient {
};
try {
const response = await this.createResponse(this.getClient(), requestOptions, true);
const response = await this.createResponse(
this.getClient(requestOptions),
requestOptions,
true,
);
let promptTokens = response?.usage?.input_tokens;
let completionTokens = response?.usage?.output_tokens;
if (!promptTokens) {

View File

@@ -1,10 +1,11 @@
const crypto = require('crypto');
const fetch = require('node-fetch');
const { supportsBalanceCheck, Constants } = require('librechat-data-provider');
const { getConvo, getMessages, saveMessage, updateMessage, saveConvo } = require('~/models');
const { supportsBalanceCheck, Constants, CacheKeys, Time } = require('librechat-data-provider');
const { getMessages, saveMessage, updateMessage, saveConvo } = require('~/models');
const { addSpaceIfNeeded, isEnabled } = require('~/server/utils');
const checkBalance = require('~/models/checkBalance');
const { getFiles } = require('~/models/File');
const { getLogStores } = require('~/cache');
const TextStream = require('./TextStream');
const { logger } = require('~/config');
@@ -19,6 +20,14 @@ class BaseClient {
day: 'numeric',
});
this.fetch = this.fetch.bind(this);
/** @type {boolean} */
this.skipSaveConvo = false;
/** @type {boolean} */
this.skipSaveUserMessage = false;
/** @type {ClientDatabaseSavePromise} */
this.userMessagePromise;
/** @type {ClientDatabaseSavePromise} */
this.responsePromise;
}
setOptions() {
@@ -69,6 +78,9 @@ class BaseClient {
url = this.options.reverseProxyUrl;
}
logger.debug(`Making request to ${url}`);
if (typeof Bun !== 'undefined') {
return await fetch(url, init);
}
return await fetch(url, init);
}
@@ -81,19 +93,45 @@ class BaseClient {
await stream.processTextStream(onProgress);
}
/**
* @returns {[string|undefined, string|undefined]}
*/
processOverideIds() {
/** @type {Record<string, string | undefined>} */
let { overrideConvoId, overrideUserMessageId } = this.options?.req?.body ?? {};
if (overrideConvoId) {
const [conversationId, index] = overrideConvoId.split(Constants.COMMON_DIVIDER);
overrideConvoId = conversationId;
if (index !== '0') {
this.skipSaveConvo = true;
}
}
if (overrideUserMessageId) {
const [userMessageId, index] = overrideUserMessageId.split(Constants.COMMON_DIVIDER);
overrideUserMessageId = userMessageId;
if (index !== '0') {
this.skipSaveUserMessage = true;
}
}
return [overrideConvoId, overrideUserMessageId];
}
async setMessageOptions(opts = {}) {
if (opts && opts.replaceOptions) {
this.setOptions(opts);
}
const [overrideConvoId, overrideUserMessageId] = this.processOverideIds();
const { isEdited, isContinued } = opts;
const user = opts.user ?? null;
this.user = user;
const saveOptions = this.getSaveOptions();
this.abortController = opts.abortController ?? new AbortController();
const conversationId = opts.conversationId ?? crypto.randomUUID();
const conversationId = overrideConvoId ?? opts.conversationId ?? crypto.randomUUID();
const parentMessageId = opts.parentMessageId ?? Constants.NO_PARENT;
const userMessageId = opts.overrideParentMessageId ?? crypto.randomUUID();
const userMessageId =
overrideUserMessageId ?? opts.overrideParentMessageId ?? crypto.randomUUID();
let responseMessageId = opts.responseMessageId ?? crypto.randomUUID();
let head = isEdited ? responseMessageId : parentMessageId;
this.currentMessages = (await this.loadHistory(conversationId, head)) ?? [];
@@ -157,7 +195,7 @@ class BaseClient {
}
if (typeof opts?.onStart === 'function') {
opts.onStart(userMessage);
opts.onStart(userMessage, responseMessageId);
}
return {
@@ -447,8 +485,13 @@ class BaseClient {
this.handleTokenCountMap(tokenCountMap);
}
if (!isEdited) {
await this.saveMessageToDatabase(userMessage, saveOptions, user);
if (!isEdited && !this.skipSaveUserMessage) {
this.userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user);
if (typeof opts?.getReqData === 'function') {
opts.getReqData({
userMessagePromise: this.userMessagePromise,
});
}
}
if (
@@ -497,15 +540,23 @@ class BaseClient {
const completionTokens = this.getTokenCount(completion);
await this.recordTokenUsage({ promptTokens, completionTokens });
}
await this.saveMessageToDatabase(responseMessage, saveOptions, user);
if (this.userMessagePromise) {
await this.userMessagePromise;
}
this.responsePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user);
const messageCache = getLogStores(CacheKeys.MESSAGES);
messageCache.set(
responseMessageId,
{
text: responseMessage.text,
complete: true,
},
Time.FIVE_MINUTES,
);
delete responseMessage.tokenCount;
return responseMessage;
}
async getConversation(conversationId, user = null) {
return await getConvo(user, conversationId);
}
async loadHistory(conversationId, parentMessageId = null) {
logger.debug('[BaseClient] Loading history:', { conversationId, parentMessageId });
@@ -560,22 +611,41 @@ class BaseClient {
* @param {string | null} user
*/
async saveMessageToDatabase(message, endpointOptions, user = null) {
await saveMessage({
...message,
endpoint: this.options.endpoint,
unfinished: false,
user,
});
await saveConvo(user, {
conversationId: message.conversationId,
endpoint: this.options.endpoint,
endpointType: this.options.endpointType,
...endpointOptions,
});
if (this.user && user !== this.user) {
throw new Error('User mismatch.');
}
const savedMessage = await saveMessage(
this.options.req,
{
...message,
endpoint: this.options.endpoint,
unfinished: false,
user,
},
{ context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveMessage' },
);
if (this.skipSaveConvo) {
return { message: savedMessage };
}
const conversation = await saveConvo(
this.options.req,
{
conversationId: message.conversationId,
endpoint: this.options.endpoint,
endpointType: this.options.endpointType,
...endpointOptions,
},
{ context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo' },
);
return { message: savedMessage, conversation };
}
async updateMessageInDatabase(message) {
await updateMessage(message);
await updateMessage(this.options.req, message);
}
/**

View File

@@ -13,13 +13,20 @@ const {
endpointSettings,
EModelEndpoint,
VisionModes,
Constants,
AuthKeys,
} = require('librechat-data-provider');
const { encodeAndFormat } = require('~/server/services/Files/images');
const { formatMessage, createContextHandlers } = require('./prompts');
const { getModelMaxTokens } = require('~/utils');
const BaseClient = require('./BaseClient');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');
const {
formatMessage,
createContextHandlers,
titleInstruction,
truncateText,
} = require('./prompts');
const BaseClient = require('./BaseClient');
const loc = 'us-central1';
const publisher = 'google';
@@ -591,12 +598,16 @@ class GoogleClient extends BaseClient {
createLLM(clientOptions) {
const model = clientOptions.modelName ?? clientOptions.model;
if (this.project_id && this.isTextModel) {
logger.debug('Creating Google VertexAI client');
return new GoogleVertexAI(clientOptions);
} else if (this.project_id && this.isChatModel) {
logger.debug('Creating Chat Google VertexAI client');
return new ChatGoogleVertexAI(clientOptions);
} else if (this.project_id) {
logger.debug('Creating VertexAI client');
return new ChatVertexAI(clientOptions);
} else if (model.includes('1.5')) {
logger.debug('Creating GenAI client');
return new GenAI(this.apiKey).getGenerativeModel(
{
...clientOptions,
@@ -606,12 +617,14 @@ class GoogleClient extends BaseClient {
);
}
logger.debug('Creating Chat Google Generative AI client');
return new ChatGoogleGenerativeAI({ ...clientOptions, apiKey: this.apiKey });
}
async getCompletion(_payload, options = {}) {
const { onProgress, abortController } = options;
const { parameters, instances } = _payload;
const { onProgress, abortController } = options;
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
const { messages: _messages, context, examples: _examples } = instances?.[0] ?? {};
let examples;
@@ -691,6 +704,7 @@ class GoogleClient extends BaseClient {
delay,
});
reply += chunkText;
await sleep(streamRate);
}
return reply;
}
@@ -702,10 +716,17 @@ class GoogleClient extends BaseClient {
safetySettings: safetySettings,
});
let delay = this.isGenerativeModel ? 12 : 8;
if (modelName.includes('flash')) {
delay = 5;
let delay = this.options.streamRate || 8;
if (!this.options.streamRate) {
if (this.isGenerativeModel) {
delay = 12;
}
if (modelName.includes('flash')) {
delay = 5;
}
}
for await (const chunk of stream) {
const chunkText = chunk?.content ?? chunk;
await this.generateTextStream(chunkText, onProgress, {
@@ -717,6 +738,123 @@ class GoogleClient extends BaseClient {
return reply;
}
/**
* Stripped-down logic for generating a title. This uses the non-streaming APIs, since the user does not see titles streaming
*/
async titleChatCompletion(_payload, options = {}) {
const { abortController } = options;
const { parameters, instances } = _payload;
const { messages: _messages, examples: _examples } = instances?.[0] ?? {};
let clientOptions = { ...parameters, maxRetries: 2 };
logger.debug('Initialized title client options');
if (this.project_id) {
clientOptions['authOptions'] = {
credentials: {
...this.serviceKey,
},
projectId: this.project_id,
};
}
if (!parameters) {
clientOptions = { ...clientOptions, ...this.modelOptions };
}
if (this.isGenerativeModel && !this.project_id) {
clientOptions.modelName = clientOptions.model;
delete clientOptions.model;
}
const model = this.createLLM(clientOptions);
let reply = '';
const messages = this.isTextModel ? _payload.trim() : _messages;
const modelName = clientOptions.modelName ?? clientOptions.model ?? '';
if (modelName?.includes('1.5') && !this.project_id) {
logger.debug('Identified titling model as 1.5 version');
/** @type {GenerativeModel} */
const client = model;
const requestOptions = {
contents: _payload,
};
if (this.options?.promptPrefix?.length) {
requestOptions.systemInstruction = {
parts: [
{
text: this.options.promptPrefix,
},
],
};
}
const safetySettings = _payload.safetySettings;
requestOptions.safetySettings = safetySettings;
const result = await client.generateContent(requestOptions);
reply = result.response?.text();
return reply;
} else {
logger.debug('Beginning titling');
const safetySettings = _payload.safetySettings;
const titleResponse = await model.invoke(messages, {
signal: abortController.signal,
timeout: 7000,
safetySettings: safetySettings,
});
reply = titleResponse.content;
return reply;
}
}
async titleConvo({ text, responseText = '' }) {
let title = 'New Chat';
const convo = `||>User:
"${truncateText(text)}"
||>Response:
"${JSON.stringify(truncateText(responseText))}"`;
let { prompt: payload } = await this.buildMessages([
{
text: `Please generate ${titleInstruction}
${convo}
||>Title:`,
isCreatedByUser: true,
author: this.userLabel,
},
]);
if (this.isVisionModel) {
logger.warn(
`Current vision model does not support titling without an attachment; falling back to default model ${settings.model.default}`,
);
payload.parameters = { ...payload.parameters, model: settings.model.default };
}
try {
title = await this.titleChatCompletion(payload, {
abortController: new AbortController(),
onProgress: () => {},
});
} catch (e) {
logger.error('[GoogleClient] There was an issue generating the title', e);
}
logger.debug(`Title response: ${title}`);
return title;
}
getSaveOptions() {
return {
promptPrefix: this.options.promptPrefix,

View File

@@ -1,7 +1,9 @@
const { z } = require('zod');
const axios = require('axios');
const { Ollama } = require('ollama');
const { Constants } = require('librechat-data-provider');
const { deriveBaseURL } = require('~/utils');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');
const ollamaPayloadSchema = z.object({
@@ -40,6 +42,7 @@ const getValidBase64 = (imageUrl) => {
class OllamaClient {
constructor(options = {}) {
const host = deriveBaseURL(options.baseURL ?? 'http://localhost:11434');
this.streamRate = options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
/** @type {Ollama} */
this.client = new Ollama({ host });
}
@@ -136,6 +139,8 @@ class OllamaClient {
stream.controller.abort();
break;
}
await sleep(this.streamRate);
}
}
// TODO: regular completion

View File

@@ -27,7 +27,6 @@ const {
createContextHandlers,
} = require('./prompts');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { updateTokenWebsocket } = require('~/server/services/Files/Audio');
const { isEnabled, sleep } = require('~/server/utils');
const { handleOpenAIErrors } = require('./tools/util');
const spendTokens = require('~/models/spendTokens');
@@ -595,7 +594,6 @@ class OpenAIClient extends BaseClient {
payload,
(progressMessage) => {
if (progressMessage === '[DONE]') {
updateTokenWebsocket('[DONE]');
return;
}
@@ -1184,8 +1182,10 @@ ${convo}
});
}
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
if (this.message_file_map && this.isOllama) {
const ollamaClient = new OllamaClient({ baseURL });
const ollamaClient = new OllamaClient({ baseURL, streamRate });
return await ollamaClient.chatCompletion({
payload: modelOptions,
onProgress,
@@ -1223,8 +1223,6 @@ ${convo}
}
});
const azureDelay = this.modelOptions.model?.includes('gpt-4') ? 30 : 17;
for await (const chunk of stream) {
const token = chunk.choices[0]?.delta?.content || '';
intermediateReply += token;
@@ -1234,9 +1232,7 @@ ${convo}
break;
}
if (this.azure) {
await sleep(azureDelay);
}
await sleep(streamRate);
}
if (!UnexpectedRoleError) {

View File

@@ -1,5 +1,6 @@
const OpenAIClient = require('./OpenAIClient');
const { CallbackManager } = require('langchain/callbacks');
const { CacheKeys, Time } = require('librechat-data-provider');
const { BufferMemory, ChatMessageHistory } = require('langchain/memory');
const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents');
const { addImages, buildErrorInput, buildPromptPrefix } = require('./output_parsers');
@@ -11,6 +12,7 @@ const { SelfReflectionTool } = require('./tools');
const { isEnabled } = require('~/server/utils');
const { extractBaseURL } = require('~/utils');
const { loadTools } = require('./tools/util');
const { getLogStores } = require('~/cache');
const { logger } = require('~/config');
class PluginsClient extends OpenAIClient {
@@ -220,6 +222,13 @@ class PluginsClient extends OpenAIClient {
}
}
/**
*
* @param {TMessage} responseMessage
* @param {Partial<TMessage>} saveOptions
* @param {string} user
* @returns
*/
async handleResponseMessage(responseMessage, saveOptions, user) {
const { output, errorMessage, ...result } = this.result;
logger.debug('[PluginsClient][handleResponseMessage] Output:', {
@@ -238,12 +247,32 @@ class PluginsClient extends OpenAIClient {
await this.recordTokenUsage(responseMessage);
}
await this.saveMessageToDatabase(responseMessage, saveOptions, user);
this.responsePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user);
const messageCache = getLogStores(CacheKeys.MESSAGES);
messageCache.set(
responseMessage.messageId,
{
text: responseMessage.text,
complete: true,
},
Time.FIVE_MINUTES,
);
delete responseMessage.tokenCount;
return { ...responseMessage, ...result };
}
async sendMessage(message, opts = {}) {
/** @type {{ filteredTools: string[], includedTools: string[] }} */
const { filteredTools = [], includedTools = [] } = this.options.req.app.locals;
if (includedTools.length > 0) {
const tools = this.options.tools.filter((plugin) => includedTools.includes(plugin));
this.options.tools = tools;
} else {
const tools = this.options.tools.filter((plugin) => !filteredTools.includes(plugin));
this.options.tools = tools;
}
// If a message is edited, no tools can be used.
const completionMode = this.options.tools.length === 0 || opts.isEdited;
if (completionMode) {
@@ -301,7 +330,15 @@ class PluginsClient extends OpenAIClient {
if (payload) {
this.currentMessages = payload;
}
await this.saveMessageToDatabase(userMessage, saveOptions, user);
if (!this.skipSaveUserMessage) {
this.userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user);
if (typeof opts?.getReqData === 'function') {
opts.getReqData({
userMessagePromise: this.userMessagePromise,
});
}
}
if (isEnabled(process.env.CHECK_BALANCE)) {
await checkBalance({

View File

@@ -1,44 +1,3 @@
/*
module.exports = `You are ChatGPT, a Large Language model with useful tools.
Talk to the human and provide meaningful answers when questions are asked.
Use the tools when you need them, but use your own knowledge if you are confident of the answer. Keep answers short and concise.
A tool is not usually needed for creative requests, so do your best to answer them without tools.
Avoid repeating identical answers if it appears before. Only fulfill the human's requests, do not create extra steps beyond what the human has asked for.
Your input for 'Action' should be the name of tool used only.
Be honest. If you can't answer something, or a tool is not appropriate, say you don't know or answer to the best of your ability.
Attempt to fulfill the human's requests in as few actions as possible`;
*/
// module.exports = `You are ChatGPT, a highly knowledgeable and versatile large language model.
// Engage with the Human conversationally, providing concise and meaningful answers to questions. Utilize built-in tools when necessary, except for creative requests, where relying on your own knowledge is preferred. Aim for variety and avoid repetitive answers.
// For your 'Action' input, state the name of the tool used only, and honor user requests without adding extra steps. Always be honest; if you cannot provide an appropriate answer or tool, admit that or do your best.
// Strive to meet the user's needs efficiently with minimal actions.`;
// import {
// BasePromptTemplate,
// BaseStringPromptTemplate,
// SerializedBasePromptTemplate,
// renderTemplate,
// } from "langchain/prompts";
// prefix: `You are ChatGPT, a highly knowledgeable and versatile large language model.
// Your objective is to help users by understanding their intent and choosing the best action. Prioritize direct, specific responses. Use concise, varied answers and rely on your knowledge for creative tasks. Utilize tools when needed, and structure results for machine compatibility.
// prefix: `Objective: to comprehend human intentions based on user input and available tools. Goal: identify the best action to directly address the human's query. In your subsequent steps, you will utilize the chosen action. You may select multiple actions and list them in a meaningful order. Prioritize actions that directly relate to the user's query over general ones. Ensure that the generated thought is highly specific and explicit to best match the user's expectations. Construct the result in a manner that an online open-API would most likely expect. Provide concise and meaningful answers to human queries. Utilize tools when necessary. Relying on your own knowledge is preferred for creative requests. Aim for variety and avoid repetitive answers.
// # Available Actions & Tools:
// N/A: no suitable action, use your own knowledge.`,
// suffix: `Remember, all your responses MUST adhere to the described format and only respond if the format is followed. Output exactly with the requested format, avoiding any other text as this will be parsed by a machine. Following 'Action:', provide only one of the actions listed above. If a tool is not necessary, deduce this quickly and finish your response. Honor the human's requests without adding extra steps. Carry out tasks in the sequence written by the human. Always be honest; if you cannot provide an appropriate answer or tool, do your best with your own knowledge. Strive to meet the user's needs efficiently with minimal actions.`;
module.exports = {
'gpt3-v1': {
prefix: `Objective: Understand human intentions using user input and available tools. Goal: Identify the most suitable actions to directly address user queries.

View File

@@ -8,8 +8,6 @@ In your response, remember to follow these guidelines:
- If you don't know the answer, simply say that you don't know.
- If you are unsure how to answer, ask for clarification.
- Avoid mentioning that you obtained the information from the context.
Answer appropriately in the user's language.
`;
function createContextHandlers(req, userMessageContent) {
@@ -94,37 +92,40 @@ function createContextHandlers(req, userMessageContent) {
const resolvedQueries = await Promise.all(queryPromises);
const context = resolvedQueries
.map((queryResult, index) => {
const file = processedFiles[index];
let contextItems = queryResult.data;
const context =
resolvedQueries.length === 0
? '\n\tThe semantic search did not return any results.'
: resolvedQueries
.map((queryResult, index) => {
const file = processedFiles[index];
let contextItems = queryResult.data;
const generateContext = (currentContext) =>
`
const generateContext = (currentContext) =>
`
<file>
<filename>${file.filename}</filename>
<context>${currentContext}
</context>
</file>`;
if (useFullContext) {
return generateContext(`\n${contextItems}`);
}
if (useFullContext) {
return generateContext(`\n${contextItems}`);
}
contextItems = queryResult.data
.map((item) => {
const pageContent = item[0].page_content;
return `
contextItems = queryResult.data
.map((item) => {
const pageContent = item[0].page_content;
return `
<contextItem>
<![CDATA[${pageContent?.trim()}]]>
</contextItem>`;
})
.join('');
return generateContext(contextItems);
})
.join('');
return generateContext(contextItems);
})
.join('');
if (useFullContext) {
const prompt = `${header}
${context}

View File

@@ -1,4 +1,6 @@
const AnthropicClient = require('../AnthropicClient');
const { anthropicSettings } = require('librechat-data-provider');
const AnthropicClient = require('~/app/clients/AnthropicClient');
const HUMAN_PROMPT = '\n\nHuman:';
const AI_PROMPT = '\n\nAssistant:';
@@ -22,7 +24,7 @@ describe('AnthropicClient', () => {
const options = {
modelOptions: {
model,
temperature: 0.7,
temperature: anthropicSettings.temperature.default,
},
};
client = new AnthropicClient('test-api-key');
@@ -33,7 +35,42 @@ describe('AnthropicClient', () => {
it('should set the options correctly', () => {
expect(client.apiKey).toBe('test-api-key');
expect(client.modelOptions.model).toBe(model);
expect(client.modelOptions.temperature).toBe(0.7);
expect(client.modelOptions.temperature).toBe(anthropicSettings.temperature.default);
});
it('should set legacy maxOutputTokens for non-Claude-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-2',
maxOutputTokens: anthropicSettings.maxOutputTokens.default,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
});
it('should not set maxOutputTokens if not provided', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-3',
},
});
expect(client.modelOptions.maxOutputTokens).toBeUndefined();
});
it('should not set legacy maxOutputTokens for Claude-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-3-opus-20240229',
maxOutputTokens: anthropicSettings.legacy.maxOutputTokens.default,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
});
});
@@ -136,4 +173,57 @@ describe('AnthropicClient', () => {
expect(prompt).toContain('You are Claude-2');
});
});
describe('getClient', () => {
it('should set legacy maxOutputTokens for non-Claude-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-2',
maxOutputTokens: anthropicSettings.legacy.maxOutputTokens.default,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
});
it('should not set legacy maxOutputTokens for Claude-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-3-opus-20240229',
maxOutputTokens: anthropicSettings.legacy.maxOutputTokens.default,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
});
it('should add beta header for claude-3-5-sonnet model', () => {
const client = new AnthropicClient('test-api-key');
const modelOptions = {
model: 'claude-3-5-sonnet-20240307',
};
client.setOptions({ modelOptions });
const anthropicClient = client.getClient(modelOptions);
expect(anthropicClient._options.defaultHeaders).toBeDefined();
expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
'max-tokens-3-5-sonnet-2024-07-15',
);
});
it('should not add beta header for other models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-2',
},
});
const anthropicClient = client.getClient();
expect(anthropicClient.defaultHeaders).not.toHaveProperty('anthropic-beta');
});
});
});

View File

@@ -1,7 +1,7 @@
const { Constants } = require('librechat-data-provider');
const { initializeFakeClient } = require('./FakeClient');
jest.mock('../../../lib/db/connectDb');
jest.mock('~/lib/db/connectDb');
jest.mock('~/models', () => ({
User: jest.fn(),
Key: jest.fn(),
@@ -576,7 +576,11 @@ describe('BaseClient', () => {
const onStart = jest.fn();
const opts = { onStart };
await TestClient.sendMessage('Hello, world!', opts);
expect(onStart).toHaveBeenCalledWith(expect.objectContaining({ text: 'Hello, world!' }));
expect(onStart).toHaveBeenCalledWith(
expect.objectContaining({ text: 'Hello, world!' }),
expect.any(String),
);
});
test('saveMessageToDatabase is called with the correct arguments', async () => {
@@ -627,5 +631,32 @@ describe('BaseClient', () => {
}),
);
});
test('userMessagePromise is awaited before saving response message', async () => {
// Mock the saveMessageToDatabase method
TestClient.saveMessageToDatabase = jest.fn().mockImplementation(() => {
return new Promise((resolve) => setTimeout(resolve, 100)); // Simulate a delay
});
// Send a message
const messagePromise = TestClient.sendMessage('Hello, world!');
// Wait a short time to ensure the user message save has started
await new Promise((resolve) => setTimeout(resolve, 50));
// Check that saveMessageToDatabase has been called once (for the user message)
expect(TestClient.saveMessageToDatabase).toHaveBeenCalledTimes(1);
// Wait for the message to be fully processed
await messagePromise;
// Check that saveMessageToDatabase has been called twice (once for user message, once for response)
expect(TestClient.saveMessageToDatabase).toHaveBeenCalledTimes(2);
// Check the order of calls
const calls = TestClient.saveMessageToDatabase.mock.calls;
expect(calls[0][0].isCreatedByUser).toBe(true); // First call should be for user message
expect(calls[1][0].isCreatedByUser).toBe(false); // Second call should be for response message
});
});
});

View File

@@ -194,6 +194,7 @@ describe('PluginsClient', () => {
expect(client.getFunctionModelName('')).toBe('gpt-3.5-turbo');
});
});
describe('Azure OpenAI tests specific to Plugins', () => {
// TODO: add more tests for Azure OpenAI integration with Plugins
// let client;
@@ -220,4 +221,94 @@ describe('PluginsClient', () => {
spy.mockRestore();
});
});
describe('sendMessage with filtered tools', () => {
let TestAgent;
const apiKey = 'fake-api-key';
const mockTools = [{ name: 'tool1' }, { name: 'tool2' }, { name: 'tool3' }, { name: 'tool4' }];
beforeEach(() => {
TestAgent = new PluginsClient(apiKey, {
tools: mockTools,
modelOptions: {
model: 'gpt-3.5-turbo',
temperature: 0,
max_tokens: 2,
},
agentOptions: {
model: 'gpt-3.5-turbo',
},
});
TestAgent.options.req = {
app: {
locals: {},
},
};
TestAgent.sendMessage = jest.fn().mockImplementation(async () => {
const { filteredTools = [], includedTools = [] } = TestAgent.options.req.app.locals;
if (includedTools.length > 0) {
const tools = TestAgent.options.tools.filter((plugin) =>
includedTools.includes(plugin.name),
);
TestAgent.options.tools = tools;
} else {
const tools = TestAgent.options.tools.filter(
(plugin) => !filteredTools.includes(plugin.name),
);
TestAgent.options.tools = tools;
}
return {
text: 'Mocked response',
tools: TestAgent.options.tools,
};
});
});
test('should filter out tools when filteredTools is provided', async () => {
TestAgent.options.req.app.locals.filteredTools = ['tool1', 'tool3'];
const response = await TestAgent.sendMessage('Test message');
expect(response.tools).toHaveLength(2);
expect(response.tools).toEqual(
expect.arrayContaining([
expect.objectContaining({ name: 'tool2' }),
expect.objectContaining({ name: 'tool4' }),
]),
);
});
test('should only include specified tools when includedTools is provided', async () => {
TestAgent.options.req.app.locals.includedTools = ['tool2', 'tool4'];
const response = await TestAgent.sendMessage('Test message');
expect(response.tools).toHaveLength(2);
expect(response.tools).toEqual(
expect.arrayContaining([
expect.objectContaining({ name: 'tool2' }),
expect.objectContaining({ name: 'tool4' }),
]),
);
});
test('should prioritize includedTools over filteredTools', async () => {
TestAgent.options.req.app.locals.filteredTools = ['tool1', 'tool3'];
TestAgent.options.req.app.locals.includedTools = ['tool1', 'tool2'];
const response = await TestAgent.sendMessage('Test message');
expect(response.tools).toHaveLength(2);
expect(response.tools).toEqual(
expect.arrayContaining([
expect.objectContaining({ name: 'tool1' }),
expect.objectContaining({ name: 'tool2' }),
]),
);
});
test('should not modify tools when no filters are provided', async () => {
const response = await TestAgent.sendMessage('Test message');
expect(response.tools).toHaveLength(4);
expect(response.tools).toEqual(expect.arrayContaining(mockTools));
});
});
});

View File

@@ -1,13 +1,11 @@
const Keyv = require('keyv');
const { CacheKeys, ViolationTypes } = require('librechat-data-provider');
const { CacheKeys, ViolationTypes, Time } = require('librechat-data-provider');
const { logFile, violationFile } = require('./keyvFiles');
const { math, isEnabled } = require('~/server/utils');
const keyvRedis = require('./keyvRedis');
const keyvMongo = require('./keyvMongo');
const { BAN_DURATION, USE_REDIS } = process.env ?? {};
const THIRTY_MINUTES = 1800000;
const TEN_MINUTES = 600000;
const duration = math(BAN_DURATION, 7200000);
@@ -25,17 +23,25 @@ const config = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.CONFIG_STORE });
const audioRuns = isEnabled(USE_REDIS) // ttl: 30 minutes
? new Keyv({ store: keyvRedis, ttl: TEN_MINUTES })
: new Keyv({ namespace: CacheKeys.AUDIO_RUNS, ttl: TEN_MINUTES });
const roles = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.ROLES });
const audioRuns = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis, ttl: Time.TEN_MINUTES })
: new Keyv({ namespace: CacheKeys.AUDIO_RUNS, ttl: Time.TEN_MINUTES });
const messages = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis, ttl: Time.FIVE_MINUTES })
: new Keyv({ namespace: CacheKeys.MESSAGES, ttl: Time.FIVE_MINUTES });
const tokenConfig = isEnabled(USE_REDIS) // ttl: 30 minutes
? new Keyv({ store: keyvRedis, ttl: THIRTY_MINUTES })
: new Keyv({ namespace: CacheKeys.TOKEN_CONFIG, ttl: THIRTY_MINUTES });
? new Keyv({ store: keyvRedis, ttl: Time.THIRTY_MINUTES })
: new Keyv({ namespace: CacheKeys.TOKEN_CONFIG, ttl: Time.THIRTY_MINUTES });
const genTitle = isEnabled(USE_REDIS) // ttl: 2 minutes
? new Keyv({ store: keyvRedis, ttl: 120000 })
: new Keyv({ namespace: CacheKeys.GEN_TITLE, ttl: 120000 });
? new Keyv({ store: keyvRedis, ttl: Time.TWO_MINUTES })
: new Keyv({ namespace: CacheKeys.GEN_TITLE, ttl: Time.TWO_MINUTES });
const modelQueries = isEnabled(process.env.USE_REDIS)
? new Keyv({ store: keyvRedis })
@@ -43,9 +49,10 @@ const modelQueries = isEnabled(process.env.USE_REDIS)
const abortKeys = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.ABORT_KEYS, ttl: 600000 });
: new Keyv({ namespace: CacheKeys.ABORT_KEYS, ttl: Time.TEN_MINUTES });
const namespaces = {
[CacheKeys.ROLES]: roles,
[CacheKeys.CONFIG_STORE]: config,
pending_req,
[ViolationTypes.BAN]: new Keyv({ store: keyvMongo, namespace: CacheKeys.BANS, ttl: duration }),
@@ -63,6 +70,10 @@ const namespaces = {
[ViolationTypes.TTS_LIMIT]: createViolationInstance(ViolationTypes.TTS_LIMIT),
[ViolationTypes.STT_LIMIT]: createViolationInstance(ViolationTypes.STT_LIMIT),
[ViolationTypes.FILE_UPLOAD_LIMIT]: createViolationInstance(ViolationTypes.FILE_UPLOAD_LIMIT),
[ViolationTypes.VERIFY_EMAIL_LIMIT]: createViolationInstance(ViolationTypes.VERIFY_EMAIL_LIMIT),
[ViolationTypes.RESET_PASSWORD_LIMIT]: createViolationInstance(
ViolationTypes.RESET_PASSWORD_LIMIT,
),
[ViolationTypes.ILLEGAL_MODEL_REQUEST]: createViolationInstance(
ViolationTypes.ILLEGAL_MODEL_REQUEST,
),
@@ -72,6 +83,7 @@ const namespaces = {
[CacheKeys.GEN_TITLE]: genTitle,
[CacheKeys.MODEL_QUERIES]: modelQueries,
[CacheKeys.AUDIO_RUNS]: audioRuns,
[CacheKeys.MESSAGES]: messages,
};
/**

View File

@@ -1,6 +1,6 @@
const { isEnabled } = require('~/server/utils');
const getLogStores = require('./getLogStores');
const banViolation = require('./banViolation');
const { isEnabled } = require('../server/utils');
/**
* Logs the violation.

View File

@@ -109,6 +109,14 @@ const condenseArray = (item) => {
* @returns {string} - The formatted log message.
*/
const debugTraverse = winston.format.printf(({ level, message, timestamp, ...metadata }) => {
if (!message) {
return `${timestamp} ${level}`;
}
if (!message?.trim || typeof message !== 'string') {
return `${timestamp} ${level}: ${JSON.stringify(message)}`;
}
let msg = `${timestamp} ${level}: ${truncateLongStrings(message?.trim(), 150)}`;
try {
if (level !== 'debug') {

61 api/models/Categories.js Normal file
View File

@@ -0,0 +1,61 @@
const { logger } = require('~/config');
// const { Categories } = require('./schema/categories');
const options = [
{
label: '',
value: '',
},
{
label: 'idea',
value: 'idea',
},
{
label: 'travel',
value: 'travel',
},
{
label: 'teach_or_explain',
value: 'teach_or_explain',
},
{
label: 'write',
value: 'write',
},
{
label: 'shop',
value: 'shop',
},
{
label: 'code',
value: 'code',
},
{
label: 'misc',
value: 'misc',
},
{
label: 'roleplay',
value: 'roleplay',
},
{
label: 'finance',
value: 'finance',
},
];
module.exports = {
/**
* Retrieves the categories asynchronously.
* @returns {Promise<TGetCategoriesResponse>} An array of category objects.
* @throws {Error} If there is an error retrieving the categories.
*/
getCategories: async () => {
try {
// const categories = await Categories.find();
return options;
} catch (error) {
logger.error('Error getting categories', error);
return [];
}
},
};

View File

@@ -19,20 +19,39 @@ const getConvo = async (user, conversationId) => {
module.exports = {
Conversation,
saveConvo: async (user, { conversationId, newConversationId, ...convo }) => {
/**
* Saves a conversation to the database.
* @param {Object} req - The request object.
* @param {string} conversationId - The conversation's ID.
* @param {Object} metadata - Additional metadata to log for operation.
* @returns {Promise<TConversation>} The conversation object.
*/
saveConvo: async (req, { conversationId, newConversationId, ...convo }, metadata) => {
try {
if (metadata && metadata?.context) {
logger.debug(`[saveConvo] ${metadata.context}`);
}
const messages = await getMessages({ conversationId }, '_id');
const update = { ...convo, messages, user };
const update = { ...convo, messages, user: req.user.id };
if (newConversationId) {
update.conversationId = newConversationId;
}
return await Conversation.findOneAndUpdate({ conversationId: conversationId, user }, update, {
new: true,
upsert: true,
});
const conversation = await Conversation.findOneAndUpdate(
{ conversationId, user: req.user.id },
update,
{
new: true,
upsert: true,
},
);
return conversation.toObject();
} catch (error) {
logger.error('[saveConvo] Error saving conversation', error);
if (metadata && metadata?.context) {
logger.info(`[saveConvo] ${metadata.context}`);
}
return { message: 'Error saving conversation' };
}
},
@@ -54,13 +73,16 @@ module.exports = {
throw new Error('Failed to save conversations in bulk.');
}
},
getConvosByPage: async (user, pageNumber = 1, pageSize = 25, isArchived = false) => {
getConvosByPage: async (user, pageNumber = 1, pageSize = 25, isArchived = false, tags) => {
const query = { user };
if (isArchived) {
query.isArchived = true;
} else {
query.$or = [{ isArchived: false }, { isArchived: { $exists: false } }];
}
if (Array.isArray(tags) && tags.length > 0) {
query.tags = { $in: tags };
}
try {
const totalConvos = (await Conversation.countDocuments(query)) || 1;
const totalPages = Math.ceil(totalConvos / pageSize);

View File

@@ -0,0 +1,268 @@
//const crypto = require('crypto');
const logger = require('~/config/winston');
const Conversation = require('./schema/convoSchema');
const ConversationTag = require('./schema/conversationTagSchema');
const SAVED_TAG = 'Saved';
const updateTagsForConversation = async (user, conversationId, tags) => {
try {
const conversation = await Conversation.findOne({ user, conversationId });
if (!conversation) {
return { message: 'Conversation not found' };
}
const addedTags = tags.tags.filter((tag) => !conversation.tags.includes(tag));
const removedTags = conversation.tags.filter((tag) => !tags.tags.includes(tag));
for (const tag of addedTags) {
await ConversationTag.updateOne({ tag, user }, { $inc: { count: 1 } }, { upsert: true });
}
for (const tag of removedTags) {
await ConversationTag.updateOne({ tag, user }, { $inc: { count: -1 } });
}
conversation.tags = tags.tags;
await conversation.save({ timestamps: { updatedAt: false } });
return conversation.tags;
} catch (error) {
logger.error('[updateTagsToConversation] Error updating tags', error);
return { message: 'Error updating tags' };
}
};
const createConversationTag = async (user, data) => {
try {
const cTag = await ConversationTag.findOne({ user, tag: data.tag });
if (cTag) {
return cTag;
}
const addToConversation = data.addToConversation && data.conversationId;
const newTag = await ConversationTag.create({
user,
tag: data.tag,
count: 0,
description: data.description,
position: 1,
});
await ConversationTag.updateMany(
{ user, position: { $gte: 1 }, _id: { $ne: newTag._id } },
{ $inc: { position: 1 } },
);
if (addToConversation) {
const conversation = await Conversation.findOne({
user,
conversationId: data.conversationId,
});
if (conversation) {
const tags = [...(conversation.tags || []), data.tag];
await updateTagsForConversation(user, data.conversationId, { tags });
} else {
logger.warn('[updateTagsForConversation] Conversation not found', data.conversationId);
}
}
return await ConversationTag.findOne({ user, tag: data.tag });
} catch (error) {
logger.error('[createConversationTag] Error updating conversation tag', error);
return { message: 'Error updating conversation tag' };
}
};
const replaceOrRemoveTagInConversations = async (user, oldtag, newtag) => {
try {
const conversations = await Conversation.find({ user, tags: { $in: [oldtag] } });
for (const conversation of conversations) {
if (newtag && newtag !== '') {
conversation.tags = conversation.tags.map((tag) => (tag === oldtag ? newtag : tag));
} else {
conversation.tags = conversation.tags.filter((tag) => tag !== oldtag);
}
await conversation.save({ timestamps: { updatedAt: false } });
}
} catch (error) {
logger.error('[replaceOrRemoveTagInConversations] Error updating conversation tags', error);
return { message: 'Error updating conversation tags' };
}
};
const updateTagPosition = async (user, tag, newPosition) => {
try {
const cTag = await ConversationTag.findOne({ user, tag });
if (!cTag) {
return { message: 'Tag not found' };
}
const oldPosition = cTag.position;
if (newPosition === oldPosition) {
return cTag;
}
const updateOperations = [];
if (newPosition > oldPosition) {
// Move other tags up
updateOperations.push({
updateMany: {
filter: {
user,
position: { $gt: oldPosition, $lte: newPosition },
tag: { $ne: SAVED_TAG },
},
update: { $inc: { position: -1 } },
},
});
} else {
// Move other tags down
updateOperations.push({
updateMany: {
filter: {
user,
position: { $gte: newPosition, $lt: oldPosition },
tag: { $ne: SAVED_TAG },
},
update: { $inc: { position: 1 } },
},
});
}
// Update the target tag's position
updateOperations.push({
updateOne: {
filter: { _id: cTag._id },
update: { $set: { position: newPosition } },
},
});
await ConversationTag.bulkWrite(updateOperations);
return await ConversationTag.findById(cTag._id);
} catch (error) {
logger.error('[updateTagPosition] Error updating tag position', error);
return { message: 'Error updating tag position' };
}
};
module.exports = {
SAVED_TAG,
ConversationTag,
getConversationTags: async (user) => {
try {
const cTags = await ConversationTag.find({ user }).sort({ position: 1 }).lean();
cTags.sort((a, b) => (a.tag === SAVED_TAG ? -1 : b.tag === SAVED_TAG ? 1 : 0));
return cTags;
} catch (error) {
logger.error('[getShare] Error getting share link', error);
return { message: 'Error getting share link' };
}
},
createConversationTag,
updateConversationTag: async (user, tag, data) => {
try {
const cTag = await ConversationTag.findOne({ user, tag });
if (!cTag) {
return createConversationTag(user, data);
}
if (cTag.tag !== data.tag || cTag.description !== data.description) {
cTag.tag = data.tag;
cTag.description = data.description === undefined ? cTag.description : data.description;
await cTag.save();
}
if (data.position !== undefined && cTag.position !== data.position) {
await updateTagPosition(user, tag, data.position);
}
// update conversation tags properties
replaceOrRemoveTagInConversations(user, tag, data.tag);
return await ConversationTag.findOne({ user, tag: data.tag });
} catch (error) {
logger.error('[updateConversationTag] Error updating conversation tag', error);
return { message: 'Error updating conversation tag' };
}
},
deleteConversationTag: async (user, tag) => {
try {
const currentTag = await ConversationTag.findOne({ user, tag });
if (!currentTag) {
return;
}
await currentTag.deleteOne({ user, tag });
await replaceOrRemoveTagInConversations(user, tag, null);
return currentTag;
} catch (error) {
logger.error('[deleteConversationTag] Error deleting conversation tag', error);
return { message: 'Error deleting conversation tag' };
}
},
updateTagsForConversation,
rebuildConversationTags: async (user) => {
try {
const conversations = await Conversation.find({ user }).select('tags');
const tagCountMap = {};
// Count the occurrences of each tag
conversations.forEach((conversation) => {
conversation.tags.forEach((tag) => {
if (tagCountMap[tag]) {
tagCountMap[tag]++;
} else {
tagCountMap[tag] = 1;
}
});
});
const tags = await ConversationTag.find({ user }).sort({ position: -1 });
// Update existing tags and add new tags
for (const [tag, count] of Object.entries(tagCountMap)) {
const existingTag = tags.find((t) => t.tag === tag);
if (existingTag) {
existingTag.count = count;
await existingTag.save();
} else {
const newTag = new ConversationTag({ user, tag, count });
tags.push(newTag);
await newTag.save();
}
}
// Set count to 0 for tags that are not in the grouped tags
for (const tag of tags) {
if (!tagCountMap[tag.tag]) {
tag.count = 0;
await tag.save();
}
}
// Sort tags by position in descending order
tags.sort((a, b) => a.position - b.position);
// Move the tag with name "saved" to the first position
const savedTagIndex = tags.findIndex((tag) => tag.tag === SAVED_TAG);
if (savedTagIndex !== -1) {
const [savedTag] = tags.splice(savedTagIndex, 1);
tags.unshift(savedTag);
}
// Reassign positions starting from 0
tags.forEach((tag, index) => {
tag.position = index;
tag.save();
});
return tags;
} catch (error) {
logger.error('[rearrangeTags] Error rearranging tags', error);
return { message: 'Error rearranging tags' };
}
},
};

View File

@@ -97,8 +97,12 @@ const deleteFileByFilter = async (filter) => {
* @param {Array<string>} file_ids - The unique identifiers of the files to delete.
* @returns {Promise<Object>} A promise that resolves to the result of the deletion operation.
*/
const deleteFiles = async (file_ids) => {
return await File.deleteMany({ file_id: { $in: file_ids } });
const deleteFiles = async (file_ids, user) => {
let deleteQuery = { file_id: { $in: file_ids } };
if (user) {
deleteQuery = { user: user };
}
return await File.deleteMany(deleteQuery);
};
module.exports = {

View File

@@ -1,209 +1,342 @@
const { z } = require('zod');
const Message = require('./schema/messageSchema');
const logger = require('~/config/winston');
const { logger } = require('~/config');
const idSchema = z.string().uuid();
/**
* Saves a message in the database.
*
* @async
* @function saveMessage
* @param {Express.Request} req - The request object containing user information.
* @param {Object} params - The message data object.
* @param {string} params.endpoint - The endpoint where the message originated.
* @param {string} params.iconURL - The URL of the sender's icon.
* @param {string} params.messageId - The unique identifier for the message.
* @param {string} params.newMessageId - The new unique identifier for the message (if applicable).
* @param {string} params.conversationId - The identifier of the conversation.
* @param {string} [params.parentMessageId] - The identifier of the parent message, if any.
* @param {string} params.sender - The identifier of the sender.
* @param {string} params.text - The text content of the message.
* @param {boolean} params.isCreatedByUser - Indicates if the message was created by the user.
* @param {string} [params.error] - Any error associated with the message.
* @param {boolean} [params.unfinished] - Indicates if the message is unfinished.
* @param {Object[]} [params.files] - An array of files associated with the message.
* @param {boolean} [params.isEdited] - Indicates if the message was edited.
* @param {string} [params.finish_reason] - Reason for finishing the message.
* @param {number} [params.tokenCount] - The number of tokens in the message.
* @param {string} [params.plugin] - Plugin associated with the message.
* @param {string[]} [params.plugins] - An array of plugins associated with the message.
* @param {string} [params.model] - The model used to generate the message.
* @param {Object} [metadata] - Additional metadata for this operation
* @param {string} [metadata.context] - The context of the operation
* @returns {Promise<TMessage>} The updated or newly inserted message document.
* @throws {Error} If there is an error in saving the message.
*/
async function saveMessage(req, params, metadata) {
try {
if (!req || !req.user || !req.user.id) {
throw new Error('User not authenticated');
}
const {
text,
error,
model,
files,
plugin,
sender,
plugins,
iconURL,
endpoint,
isEdited,
messageId,
unfinished,
tokenCount,
newMessageId,
finish_reason,
conversationId,
parentMessageId,
isCreatedByUser,
} = params;
const validConvoId = idSchema.safeParse(conversationId);
if (!validConvoId.success) {
logger.warn(`Invalid conversation ID: ${conversationId}`);
if (metadata && metadata?.context) {
logger.info(`---\`saveMessage\` context: ${metadata.context}`);
}
logger.info(`---Invalid conversation ID Params:
${JSON.stringify(params, null, 2)}
`);
return;
}
const update = {
user: req.user.id,
iconURL,
endpoint,
messageId: newMessageId || messageId,
conversationId,
parentMessageId,
sender,
text,
isCreatedByUser,
isEdited,
finish_reason,
error,
unfinished,
tokenCount,
plugin,
plugins,
model,
};
if (files) {
update.files = files;
}
const message = await Message.findOneAndUpdate({ messageId, user: req.user.id }, update, {
upsert: true,
new: true,
});
return message.toObject();
} catch (err) {
logger.error('Error saving message:', err);
if (metadata && metadata?.context) {
logger.info(`---\`saveMessage\` context: ${metadata.context}`);
}
throw err;
}
}
/**
* Saves multiple messages in the database in bulk.
*
* @async
* @function bulkSaveMessages
* @param {Object[]} messages - An array of message objects to save.
* @returns {Promise<Object>} The result of the bulk write operation.
* @throws {Error} If there is an error in saving messages in bulk.
*/
async function bulkSaveMessages(messages) {
try {
const bulkOps = messages.map((message) => ({
updateOne: {
filter: { messageId: message.messageId },
update: message,
upsert: true,
},
}));
const result = await Message.bulkWrite(bulkOps);
return result;
} catch (err) {
logger.error('Error saving messages in bulk:', err);
throw err;
}
}
/**
* Records a message in the database.
*
* @async
* @function recordMessage
* @param {Object} params - The message data object.
* @param {string} params.user - The identifier of the user.
* @param {string} params.endpoint - The endpoint where the message originated.
* @param {string} params.messageId - The unique identifier for the message.
* @param {string} params.conversationId - The identifier of the conversation.
* @param {string} [params.parentMessageId] - The identifier of the parent message, if any.
* @param {Partial<TMessage>} rest - Any additional properties from the TMessage typedef not explicitly listed.
* @returns {Promise<Object>} The updated or newly inserted message document.
* @throws {Error} If there is an error in saving the message.
*/
async function recordMessage({
user,
endpoint,
messageId,
conversationId,
parentMessageId,
...rest
}) {
try {
// No parsing of convoId as may use threadId
const message = {
user,
endpoint,
messageId,
conversationId,
parentMessageId,
...rest,
};
return await Message.findOneAndUpdate({ user, messageId }, message, {
upsert: true,
new: true,
});
} catch (err) {
logger.error('Error recording message:', err);
throw err;
}
}
/**
* Updates the text of a message.
*
* @async
* @function updateMessageText
* @param {Object} params - The update data object.
* @param {Object} req - The request object.
* @param {string} params.messageId - The unique identifier for the message.
* @param {string} params.text - The new text content of the message.
* @returns {Promise<void>}
* @throws {Error} If there is an error in updating the message text.
*/
async function updateMessageText(req, { messageId, text }) {
try {
await Message.updateOne({ messageId, user: req.user.id }, { text });
} catch (err) {
logger.error('Error updating message text:', err);
throw err;
}
}
/**
* Updates a message.
*
* @async
* @function updateMessage
* @param {Object} message - The message object containing update data.
* @param {Object} req - The request object.
* @param {string} message.messageId - The unique identifier for the message.
* @param {string} [message.text] - The new text content of the message.
* @param {Object[]} [message.files] - The files associated with the message.
* @param {boolean} [message.isCreatedByUser] - Indicates if the message was created by the user.
* @param {string} [message.sender] - The identifier of the sender.
* @param {number} [message.tokenCount] - The number of tokens in the message.
* @param {Object} [metadata] - The operation metadata
* @param {string} [metadata.context] - The operation metadata
* @returns {Promise<TMessage>} The updated message document.
* @throws {Error} If there is an error in updating the message or if the message is not found.
*/
async function updateMessage(req, message, metadata) {
try {
const { messageId, ...update } = message;
update.isEdited = true;
const updatedMessage = await Message.findOneAndUpdate(
{ messageId, user: req.user.id },
update,
{
new: true,
},
);
if (!updatedMessage) {
throw new Error('Message not found or user not authorized.');
}
return {
messageId: updatedMessage.messageId,
conversationId: updatedMessage.conversationId,
parentMessageId: updatedMessage.parentMessageId,
sender: updatedMessage.sender,
text: updatedMessage.text,
isCreatedByUser: updatedMessage.isCreatedByUser,
tokenCount: updatedMessage.tokenCount,
isEdited: true,
};
} catch (err) {
logger.error('Error updating message:', err);
if (metadata && metadata?.context) {
logger.info(`---\`updateMessage\` context: ${metadata.context}`);
}
throw err;
}
}
/**
* Deletes messages in a conversation since a specific message.
*
* @async
* @function deleteMessagesSince
* @param {Object} params - The parameters object.
* @param {Object} req - The request object.
* @param {string} params.messageId - The unique identifier for the message.
* @param {string} params.conversationId - The identifier of the conversation.
* @returns {Promise<Number>} The number of deleted messages.
* @throws {Error} If there is an error in deleting messages.
*/
async function deleteMessagesSince(req, { messageId, conversationId }) {
try {
const message = await Message.findOne({ messageId, user: req.user.id }).lean();
if (message) {
const query = Message.find({ conversationId, user: req.user.id });
return await query.deleteMany({
createdAt: { $gt: message.createdAt },
});
}
return undefined;
} catch (err) {
logger.error('Error deleting messages:', err);
throw err;
}
}
/**
* Retrieves messages from the database.
* @async
* @function getMessages
* @param {Record<string, unknown>} filter - The filter criteria.
* @param {string | undefined} [select] - The fields to select.
* @returns {Promise<TMessage[]>} The messages that match the filter criteria.
* @throws {Error} If there is an error in retrieving messages.
*/
async function getMessages(filter, select) {
try {
if (select) {
return await Message.find(filter).select(select).sort({ createdAt: 1 }).lean();
}
return await Message.find(filter).sort({ createdAt: 1 }).lean();
} catch (err) {
logger.error('Error getting messages:', err);
throw err;
}
}
/**
* Deletes messages from the database.
*
* @async
* @function deleteMessages
* @param {Object} filter - The filter criteria to find messages to delete.
* @returns {Promise<Number>} The number of deleted messages.
* @throws {Error} If there is an error in deleting messages.
*/
async function deleteMessages(filter) {
try {
return await Message.deleteMany(filter);
} catch (err) {
logger.error('Error deleting messages:', err);
throw err;
}
}
module.exports = {
Message,
async saveMessage({
user,
endpoint,
iconURL,
messageId,
newMessageId,
conversationId,
parentMessageId,
sender,
text,
isCreatedByUser,
error,
unfinished,
files,
isEdited,
finish_reason,
tokenCount,
plugin,
plugins,
model,
}) {
try {
const validConvoId = idSchema.safeParse(conversationId);
if (!validConvoId.success) {
return;
}
const update = {
user,
iconURL,
endpoint,
messageId: newMessageId || messageId,
conversationId,
parentMessageId,
sender,
text,
isCreatedByUser,
isEdited,
finish_reason,
error,
unfinished,
tokenCount,
plugin,
plugins,
model,
};
if (files) {
update.files = files;
}
// may also need to update the conversation here
await Message.findOneAndUpdate({ messageId }, update, { upsert: true, new: true });
return {
messageId,
conversationId,
parentMessageId,
sender,
text,
isCreatedByUser,
tokenCount,
};
} catch (err) {
logger.error('Error saving message:', err);
throw new Error('Failed to save message.');
}
},
async bulkSaveMessages(messages) {
try {
const bulkOps = messages.map((message) => ({
updateOne: {
filter: { messageId: message.messageId },
update: message,
upsert: true,
},
}));
const result = await Message.bulkWrite(bulkOps);
return result;
} catch (err) {
logger.error('Error saving messages in bulk:', err);
throw new Error('Failed to save messages in bulk.');
}
},
/**
* Records a message in the database.
*
* @async
* @function recordMessage
* @param {Object} params - The message data object.
* @param {string} params.user - The identifier of the user.
* @param {string} params.endpoint - The endpoint where the message originated.
* @param {string} params.messageId - The unique identifier for the message.
* @param {string} params.conversationId - The identifier of the conversation.
* @param {string} [params.parentMessageId] - The identifier of the parent message, if any.
* @param {Partial<TMessage>} rest - Any additional properties from the TMessage typedef not explicitly listed.
* @returns {Promise<Object>} The updated or newly inserted message document.
* @throws {Error} If there is an error in saving the message.
*/
async recordMessage({ user, endpoint, messageId, conversationId, parentMessageId, ...rest }) {
try {
// No parsing of convoId as may use threadId
const message = {
user,
endpoint,
messageId,
conversationId,
parentMessageId,
...rest,
};
return await Message.findOneAndUpdate({ user, messageId }, message, {
upsert: true,
new: true,
});
} catch (err) {
logger.error('Error saving message:', err);
throw new Error('Failed to save message.');
}
},
async updateMessageText({ messageId, text }) {
try {
await Message.updateOne({ messageId }, { text });
} catch (err) {
logger.error('Error updating message text:', err);
throw new Error('Failed to update message text.');
}
},
async updateMessage(message) {
try {
const { messageId, ...update } = message;
update.isEdited = true;
const updatedMessage = await Message.findOneAndUpdate({ messageId }, update, {
new: true,
});
if (!updatedMessage) {
throw new Error('Message not found.');
}
return {
messageId: updatedMessage.messageId,
conversationId: updatedMessage.conversationId,
parentMessageId: updatedMessage.parentMessageId,
sender: updatedMessage.sender,
text: updatedMessage.text,
isCreatedByUser: updatedMessage.isCreatedByUser,
tokenCount: updatedMessage.tokenCount,
isEdited: true,
};
} catch (err) {
logger.error('Error updating message:', err);
throw new Error('Failed to update message.');
}
},
async deleteMessagesSince({ messageId, conversationId }) {
try {
const message = await Message.findOne({ messageId }).lean();
if (message) {
return await Message.find({ conversationId }).deleteMany({
createdAt: { $gt: message.createdAt },
});
}
} catch (err) {
logger.error('Error deleting messages:', err);
throw new Error('Failed to delete messages.');
}
},
/**
* Retrieves messages from the database.
* @param {Record<string, unknown>} filter
* @param {string | undefined} [select]
* @returns
*/
async getMessages(filter, select) {
try {
if (select) {
return await Message.find(filter).select(select).sort({ createdAt: 1 }).lean();
}
return await Message.find(filter).sort({ createdAt: 1 }).lean();
} catch (err) {
logger.error('Error getting messages:', err);
throw new Error('Failed to get messages.');
}
},
async deleteMessages(filter) {
try {
return await Message.deleteMany(filter);
} catch (err) {
logger.error('Error deleting messages:', err);
throw new Error('Failed to delete messages.');
}
},
saveMessage,
bulkSaveMessages,
recordMessage,
updateMessageText,
updateMessage,
deleteMessagesSince,
getMessages,
deleteMessages,
};
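A brief usage sketch of the refactored, user-scoped message helpers (not part of the diff); the require path mirrors the spec file below, and an Express req object carrying req.user.id is now required by the new signatures:

// Hypothetical usage of the new user-scoped signatures.
const { v4: uuidv4 } = require('uuid');
const { saveMessage, updateMessageText, deleteMessagesSince } = require('~/models/Message');

async function example(req) {
  const conversationId = uuidv4(); // must be a valid UUID, otherwise saveMessage returns undefined
  const saved = await saveMessage(
    req,
    { messageId: 'msg-1', conversationId, sender: 'User', text: 'hi', isCreatedByUser: true },
    { context: 'example' }, // optional metadata, only used for logging
  );
  await updateMessageText(req, { messageId: 'msg-1', text: 'hi (edited)' });
  // Deletes messages created after msg-1 in this conversation, scoped to req.user.id.
  await deleteMessagesSince(req, { messageId: 'msg-1', conversationId });
  return saved;
}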

239
api/models/Message.spec.js Normal file
View File

@@ -0,0 +1,239 @@
const mongoose = require('mongoose');
const { v4: uuidv4 } = require('uuid');
jest.mock('mongoose');
const mockFindQuery = {
select: jest.fn().mockReturnThis(),
sort: jest.fn().mockReturnThis(),
lean: jest.fn().mockReturnThis(),
deleteMany: jest.fn().mockResolvedValue({ deletedCount: 1 }),
};
const mockSchema = {
findOneAndUpdate: jest.fn(),
updateOne: jest.fn(),
findOne: jest.fn(() => ({
lean: jest.fn(),
})),
find: jest.fn(() => mockFindQuery),
deleteMany: jest.fn(),
};
mongoose.model.mockReturnValue(mockSchema);
jest.mock('~/models/schema/messageSchema', () => mockSchema);
jest.mock('~/config/winston', () => ({
error: jest.fn(),
}));
const {
saveMessage,
getMessages,
updateMessage,
deleteMessages,
updateMessageText,
deleteMessagesSince,
} = require('~/models/Message');
describe('Message Operations', () => {
let mockReq;
let mockMessage;
beforeEach(() => {
jest.clearAllMocks();
mockReq = {
user: { id: 'user123' },
};
mockMessage = {
messageId: 'msg123',
conversationId: uuidv4(),
text: 'Hello, world!',
user: 'user123',
};
mockSchema.findOneAndUpdate.mockResolvedValue({
toObject: () => mockMessage,
});
});
describe('saveMessage', () => {
it('should save a message for an authenticated user', async () => {
const result = await saveMessage(mockReq, mockMessage);
expect(result).toEqual(mockMessage);
expect(mockSchema.findOneAndUpdate).toHaveBeenCalledWith(
{ messageId: 'msg123', user: 'user123' },
expect.objectContaining({ user: 'user123' }),
expect.any(Object),
);
});
it('should throw an error for unauthenticated user', async () => {
mockReq.user = null;
await expect(saveMessage(mockReq, mockMessage)).rejects.toThrow('User not authenticated');
});
it('should return undefined for an invalid conversation ID', async () => {
mockMessage.conversationId = 'invalid-id';
await expect(saveMessage(mockReq, mockMessage)).resolves.toBeUndefined();
});
});
describe('updateMessageText', () => {
it('should update message text for the authenticated user', async () => {
await updateMessageText(mockReq, { messageId: 'msg123', text: 'Updated text' });
expect(mockSchema.updateOne).toHaveBeenCalledWith(
{ messageId: 'msg123', user: 'user123' },
{ text: 'Updated text' },
);
});
});
describe('updateMessage', () => {
it('should update a message for the authenticated user', async () => {
mockSchema.findOneAndUpdate.mockResolvedValue(mockMessage);
const result = await updateMessage(mockReq, { messageId: 'msg123', text: 'Updated text' });
expect(result).toEqual(
expect.objectContaining({
messageId: 'msg123',
text: 'Hello, world!',
isEdited: true,
}),
);
});
it('should throw an error if message is not found', async () => {
mockSchema.findOneAndUpdate.mockResolvedValue(null);
await expect(
updateMessage(mockReq, { messageId: 'nonexistent', text: 'Test' }),
).rejects.toThrow('Message not found or user not authorized.');
});
});
describe('deleteMessagesSince', () => {
it('should delete messages only for the authenticated user', async () => {
mockSchema.findOne().lean.mockResolvedValueOnce({ createdAt: new Date() });
mockFindQuery.deleteMany.mockResolvedValueOnce({ deletedCount: 1 });
const result = await deleteMessagesSince(mockReq, {
messageId: 'msg123',
conversationId: 'convo123',
});
expect(mockSchema.findOne).toHaveBeenCalledWith({ messageId: 'msg123', user: 'user123' });
expect(mockSchema.find).not.toHaveBeenCalled();
expect(result).toBeUndefined();
});
it('should return undefined if no message is found', async () => {
mockSchema.findOne().lean.mockResolvedValueOnce(null);
const result = await deleteMessagesSince(mockReq, {
messageId: 'nonexistent',
conversationId: 'convo123',
});
expect(result).toBeUndefined();
});
});
describe('getMessages', () => {
it('should retrieve messages with the correct filter', async () => {
const filter = { conversationId: 'convo123' };
await getMessages(filter);
expect(mockSchema.find).toHaveBeenCalledWith(filter);
expect(mockFindQuery.sort).toHaveBeenCalledWith({ createdAt: 1 });
expect(mockFindQuery.lean).toHaveBeenCalled();
});
});
describe('deleteMessages', () => {
it('should delete messages with the correct filter', async () => {
await deleteMessages({ user: 'user123' });
expect(mockSchema.deleteMany).toHaveBeenCalledWith({ user: 'user123' });
});
});
describe('Conversation Hijacking Prevention', () => {
it('should not allow editing a message in another user\'s conversation', async () => {
const attackerReq = { user: { id: 'attacker123' } };
const victimConversationId = 'victim-convo-123';
const victimMessageId = 'victim-msg-123';
mockSchema.findOneAndUpdate.mockResolvedValue(null);
await expect(
updateMessage(attackerReq, {
messageId: victimMessageId,
conversationId: victimConversationId,
text: 'Hacked message',
}),
).rejects.toThrow('Message not found or user not authorized.');
expect(mockSchema.findOneAndUpdate).toHaveBeenCalledWith(
{ messageId: victimMessageId, user: 'attacker123' },
expect.anything(),
expect.anything(),
);
});
it('should not allow deleting messages from another user\'s conversation', async () => {
const attackerReq = { user: { id: 'attacker123' } };
const victimConversationId = 'victim-convo-123';
const victimMessageId = 'victim-msg-123';
mockSchema.findOne().lean.mockResolvedValueOnce(null); // Simulating message not found for this user
const result = await deleteMessagesSince(attackerReq, {
messageId: victimMessageId,
conversationId: victimConversationId,
});
expect(result).toBeUndefined();
expect(mockSchema.findOne).toHaveBeenCalledWith({
messageId: victimMessageId,
user: 'attacker123',
});
});
it('should not allow inserting a new message into another user\'s conversation', async () => {
const attackerReq = { user: { id: 'attacker123' } };
const victimConversationId = uuidv4(); // Use a valid UUID
await expect(
saveMessage(attackerReq, {
conversationId: victimConversationId,
text: 'Inserted malicious message',
messageId: 'new-msg-123',
}),
).resolves.not.toThrow(); // It should not throw an error
// Check that the message was saved with the attacker's user ID
expect(mockSchema.findOneAndUpdate).toHaveBeenCalledWith(
{ messageId: 'new-msg-123', user: 'attacker123' },
expect.objectContaining({
user: 'attacker123',
conversationId: victimConversationId,
}),
expect.anything(),
);
});
it('should allow retrieving messages from any conversation', async () => {
const victimConversationId = 'victim-convo-123';
await getMessages({ conversationId: victimConversationId });
expect(mockSchema.find).toHaveBeenCalledWith({
conversationId: victimConversationId,
});
mockSchema.find.mockReturnValueOnce({
select: jest.fn().mockReturnThis(),
sort: jest.fn().mockReturnThis(),
lean: jest.fn().mockResolvedValue([{ text: 'Test message' }]),
});
const result = await getMessages({ conversationId: victimConversationId });
expect(result).toEqual([{ text: 'Test message' }]);
});
});
});

90
api/models/Project.js Normal file
View File

@@ -0,0 +1,90 @@
const { model } = require('mongoose');
const projectSchema = require('~/models/schema/projectSchema');
const Project = model('Project', projectSchema);
/**
* Retrieve a project by ID and convert the found project document to a plain object.
*
* @param {string} projectId - The ID of the project to find and return as a plain object.
* @param {string|string[]} [fieldsToSelect] - The fields to include or exclude in the returned document.
* @returns {Promise<MongoProject>} A plain object representing the project document, or `null` if no project is found.
*/
const getProjectById = async function (projectId, fieldsToSelect = null) {
const query = Project.findById(projectId);
if (fieldsToSelect) {
query.select(fieldsToSelect);
}
return await query.lean();
};
/**
* Retrieve a project by name and convert the found project document to a plain object.
* If the project with the given name doesn't exist and the name is "instance", create it and return the lean version.
*
* @param {string} projectName - The name of the project to find or create.
* @param {string|string[]} [fieldsToSelect] - The fields to include or exclude in the returned document.
* @returns {Promise<MongoProject>} A plain object representing the project document.
*/
const getProjectByName = async function (projectName, fieldsToSelect = null) {
const query = { name: projectName };
const update = { $setOnInsert: { name: projectName } };
const options = {
new: true,
upsert: projectName === 'instance',
lean: true,
select: fieldsToSelect,
};
return await Project.findOneAndUpdate(query, update, options);
};
/**
* Add an array of prompt group IDs to a project's promptGroupIds array, ensuring uniqueness.
*
* @param {string} projectId - The ID of the project to update.
* @param {string[]} promptGroupIds - The array of prompt group IDs to add to the project.
* @returns {Promise<MongoProject>} The updated project document.
*/
const addGroupIdsToProject = async function (projectId, promptGroupIds) {
return await Project.findByIdAndUpdate(
projectId,
{ $addToSet: { promptGroupIds: { $each: promptGroupIds } } },
{ new: true },
);
};
/**
* Remove an array of prompt group IDs from a project's promptGroupIds array.
*
* @param {string} projectId - The ID of the project to update.
* @param {string[]} promptGroupIds - The array of prompt group IDs to remove from the project.
* @returns {Promise<MongoProject>} The updated project document.
*/
const removeGroupIdsFromProject = async function (projectId, promptGroupIds) {
return await Project.findByIdAndUpdate(
projectId,
{ $pull: { promptGroupIds: { $in: promptGroupIds } } },
{ new: true },
);
};
/**
* Remove a prompt group ID from all projects.
*
* @param {string} promptGroupId - The ID of the prompt group to remove from projects.
* @returns {Promise<void>}
*/
const removeGroupFromAllProjects = async (promptGroupId) => {
await Project.updateMany({}, { $pull: { promptGroupIds: promptGroupId } });
};
module.exports = {
getProjectById,
getProjectByName,
addGroupIdsToProject,
removeGroupIdsFromProject,
removeGroupFromAllProjects,
};
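A usage sketch for the new Project helpers (not part of the diff); the special 'instance' name is the only one getProjectByName will upsert, and everything else here is an assumption:

// Hypothetical usage of the exported helpers.
const {
  getProjectByName,
  addGroupIdsToProject,
  removeGroupIdsFromProject,
} = require('~/models/Project');

async function shareGroupGlobally(promptGroupId) {
  // Upserts the global "instance" project if it does not exist yet.
  const project = await getProjectByName('instance', 'promptGroupIds');
  // $addToSet keeps promptGroupIds unique even on repeated calls.
  await addGroupIdsToProject(project._id, [promptGroupId]);
  return project._id;
}

async function unshareGroup(projectId, promptGroupId) {
  return removeGroupIdsFromProject(projectId, [promptGroupId]);
}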

View File

@@ -1,52 +1,528 @@
const mongoose = require('mongoose');
const { ObjectId } = require('mongodb');
const { SystemRoles, SystemCategories } = require('librechat-data-provider');
const {
getProjectByName,
addGroupIdsToProject,
removeGroupIdsFromProject,
removeGroupFromAllProjects,
} = require('./Project');
const { Prompt, PromptGroup } = require('./schema/promptSchema');
const { logger } = require('~/config');
const promptSchema = mongoose.Schema(
{
title: {
type: String,
required: true,
/**
* Create a pipeline for the aggregation to get prompt groups
* @param {Object} query
* @param {number} skip
* @param {number} limit
* @returns {[Object]} - The pipeline for the aggregation
*/
const createGroupPipeline = (query, skip, limit) => {
return [
{ $match: query },
{ $sort: { createdAt: -1 } },
{ $skip: skip },
{ $limit: limit },
{
$lookup: {
from: 'prompts',
localField: 'productionId',
foreignField: '_id',
as: 'productionPrompt',
},
},
prompt: {
type: String,
required: true,
{ $unwind: { path: '$productionPrompt', preserveNullAndEmptyArrays: true } },
{
$project: {
name: 1,
numberOfGenerations: 1,
oneliner: 1,
category: 1,
projectIds: 1,
productionId: 1,
author: 1,
authorName: 1,
createdAt: 1,
updatedAt: 1,
'productionPrompt.prompt': 1,
// 'productionPrompt._id': 1,
// 'productionPrompt.type': 1,
},
},
category: {
type: String,
},
},
{ timestamps: true },
);
];
};
const Prompt = mongoose.models.Prompt || mongoose.model('Prompt', promptSchema);
/**
* Create a pipeline for the aggregation to get all prompt groups
* @param {Object} query
* @param {Partial<MongoPromptGroup>} $project
* @returns {[Object]} - The pipeline for the aggregation
*/
const createAllGroupsPipeline = (
query,
$project = {
name: 1,
oneliner: 1,
category: 1,
author: 1,
authorName: 1,
createdAt: 1,
updatedAt: 1,
command: 1,
'productionPrompt.prompt': 1,
},
) => {
return [
{ $match: query },
{ $sort: { createdAt: -1 } },
{
$lookup: {
from: 'prompts',
localField: 'productionId',
foreignField: '_id',
as: 'productionPrompt',
},
},
{ $unwind: { path: '$productionPrompt', preserveNullAndEmptyArrays: true } },
{
$project,
},
];
};
/**
* Get all prompt groups with filters
* @param {Object} req
* @param {TPromptGroupsWithFilterRequest} filter
* @returns {Promise<PromptGroupListResponse>}
*/
const getAllPromptGroups = async (req, filter) => {
try {
const { name, ...query } = filter;
if (!query.author) {
throw new Error('Author is required');
}
let searchShared = true;
let searchSharedOnly = false;
if (name) {
query.name = new RegExp(name, 'i');
}
if (!query.category) {
delete query.category;
} else if (query.category === SystemCategories.MY_PROMPTS) {
searchShared = false;
delete query.category;
} else if (query.category === SystemCategories.NO_CATEGORY) {
query.category = '';
} else if (query.category === SystemCategories.SHARED_PROMPTS) {
searchSharedOnly = true;
delete query.category;
}
let combinedQuery = query;
if (searchShared) {
const project = await getProjectByName('instance', 'promptGroupIds');
if (project && project.promptGroupIds.length > 0) {
const projectQuery = { _id: { $in: project.promptGroupIds }, ...query };
delete projectQuery.author;
combinedQuery = searchSharedOnly ? projectQuery : { $or: [projectQuery, query] };
}
}
const promptGroupsPipeline = createAllGroupsPipeline(combinedQuery);
return await PromptGroup.aggregate(promptGroupsPipeline).exec();
} catch (error) {
console.error('Error getting all prompt groups', error);
return { message: 'Error getting all prompt groups' };
}
};
/**
* Get prompt groups with filters
* @param {Object} req
* @param {TPromptGroupsWithFilterRequest} filter
* @returns {Promise<PromptGroupListResponse>}
*/
const getPromptGroups = async (req, filter) => {
try {
const { pageNumber = 1, pageSize = 10, name, ...query } = filter;
const validatedPageNumber = Math.max(parseInt(pageNumber, 10), 1);
const validatedPageSize = Math.max(parseInt(pageSize, 10), 1);
if (!query.author) {
throw new Error('Author is required');
}
let searchShared = true;
let searchSharedOnly = false;
if (name) {
query.name = new RegExp(name, 'i');
}
if (!query.category) {
delete query.category;
} else if (query.category === SystemCategories.MY_PROMPTS) {
searchShared = false;
delete query.category;
} else if (query.category === SystemCategories.NO_CATEGORY) {
query.category = '';
} else if (query.category === SystemCategories.SHARED_PROMPTS) {
searchSharedOnly = true;
delete query.category;
}
let combinedQuery = query;
if (searchShared) {
// const projects = req.user.projects || []; // TODO: handle multiple projects
const project = await getProjectByName('instance', 'promptGroupIds');
if (project && project.promptGroupIds.length > 0) {
const projectQuery = { _id: { $in: project.promptGroupIds }, ...query };
delete projectQuery.author;
combinedQuery = searchSharedOnly ? projectQuery : { $or: [projectQuery, query] };
}
}
const skip = (validatedPageNumber - 1) * validatedPageSize;
const limit = validatedPageSize;
const promptGroupsPipeline = createGroupPipeline(combinedQuery, skip, limit);
const totalPromptGroupsPipeline = [{ $match: combinedQuery }, { $count: 'total' }];
const [promptGroupsResults, totalPromptGroupsResults] = await Promise.all([
PromptGroup.aggregate(promptGroupsPipeline).exec(),
PromptGroup.aggregate(totalPromptGroupsPipeline).exec(),
]);
const promptGroups = promptGroupsResults;
const totalPromptGroups =
totalPromptGroupsResults.length > 0 ? totalPromptGroupsResults[0].total : 0;
return {
promptGroups,
pageNumber: validatedPageNumber.toString(),
pageSize: validatedPageSize.toString(),
pages: Math.ceil(totalPromptGroups / validatedPageSize).toString(),
};
} catch (error) {
console.error('Error getting prompt groups', error);
return { message: 'Error getting prompt groups' };
}
};
module.exports = {
savePrompt: async ({ title, prompt }) => {
getPromptGroups,
getAllPromptGroups,
/**
* Create a prompt and its respective group
* @param {TCreatePromptRecord} saveData
* @returns {Promise<TCreatePromptResponse>}
*/
createPromptGroup: async (saveData) => {
try {
await Prompt.create({
title,
prompt,
});
return { title, prompt };
const { prompt, group, author, authorName } = saveData;
let newPromptGroup = await PromptGroup.findOneAndUpdate(
{ ...group, author, authorName, productionId: null },
{ $setOnInsert: { ...group, author, authorName, productionId: null } },
{ new: true, upsert: true },
)
.lean()
.select('-__v')
.exec();
const newPrompt = await Prompt.findOneAndUpdate(
{ ...prompt, author, groupId: newPromptGroup._id },
{ $setOnInsert: { ...prompt, author, groupId: newPromptGroup._id } },
{ new: true, upsert: true },
)
.lean()
.select('-__v')
.exec();
newPromptGroup = await PromptGroup.findByIdAndUpdate(
newPromptGroup._id,
{ productionId: newPrompt._id },
{ new: true },
)
.lean()
.select('-__v')
.exec();
return {
prompt: newPrompt,
group: {
...newPromptGroup,
productionPrompt: { prompt: newPrompt.prompt },
},
};
} catch (error) {
logger.error('Error saving prompt group', error);
throw new Error('Error saving prompt group');
}
},
/**
* Save a prompt
* @param {TCreatePromptRecord} saveData
* @returns {Promise<TCreatePromptResponse>}
*/
savePrompt: async (saveData) => {
try {
const { prompt, author } = saveData;
const newPromptData = {
...prompt,
author,
};
/** @type {TPrompt} */
let newPrompt;
try {
newPrompt = await Prompt.create(newPromptData);
} catch (error) {
if (error?.message?.includes('groupId_1_version_1')) {
await Prompt.db.collection('prompts').dropIndex('groupId_1_version_1');
} else {
throw error;
}
newPrompt = await Prompt.create(newPromptData);
}
return { prompt: newPrompt };
} catch (error) {
logger.error('Error saving prompt', error);
return { prompt: 'Error saving prompt' };
return { message: 'Error saving prompt' };
}
},
getPrompts: async (filter) => {
try {
return await Prompt.find(filter).lean();
return await Prompt.find(filter).sort({ createdAt: -1 }).lean();
} catch (error) {
logger.error('Error getting prompts', error);
return { prompt: 'Error getting prompts' };
return { message: 'Error getting prompts' };
}
},
deletePrompts: async (filter) => {
getPrompt: async (filter) => {
try {
return await Prompt.deleteMany(filter);
if (filter.groupId) {
filter.groupId = new ObjectId(filter.groupId);
}
return await Prompt.findOne(filter).lean();
} catch (error) {
logger.error('Error deleting prompts', error);
return { prompt: 'Error deleting prompts' };
logger.error('Error getting prompt', error);
return { message: 'Error getting prompt' };
}
},
/**
* Get a random sampling of prompt groups (one per non-empty category)
* @param {TGetRandomPromptsRequest} filter
* @returns {Promise<TGetRandomPromptsResponse>}
*/
getRandomPromptGroups: async (filter) => {
try {
const result = await PromptGroup.aggregate([
{
$match: {
category: { $ne: '' },
},
},
{
$group: {
_id: '$category',
promptGroup: { $first: '$$ROOT' },
},
},
{
$replaceRoot: { newRoot: '$promptGroup' },
},
{
$sample: { size: +filter.limit + +filter.skip },
},
{
$skip: +filter.skip,
},
{
$limit: +filter.limit,
},
]);
return { prompts: result };
} catch (error) {
logger.error('Error getting prompt groups', error);
return { message: 'Error getting prompt groups' };
}
},
getPromptGroupsWithPrompts: async (filter) => {
try {
return await PromptGroup.findOne(filter)
.populate({
path: 'prompts',
select: '-_id -__v -user',
})
.select('-_id -__v -user')
.lean();
} catch (error) {
logger.error('Error getting prompt groups', error);
return { message: 'Error getting prompt groups' };
}
},
getPromptGroup: async (filter) => {
try {
return await PromptGroup.findOne(filter).lean();
} catch (error) {
logger.error('Error getting prompt group', error);
return { message: 'Error getting prompt group' };
}
},
/**
* Deletes a prompt and its corresponding prompt group if it is the last prompt in the group.
*
* @param {Object} options - The options for deleting the prompt.
* @param {ObjectId|string} options.promptId - The ID of the prompt to delete.
* @param {ObjectId|string} options.groupId - The ID of the prompt's group.
* @param {ObjectId|string} options.author - The ID of the prompt's author.
* @param {string} options.role - The role of the prompt's author.
* @return {Promise<TDeletePromptResponse>} An object containing the result of the deletion.
* If the prompt was deleted successfully, the object will have a property 'prompt' with the value 'Prompt deleted successfully'.
* If the prompt group was deleted successfully, the object will have a property 'promptGroup' with the message 'Prompt group deleted successfully' and id of the deleted group.
* If there was an error deleting the prompt, the object will have a property 'message' with the value 'Error deleting prompt'.
*/
deletePrompt: async ({ promptId, groupId, author, role }) => {
const query = { _id: promptId, groupId, author };
if (role === SystemRoles.ADMIN) {
delete query.author;
}
const { deletedCount } = await Prompt.deleteOne(query);
if (deletedCount === 0) {
throw new Error('Failed to delete the prompt');
}
const remainingPrompts = await Prompt.find({ groupId })
.select('_id')
.sort({ createdAt: 1 })
.lean();
if (remainingPrompts.length === 0) {
await PromptGroup.deleteOne({ _id: groupId });
await removeGroupFromAllProjects(groupId);
return {
prompt: 'Prompt deleted successfully',
promptGroup: {
message: 'Prompt group deleted successfully',
id: groupId,
},
};
} else {
const promptGroup = await PromptGroup.findById(groupId).lean();
if (promptGroup.productionId.toString() === promptId.toString()) {
await PromptGroup.updateOne(
{ _id: groupId },
{ productionId: remainingPrompts[remainingPrompts.length - 1]._id },
);
}
return { prompt: 'Prompt deleted successfully' };
}
},
/**
* Update prompt group
* @param {Partial<MongoPromptGroup>} filter - Filter to find prompt group
* @param {Partial<MongoPromptGroup>} data - Data to update
* @returns {Promise<TUpdatePromptGroupResponse>}
*/
updatePromptGroup: async (filter, data) => {
try {
const updateOps = {};
if (data.removeProjectIds) {
for (const projectId of data.removeProjectIds) {
await removeGroupIdsFromProject(projectId, [filter._id]);
}
updateOps.$pull = { projectIds: { $in: data.removeProjectIds } };
delete data.removeProjectIds;
}
if (data.projectIds) {
for (const projectId of data.projectIds) {
await addGroupIdsToProject(projectId, [filter._id]);
}
updateOps.$addToSet = { projectIds: { $each: data.projectIds } };
delete data.projectIds;
}
const updateData = { ...data, ...updateOps };
const updatedDoc = await PromptGroup.findOneAndUpdate(filter, updateData, {
new: true,
upsert: false,
});
if (!updatedDoc) {
throw new Error('Prompt group not found');
}
return updatedDoc;
} catch (error) {
logger.error('Error updating prompt group', error);
return { message: 'Error updating prompt group' };
}
},
/**
* Function to make a prompt production based on its ID.
* @param {String} promptId - The ID of the prompt to make production.
* @returns {Object} The result of the production operation.
*/
makePromptProduction: async (promptId) => {
try {
const prompt = await Prompt.findById(promptId).lean();
if (!prompt) {
throw new Error('Prompt not found');
}
await PromptGroup.findByIdAndUpdate(
prompt.groupId,
{ productionId: prompt._id },
{ new: true },
)
.lean()
.exec();
return {
message: 'Prompt production made successfully',
};
} catch (error) {
logger.error('Error making prompt production', error);
return { message: 'Error making prompt production' };
}
},
updatePromptLabels: async (_id, labels) => {
try {
const response = await Prompt.updateOne({ _id }, { $set: { labels } });
if (response.matchedCount === 0) {
return { message: 'Prompt not found' };
}
return { message: 'Prompt labels updated successfully' };
} catch (error) {
logger.error('Error updating prompt labels', error);
return { message: 'Error updating prompt labels' };
}
},
deletePromptGroup: async (_id) => {
try {
const response = await PromptGroup.deleteOne({ _id });
if (response.deletedCount === 0) {
return { promptGroup: 'Prompt group not found' };
}
await Prompt.deleteMany({ groupId: new ObjectId(_id) });
await removeGroupFromAllProjects(_id);
return { promptGroup: 'Prompt group deleted successfully' };
} catch (error) {
logger.error('Error deleting prompt group', error);
return { message: 'Error deleting prompt group' };
}
},
};
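A usage sketch for the prompt methods above (not part of the diff); the require path and the sample field values are assumptions, while the call shapes follow the JSDoc in this file:

// Hypothetical usage; path and sample values are assumptions.
const { SystemCategories } = require('librechat-data-provider');
const { createPromptGroup, makePromptProduction, getPromptGroups } = require('~/models/Prompt');

async function example(req, authorId) {
  // Creates a group plus its first prompt; that prompt becomes the group's productionId.
  const { prompt, group } = await createPromptGroup({
    prompt: { prompt: 'Summarize: {{text}}', type: 'text' },
    group: { name: 'Summarizer' },
    author: authorId,
    authorName: 'Jane Doe',
  });
  await makePromptProduction(prompt._id);
  // Paginated listing restricted to the author's own groups (author is required by the filter).
  const page = await getPromptGroups(req, {
    author: authorId,
    category: SystemCategories.MY_PROMPTS,
    pageNumber: 1,
    pageSize: 10,
  });
  return { group, page };
}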

86
api/models/Role.js Normal file
View File

@@ -0,0 +1,86 @@
const { SystemRoles, CacheKeys, roleDefaults } = require('librechat-data-provider');
const getLogStores = require('~/cache/getLogStores');
const Role = require('~/models/schema/roleSchema');
/**
* Retrieve a role by name and convert the found role document to a plain object.
* If the role with the given name doesn't exist and the name is a system defined role, create it and return the lean version.
*
* @param {string} roleName - The name of the role to find or create.
* @param {string|string[]} [fieldsToSelect] - The fields to include or exclude in the returned document.
* @returns {Promise<Object>} A plain object representing the role document.
*/
const getRoleByName = async function (roleName, fieldsToSelect = null) {
try {
const cache = getLogStores(CacheKeys.ROLES);
const cachedRole = await cache.get(roleName);
if (cachedRole) {
return cachedRole;
}
let query = Role.findOne({ name: roleName });
if (fieldsToSelect) {
query = query.select(fieldsToSelect);
}
let role = await query.lean().exec();
if (!role && SystemRoles[roleName]) {
role = roleDefaults[roleName];
role = await new Role(role).save();
await cache.set(roleName, role);
return role.toObject();
}
await cache.set(roleName, role);
return role;
} catch (error) {
throw new Error(`Failed to retrieve or create role: ${error.message}`);
}
};
/**
* Update role values by name.
*
* @param {string} roleName - The name of the role to update.
* @param {Partial<TRole>} updates - The fields to update.
* @returns {Promise<TRole>} Updated role document.
*/
const updateRoleByName = async function (roleName, updates) {
try {
const cache = getLogStores(CacheKeys.ROLES);
const role = await Role.findOneAndUpdate(
{ name: roleName },
{ $set: updates },
{ new: true, lean: true },
)
.select('-__v')
.lean()
.exec();
await cache.set(roleName, role);
return role;
} catch (error) {
throw new Error(`Failed to update role: ${error.message}`);
}
};
/**
* Initialize default roles in the system.
* Creates the default roles (ADMIN, USER) if they don't exist in the database.
*
* @returns {Promise<void>}
*/
const initializeRoles = async function () {
const defaultRoles = [SystemRoles.ADMIN, SystemRoles.USER];
for (const roleName of defaultRoles) {
let role = await Role.findOne({ name: roleName }).select('name').lean();
if (!role) {
role = new Role(roleDefaults[roleName]);
await role.save();
}
}
};
module.exports = {
getRoleByName,
initializeRoles,
updateRoleByName,
};
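A small sketch of the role helpers (not part of the diff), assuming the PROMPTS permission shape defined in roleSchema later in this compare:

// Hypothetical usage; permission keys mirror roleSchema below.
const { SystemRoles, PermissionTypes, Permissions } = require('librechat-data-provider');
const { getRoleByName, updateRoleByName } = require('~/models/Role');

async function allowGlobalPromptSharing() {
  // Served from the ROLES cache when available; created from roleDefaults otherwise.
  const userRole = await getRoleByName(SystemRoles.USER);
  // Enable SHARED_GLOBAL for the USER role; updateRoleByName refreshes the cache entry.
  return updateRoleByName(SystemRoles.USER, {
    [PermissionTypes.PROMPTS]: {
      ...userRole[PermissionTypes.PROMPTS],
      [Permissions.SHARED_GLOBAL]: true,
    },
  });
}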

View File

@@ -22,7 +22,7 @@ module.exports = {
return share;
} catch (error) {
logger.error('[getShare] Error getting share link', error);
return { message: 'Error getting share link' };
throw new Error('Error getting share link');
}
},
@@ -41,17 +41,17 @@ module.exports = {
return { sharedLinks: shares, pages: totalPages, pageNumber, pageSize };
} catch (error) {
logger.error('[getShareByPage] Error getting shares', error);
return { message: 'Error getting shares' };
throw new Error('Error getting shares');
}
},
createSharedLink: async (user, { conversationId, ...shareData }) => {
const share = await SharedLink.findOne({ conversationId }).select('-_id -__v -user').lean();
if (share) {
return share;
}
try {
const share = await SharedLink.findOne({ conversationId }).select('-_id -__v -user').lean();
if (share) {
return share;
}
const shareId = crypto.randomUUID();
const messages = await getMessages({ conversationId });
const update = { ...shareData, shareId, messages, user };
@@ -60,30 +60,58 @@ module.exports = {
upsert: true,
});
} catch (error) {
logger.error('[saveShareMessage] Error saving conversation', error);
return { message: 'Error saving conversation' };
logger.error('[createSharedLink] Error creating shared link', error);
throw new Error('Error creating shared link');
}
},
updateSharedLink: async (user, { conversationId, ...shareData }) => {
const share = await SharedLink.findOne({ conversationId }).select('-_id -__v -user').lean();
if (!share) {
return { message: 'Share not found' };
try {
const share = await SharedLink.findOne({ conversationId }).select('-_id -__v -user').lean();
if (!share) {
return { message: 'Share not found' };
}
// update messages to the latest
const messages = await getMessages({ conversationId });
const update = { ...shareData, messages, user };
return await SharedLink.findOneAndUpdate({ conversationId: conversationId, user }, update, {
new: true,
upsert: false,
});
} catch (error) {
logger.error('[updateSharedLink] Error updating shared link', error);
throw new Error('Error updating shared link');
}
// update messages to the latest
const messages = await getMessages({ conversationId });
const update = { ...shareData, messages, user };
return await SharedLink.findOneAndUpdate({ conversationId: conversationId, user }, update, {
new: true,
upsert: false,
});
},
deleteSharedLink: async (user, { shareId }) => {
const share = await SharedLink.findOne({ shareId, user });
if (!share) {
return { message: 'Share not found' };
try {
const share = await SharedLink.findOne({ shareId, user });
if (!share) {
return { message: 'Share not found' };
}
return await SharedLink.findOneAndDelete({ shareId, user });
} catch (error) {
logger.error('[deleteSharedLink] Error deleting shared link', error);
throw new Error('Error deleting shared link');
}
},
/**
* Deletes all shared links for a specific user.
* @param {string} user - The user ID.
* @returns {Promise<{ message: string, deletedCount?: number }>} A result object indicating success or error message.
*/
deleteAllSharedLinks: async (user) => {
try {
const result = await SharedLink.deleteMany({ user });
return {
message: 'All shared links have been deleted successfully',
deletedCount: result.deletedCount,
};
} catch (error) {
logger.error('[deleteAllSharedLinks] Error deleting shared links', error);
throw new Error('Error deleting shared links');
}
return await SharedLink.findOneAndDelete({ shareId, user });
},
};
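A usage sketch of the reworked share methods (not part of the diff); the require path is an assumption, and note that failures now throw instead of returning { message: ... }:

// Hypothetical usage; the require path is an assumption.
const { createSharedLink, updateSharedLink, deleteAllSharedLinks } = require('~/models/Share');

async function shareAndCleanUp(userId, conversationId) {
  // Returns the existing share if one was already created for this conversation.
  const share = await createSharedLink(userId, { conversationId });
  // Refreshes the stored messages to the conversation's latest state.
  await updateSharedLink(userId, { conversationId });
  // Removes every shared link owned by the user and reports how many were deleted.
  const { deletedCount } = await deleteAllSharedLinks(userId);
  return { share, deletedCount };
}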

View File

@@ -1,61 +1,5 @@
const mongoose = require('mongoose');
const bcrypt = require('bcryptjs');
const signPayload = require('../server/services/signPayload');
const userSchema = require('./schema/userSchema.js');
const { SESSION_EXPIRY } = process.env ?? {};
const expires = eval(SESSION_EXPIRY) ?? 1000 * 60 * 15;
userSchema.methods.toJSON = function () {
return {
id: this._id,
provider: this.provider,
email: this.email,
name: this.name,
username: this.username,
avatar: this.avatar,
role: this.role,
emailVerified: this.emailVerified,
plugins: this.plugins,
createdAt: this.createdAt,
updatedAt: this.updatedAt,
};
};
userSchema.methods.generateToken = async function () {
return await signPayload({
payload: {
id: this._id,
username: this.username,
provider: this.provider,
email: this.email,
},
secret: process.env.JWT_SECRET,
expirationTime: expires / 1000,
});
};
userSchema.methods.comparePassword = function (candidatePassword, callback) {
bcrypt.compare(candidatePassword, this.password, (err, isMatch) => {
if (err) {
return callback(err);
}
callback(null, isMatch);
});
};
module.exports.hashPassword = async (password) => {
const hashedPassword = await new Promise((resolve, reject) => {
bcrypt.hash(password, 10, function (err, hash) {
if (err) {
reject(err);
} else {
resolve(hash);
}
});
});
return hashedPassword;
};
const userSchema = require('~/models/schema/userSchema');
const User = mongoose.model('User', userSchema);

View File

@@ -6,9 +6,18 @@ const {
deleteMessagesSince,
deleteMessages,
} = require('./Message');
const {
comparePassword,
deleteUserById,
generateToken,
getUserById,
updateUser,
createUser,
countUsers,
findUser,
} = require('./userMethods');
const { getConvoTitle, getConvo, saveConvo, deleteConvos } = require('./Conversation');
const { getPreset, getPresets, savePreset, deletePresets } = require('./Preset');
const { hashPassword, getUser, updateUser } = require('./userMethods');
const {
findFileById,
createFile,
@@ -29,9 +38,14 @@ module.exports = {
Session,
Balance,
hashPassword,
comparePassword,
deleteUserById,
generateToken,
getUserById,
countUsers,
createUser,
updateUser,
getUser,
findUser,
getMessages,
saveMessage,

View File

@@ -0,0 +1,19 @@
const mongoose = require('mongoose');
const Schema = mongoose.Schema;
const categoriesSchema = new Schema({
label: {
type: String,
required: true,
unique: true,
},
value: {
type: String,
required: true,
unique: true,
},
});
const categories = mongoose.model('categories', categoriesSchema);
module.exports = { Categories: categories };

View File

@@ -0,0 +1,31 @@
const mongoose = require('mongoose');
const conversationTagSchema = mongoose.Schema(
{
tag: {
type: String,
index: true,
},
user: {
type: String,
index: true,
},
description: {
type: String,
index: true,
},
count: {
type: Number,
default: 0,
},
position: {
type: Number,
default: 0,
},
},
{ timestamps: true },
);
conversationTagSchema.index({ tag: 1, user: 1 }, { unique: true });
module.exports = mongoose.model('ConversationTag', conversationTagSchema);
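A quick sketch of working with the unique { tag, user } index above (not part of the diff):

// Hypothetical usage of the ConversationTag model exported above.
const ConversationTag = require('~/models/schema/conversationTagSchema');

async function ensureTag(user, tag) {
  // The compound unique index makes an upsert the safe way to avoid duplicate tags per user.
  return ConversationTag.findOneAndUpdate(
    { tag, user },
    { $setOnInsert: { tag, user, count: 0 } },
    { new: true, upsert: true },
  ).lean();
}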

View File

@@ -42,6 +42,11 @@ const convoSchema = mongoose.Schema(
invocationId: {
type: Number,
},
tags: {
type: [String],
default: [],
meiliIndex: true,
},
},
{ timestamps: true },
);

View File

@@ -103,6 +103,10 @@ const conversationPreset = {
spec: {
type: String,
},
tags: {
type: [String],
default: [],
},
tools: { type: [{ type: String }], default: undefined },
maxContextTokens: {
type: Number,

View File

@@ -3,9 +3,9 @@ const mongoose = require('mongoose');
/**
* @typedef {Object} MongoFile
* @property {mongoose.Schema.Types.ObjectId} [_id] - MongoDB Document ID
* @property {ObjectId} [_id] - MongoDB Document ID
* @property {number} [__v] - MongoDB Version Key
* @property {mongoose.Schema.Types.ObjectId} user - User ID
* @property {ObjectId} user - User ID
* @property {string} [conversationId] - Optional conversation ID
* @property {string} file_id - File identifier
* @property {string} [temp_file_id] - Temporary File identifier
@@ -14,17 +14,19 @@ const mongoose = require('mongoose');
* @property {string} filepath - Location of the file
* @property {'file'} object - Type of object, always 'file'
* @property {string} type - Type of file
* @property {number} usage - Number of uses of the file
* @property {number} [usage=0] - Number of uses of the file
* @property {string} [context] - Context of the file origin
* @property {boolean} [embedded] - Whether or not the file is embedded in vector db
* @property {boolean} [embedded=false] - Whether or not the file is embedded in vector db
* @property {string} [model] - The model to identify the group region of the file (for Azure OpenAI hosting)
* @property {string} [source] - The source of the file
* @property {string} [source] - The source of the file (e.g., from FileSources)
* @property {number} [width] - Optional width of the file
* @property {number} [height] - Optional height of the file
* @property {Date} [expiresAt] - Optional height of the file
* @property {Date} [expiresAt] - Optional expiration date of the file
* @property {Date} [createdAt] - Date when the file was created
* @property {Date} [updatedAt] - Date when the file was updated
*/
/** @type {MongooseSchema<MongoFile>} */
const fileSchema = mongoose.Schema(
{
user: {
@@ -91,7 +93,7 @@ const fileSchema = mongoose.Schema(
height: Number,
expiresAt: {
type: Date,
expires: 3600,
expires: 3600, // 1 hour in seconds
},
},
{

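A sketch of how the TTL field above behaves (not part of the diff); only the schema is shown here, so the export being the schema itself and the model wiring are assumptions:

// Hypothetical illustration of the expiresAt TTL; model wiring is an assumption.
const mongoose = require('mongoose');
const fileSchema = require('~/models/schema/fileSchema');

const File = mongoose.models.File || mongoose.model('File', fileSchema);

async function markFileTemporary(file_id) {
  // Setting expiresAt opts the document into the TTL index (expires: 3600),
  // so MongoDB's TTL monitor removes it roughly one hour later.
  return File.updateOne({ file_id }, { $set: { expiresAt: new Date() } });
}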
View File

@@ -11,6 +11,7 @@ const messageSchema = mongoose.Schema(
},
conversationId: {
type: String,
index: true,
required: true,
meiliIndex: true,
},
@@ -128,6 +129,7 @@ if (process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY) {
}
messageSchema.index({ createdAt: 1 });
messageSchema.index({ messageId: 1, user: 1 }, { unique: true });
const Message = mongoose.models.Message || mongoose.model('Message', messageSchema);

View File

@@ -0,0 +1,30 @@
const { Schema } = require('mongoose');
/**
* @typedef {Object} MongoProject
* @property {ObjectId} [_id] - MongoDB Document ID
* @property {string} name - The name of the project
* @property {ObjectId[]} promptGroupIds - Array of PromptGroup IDs associated with the project
* @property {Date} [createdAt] - Date when the project was created (added by timestamps)
* @property {Date} [updatedAt] - Date when the project was last updated (added by timestamps)
*/
const projectSchema = new Schema(
{
name: {
type: String,
required: true,
index: true,
},
promptGroupIds: {
type: [Schema.Types.ObjectId],
ref: 'PromptGroup',
default: [],
},
},
{
timestamps: true,
},
);
module.exports = projectSchema;

View File

@@ -0,0 +1,118 @@
const mongoose = require('mongoose');
const { Constants } = require('librechat-data-provider');
const Schema = mongoose.Schema;
/**
* @typedef {Object} MongoPromptGroup
* @property {ObjectId} [_id] - MongoDB Document ID
* @property {string} name - The name of the prompt group
* @property {ObjectId} author - The author of the prompt group
* @property {ObjectId} [projectId=null] - The project ID of the prompt group
* @property {ObjectId} [productionId=null] - The project ID of the prompt group
* @property {string} authorName - The name of the author of the prompt group
* @property {number} [numberOfGenerations=0] - Number of generations the prompt group has
* @property {string} [oneliner=''] - Oneliner description of the prompt group
* @property {string} [category=''] - Category of the prompt group
* @property {string} [command] - Command for the prompt group
* @property {Date} [createdAt] - Date when the prompt group was created (added by timestamps)
* @property {Date} [updatedAt] - Date when the prompt group was last updated (added by timestamps)
*/
const promptGroupSchema = new Schema(
{
name: {
type: String,
required: true,
index: true,
},
numberOfGenerations: {
type: Number,
default: 0,
},
oneliner: {
type: String,
default: '',
},
category: {
type: String,
default: '',
index: true,
},
projectIds: {
type: [Schema.Types.ObjectId],
ref: 'Project',
index: true,
},
productionId: {
type: Schema.Types.ObjectId,
ref: 'Prompt',
required: true,
index: true,
},
author: {
type: Schema.Types.ObjectId,
ref: 'User',
required: true,
index: true,
},
authorName: {
type: String,
required: true,
},
command: {
type: String,
index: true,
validate: {
validator: function (v) {
return v === undefined || v === null || v === '' || /^[a-z0-9-]+$/.test(v);
},
message: (props) =>
`${props.value} is not a valid command. Only lowercase alphanumeric characters and hyphens (-) are allowed.`,
},
maxlength: [
Constants.COMMANDS_MAX_LENGTH,
`Command cannot be longer than ${Constants.COMMANDS_MAX_LENGTH} characters`,
],
},
},
{
timestamps: true,
},
);
const PromptGroup = mongoose.model('PromptGroup', promptGroupSchema);
const promptSchema = new Schema(
{
groupId: {
type: Schema.Types.ObjectId,
ref: 'PromptGroup',
required: true,
index: true,
},
author: {
type: Schema.Types.ObjectId,
ref: 'User',
required: true,
},
prompt: {
type: String,
required: true,
},
type: {
type: String,
enum: ['text', 'chat'],
required: true,
},
},
{
timestamps: true,
},
);
const Prompt = mongoose.model('Prompt', promptSchema);
promptSchema.index({ createdAt: 1, updatedAt: 1 });
promptGroupSchema.index({ createdAt: 1, updatedAt: 1 });
module.exports = { Prompt, PromptGroup };
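A sketch exercising the command validator above (not part of the diff); the ObjectId strings are placeholders:

// Hypothetical validation check using the exported PromptGroup model.
const { PromptGroup } = require('~/models/schema/promptSchema');

function checkCommand(command) {
  const doc = new PromptGroup({
    name: 'Example',
    author: '64b000000000000000000000', // placeholder ObjectId
    authorName: 'Jane Doe',
    productionId: '64b000000000000000000001', // placeholder ObjectId
    command,
  });
  const err = doc.validateSync();
  // 'my-command-1' passes the /^[a-z0-9-]+$/ check; 'My Command!' does not.
  return err && err.errors.command ? err.errors.command.message : 'ok';
}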

View File

@@ -0,0 +1,29 @@
const { PermissionTypes, Permissions } = require('librechat-data-provider');
const mongoose = require('mongoose');
const roleSchema = new mongoose.Schema({
name: {
type: String,
required: true,
unique: true,
index: true,
},
[PermissionTypes.PROMPTS]: {
[Permissions.SHARED_GLOBAL]: {
type: Boolean,
default: false,
},
[Permissions.USE]: {
type: Boolean,
default: true,
},
[Permissions.CREATE]: {
type: Boolean,
default: true,
},
},
});
const Role = mongoose.model('Role', roleSchema);
module.exports = Role;

View File

@@ -7,6 +7,9 @@ const tokenSchema = new Schema({
required: true,
ref: 'user',
},
email: {
type: String,
},
token: {
type: String,
required: true,

View File

@@ -1,5 +1,35 @@
const mongoose = require('mongoose');
/**
* @typedef {Object} MongoSession
* @property {string} [refreshToken] - The refresh token
*/
/**
* @typedef {Object} MongoUser
* @property {ObjectId} [_id] - MongoDB Document ID
* @property {string} [name] - The user's name
* @property {string} [username] - The user's username, in lowercase
* @property {string} email - The user's email address
* @property {boolean} emailVerified - Whether the user's email is verified
* @property {string} [password] - The user's password, trimmed with 8-128 characters
* @property {string} [avatar] - The URL of the user's avatar
* @property {string} provider - The provider of the user's account (e.g., 'local', 'google')
* @property {string} [role='USER'] - The role of the user
* @property {string} [googleId] - Optional Google ID for the user
* @property {string} [facebookId] - Optional Facebook ID for the user
* @property {string} [openidId] - Optional OpenID ID for the user
* @property {string} [ldapId] - Optional LDAP ID for the user
* @property {string} [githubId] - Optional GitHub ID for the user
* @property {string} [discordId] - Optional Discord ID for the user
* @property {Array} [plugins=[]] - List of plugins used by the user
* @property {Array.<MongoSession>} [refreshToken] - List of sessions with refresh tokens
* @property {Date} [expiresAt] - Optional expiration date of the user document (used by the TTL index)
* @property {Date} [createdAt] - Date when the user was created (added by timestamps)
* @property {Date} [updatedAt] - Date when the user was last updated (added by timestamps)
*/
/** @type {MongooseSchema<MongoSession>} */
const Session = mongoose.Schema({
refreshToken: {
type: String,
@@ -7,6 +37,7 @@ const Session = mongoose.Schema({
},
});
/** @type {MongooseSchema<MongoUser>} */
const userSchema = mongoose.Schema(
{
name: {
@@ -86,6 +117,10 @@ const userSchema = mongoose.Schema(
refreshToken: {
type: [Session],
},
expiresAt: {
type: Date,
expires: 604800, // 7 days in seconds
},
},
{ timestamps: true },
);
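A sketch of how the new expiresAt TTL interacts with the user methods (not part of the diff); createUser and updateUser appear at the end of this compare, and the 7-day figure matches expires: 604800 above:

// Hypothetical flow; function names come from userMethods.js below.
const { createUser, updateUser } = require('~/models/userMethods');

async function registerPendingUser(email) {
  // disableTTL = false stores expiresAt at about now + 7 days, so unverified users expire.
  const userId = await createUser({ email, provider: 'local' }, /* disableTTL */ false);
  // updateUser $unsets expiresAt, so a verified user is no longer subject to the TTL.
  await updateUser(userId, { emailVerified: true });
  return userId;
}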

View File

@@ -12,11 +12,13 @@ const tokenValues = {
'4k': { prompt: 1.5, completion: 2 },
'16k': { prompt: 3, completion: 4 },
'gpt-3.5-turbo-1106': { prompt: 1, completion: 2 },
'gpt-4o-mini': { prompt: 0.15, completion: 0.6 },
'gpt-4o': { prompt: 5, completion: 15 },
'gpt-4-1106': { prompt: 10, completion: 30 },
'gpt-3.5-turbo-0125': { prompt: 0.5, completion: 1.5 },
'claude-3-opus': { prompt: 15, completion: 75 },
'claude-3-sonnet': { prompt: 3, completion: 15 },
'claude-3-5-sonnet': { prompt: 3, completion: 15 },
'claude-3-haiku': { prompt: 0.25, completion: 1.25 },
'claude-2.1': { prompt: 8, completion: 24 },
'claude-2': { prompt: 8, completion: 24 },
@@ -53,6 +55,8 @@ const getValueKey = (model, endpoint) => {
return 'gpt-3.5-turbo-1106';
} else if (modelName.includes('gpt-3.5')) {
return '4k';
} else if (modelName.includes('gpt-4o-mini')) {
return 'gpt-4o-mini';
} else if (modelName.includes('gpt-4o')) {
return 'gpt-4o';
} else if (modelName.includes('gpt-4-vision')) {

View File

@@ -48,6 +48,19 @@ describe('getValueKey', () => {
expect(getValueKey('gpt-4o-turbo')).toBe('gpt-4o');
expect(getValueKey('gpt-4o-0125')).toBe('gpt-4o');
});
it('should return "gpt-4o-mini" for model type of "gpt-4o-mini"', () => {
expect(getValueKey('gpt-4o-mini-2024-07-18')).toBe('gpt-4o-mini');
expect(getValueKey('openai/gpt-4o-mini')).toBe('gpt-4o-mini');
expect(getValueKey('gpt-4o-mini-0718')).toBe('gpt-4o-mini');
});
it('should return "claude-3-5-sonnet" for model type of "claude-3-5-sonnet-"', () => {
expect(getValueKey('claude-3-5-sonnet-20240620')).toBe('claude-3-5-sonnet');
expect(getValueKey('anthropic/claude-3-5-sonnet')).toBe('claude-3-5-sonnet');
expect(getValueKey('claude-3-5-sonnet-turbo')).toBe('claude-3-5-sonnet');
expect(getValueKey('claude-3-5-sonnet-0125')).toBe('claude-3-5-sonnet');
});
});
describe('getMultiplier', () => {
@@ -102,6 +115,19 @@ describe('getMultiplier', () => {
);
});
it('should return the correct multiplier for gpt-4o-mini', () => {
const valueKey = getValueKey('gpt-4o-mini-2024-07-18');
expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(
tokenValues['gpt-4o-mini'].prompt,
);
expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
tokenValues['gpt-4o-mini'].completion,
);
expect(getMultiplier({ valueKey, tokenType: 'completion' })).not.toBe(
tokenValues['gpt-4-1106'].completion,
);
});
it('should derive the valueKey from the model if not provided for new models', () => {
expect(
getMultiplier({ tokenType: 'prompt', model: 'gpt-3.5-turbo-1106-some-other-info' }),

View File

@@ -1,28 +1,37 @@
const bcrypt = require('bcryptjs');
const signPayload = require('~/server/services/signPayload');
const User = require('./User');
const hashPassword = async (password) => {
const hashedPassword = await new Promise((resolve, reject) => {
bcrypt.hash(password, 10, function (err, hash) {
if (err) {
reject(err);
} else {
resolve(hash);
}
});
});
return hashedPassword;
};
/**
* Retrieve a user by ID and convert the found user document to a plain object.
*
* @param {string} userId - The ID of the user to find and return as a plain object.
* @returns {Promise<Object>} A plain object representing the user document, or `null` if no user is found.
* @param {string|string[]} [fieldsToSelect] - The fields to include or exclude in the returned document.
* @returns {Promise<MongoUser>} A plain object representing the user document, or `null` if no user is found.
*/
const getUser = async function (userId) {
return await User.findById(userId).lean();
const getUserById = async function (userId, fieldsToSelect = null) {
const query = User.findById(userId);
if (fieldsToSelect) {
query.select(fieldsToSelect);
}
return await query.lean();
};
/**
* Search for a single user based on partial data and return matching user document as plain object.
* @param {Partial<MongoUser>} searchCriteria - The partial data to use for searching the user.
* @param {string|string[]} [fieldsToSelect] - The fields to include or exclude in the returned document.
* @returns {Promise<MongoUser>} A plain object representing the user document, or `null` if no user is found.
*/
const findUser = async function (searchCriteria, fieldsToSelect = null) {
const query = User.findOne(searchCriteria);
if (fieldsToSelect) {
query.select(fieldsToSelect);
}
return await query.lean();
};
/**
@@ -30,17 +39,127 @@ const getUser = async function (userId) {
*
* @param {string} userId - The ID of the user to update.
* @param {Object} updateData - An object containing the properties to update.
* @returns {Promise<Object>} The updated user document as a plain object, or `null` if no user is found.
* @returns {Promise<MongoUser>} The updated user document as a plain object, or `null` if no user is found.
*/
const updateUser = async function (userId, updateData) {
return await User.findByIdAndUpdate(userId, updateData, {
const updateOperation = {
$set: updateData,
$unset: { expiresAt: '' }, // Remove the expiresAt field to prevent TTL
};
return await User.findByIdAndUpdate(userId, updateOperation, {
new: true,
runValidators: true,
}).lean();
};
module.exports = {
hashPassword,
updateUser,
getUser,
/**
* Creates a new user, optionally with a TTL of 1 week.
* @param {MongoUser} data - The user data to be created, must contain user_id.
* @param {boolean} [disableTTL=true] - Whether to disable the TTL. Defaults to `true`.
* @param {boolean} [returnUser=false] - Whether to return the created user document instead of its ID. Defaults to `false`.
* @returns {Promise<ObjectId|MongoUser>} A promise that resolves to the created user's ID, or the full user document when `returnUser` is true.
* @throws {Error} If a user with the same user_id already exists.
*/
const createUser = async (data, disableTTL = true, returnUser = false) => {
const userData = {
...data,
expiresAt: disableTTL ? null : new Date(Date.now() + 604800 * 1000), // 1 week in milliseconds
};
if (disableTTL) {
delete userData.expiresAt;
}
const user = await User.create(userData);
if (returnUser) {
return user.toObject();
}
return user._id;
};
/**
* Count the number of user documents in the collection based on the provided filter.
*
* @param {Object} [filter={}] - The filter to apply when counting the documents.
* @returns {Promise<number>} The count of documents that match the filter.
*/
const countUsers = async function (filter = {}) {
return await User.countDocuments(filter);
};
/**
* Delete a user by their unique ID.
*
* @param {string} userId - The ID of the user to delete.
* @returns {Promise<{ deletedCount: number }>} An object indicating the number of deleted documents.
*/
const deleteUserById = async function (userId) {
try {
const result = await User.deleteOne({ _id: userId });
if (result.deletedCount === 0) {
return { deletedCount: 0, message: 'No user found with that ID.' };
}
return { deletedCount: result.deletedCount, message: 'User was deleted successfully.' };
} catch (error) {
throw new Error('Error deleting user: ' + error.message);
}
};
const { SESSION_EXPIRY } = process.env ?? {};
// SESSION_EXPIRY may be an arithmetic expression string (e.g. '1000 * 60 * 15'); falls back to 15 minutes
const expires = eval(SESSION_EXPIRY) ?? 1000 * 60 * 15;
/**
* Generates a JWT token for a given user.
*
* @param {MongoUser} user - The user for whom the token is being generated.
* @returns {Promise<string>} A promise that resolves to a JWT token.
*/
const generateToken = async (user) => {
if (!user) {
throw new Error('No user provided');
}
return await signPayload({
payload: {
id: user._id,
username: user.username,
provider: user.provider,
email: user.email,
},
secret: process.env.JWT_SECRET,
expirationTime: expires / 1000,
});
};
/**
* Compares the provided password with the user's password.
*
* @param {MongoUser} user - the user to compare password for.
* @param {string} candidatePassword - The password to test against the user's password.
* @returns {Promise<boolean>} A promise that resolves to a boolean indicating if the password matches.
*/
const comparePassword = async (user, candidatePassword) => {
if (!user) {
throw new Error('No user provided');
}
return new Promise((resolve, reject) => {
bcrypt.compare(candidatePassword, user.password, (err, isMatch) => {
if (err) {
reject(err);
}
resolve(isMatch);
});
});
};
module.exports = {
comparePassword,
deleteUserById,
generateToken,
getUserById,
countUsers,
createUser,
updateUser,
findUser,
};
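
A hedged usage sketch for the refactored user methods above; the module path and the signup flow are illustrative, not taken from this changeset:

const bcrypt = require('bcryptjs');
const {
  createUser,
  getUserById,
  findUser,
  updateUser,
  generateToken,
  comparePassword,
} = require('~/models/userMethods'); // assumed path

async function demoSignupAndLogin() {
  const password = await bcrypt.hash('s3cret', 10);

  // Unverified signups keep the TTL (disableTTL = false), so abandoned accounts expire after ~1 week.
  const userId = await createUser({ email: 'jane@example.com', password, provider: 'local' }, false);

  // updateUser $sets the changes and $unsets expiresAt, disarming the TTL once the account is confirmed.
  const user = await updateUser(userId, { emailVerified: true });

  // Lookups can now project fields, e.g. exclude the password hash and version key.
  const safeUser = await getUserById(userId, '-password -__v');
  await findUser({ email: 'jane@example.com' }, 'email username role');

  // Password check + JWT (JWT_SECRET and SESSION_EXPIRY must be set in the environment).
  if (await comparePassword(user, 's3cret')) {
    return generateToken(safeUser);
  }
}

module.exports = demoSignupAndLogin;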

View File

@@ -1,6 +1,6 @@
{
"name": "@librechat/backend",
"version": "0.7.2",
"version": "0.7.4-rc1",
"description": "",
"scripts": {
"start": "echo 'please run this from the root directory'",
@@ -41,7 +41,6 @@
"@langchain/community": "^0.0.46",
"@langchain/google-genai": "^0.0.11",
"@langchain/google-vertexai": "^0.0.17",
"agenda": "^5.0.0",
"axios": "^1.3.4",
"bcryptjs": "^2.4.3",
"cheerio": "^1.0.0-rc.12",

View File

@@ -1,8 +1,9 @@
const throttle = require('lodash/throttle');
const { getResponseSender, Constants, EModelEndpoint } = require('librechat-data-provider');
const { getResponseSender, Constants, CacheKeys, Time } = require('librechat-data-provider');
const { createAbortController, handleAbortError } = require('~/server/middleware');
const { sendMessage, createOnProgress } = require('~/server/utils');
const { saveMessage, getConvo } = require('~/models');
const { getLogStores } = require('~/cache');
const { saveMessage } = require('~/models');
const { logger } = require('~/config');
const AskController = async (req, res, next, initializeClient, addTitle) => {
@@ -18,6 +19,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
logger.debug('[AskController]', { text, conversationId, ...endpointOption });
let userMessage;
let userMessagePromise;
let promptTokens;
let userMessageId;
let responseMessageId;
@@ -34,6 +36,8 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
if (key === 'userMessage') {
userMessage = data[key];
userMessageId = data[key].messageId;
} else if (key === 'userMessagePromise') {
userMessagePromise = data[key];
} else if (key === 'responseMessageId') {
responseMessageId = data[key];
} else if (key === 'promptTokens') {
@@ -48,11 +52,13 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
try {
const { client } = await initializeClient({ req, res, endpointOption });
const unfinished = endpointOption.endpoint === EModelEndpoint.google ? false : true;
const messageCache = getLogStores(CacheKeys.MESSAGES);
const { onProgress: progressCallback, getPartialText } = createOnProgress({
onProgress: throttle(
({ text: partialText }) => {
saveMessage({
/*
const unfinished = endpointOption.endpoint === EModelEndpoint.google ? false : true;
messageCache.set(responseMessageId, {
messageId: responseMessageId,
sender,
conversationId,
@@ -62,7 +68,10 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
unfinished,
error: false,
user,
});
}, Time.FIVE_MINUTES);
*/
messageCache.set(responseMessageId, partialText, Time.FIVE_MINUTES);
},
3000,
{ trailing: false },
@@ -74,6 +83,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
const getAbortData = () => ({
sender,
conversationId,
userMessagePromise,
messageId: responseMessageId,
parentMessageId: overrideParentMessageId ?? userMessageId,
text: getPartialText(),
@@ -81,7 +91,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
promptTokens,
});
const { abortController, onStart } = createAbortController(req, res, getAbortData);
const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData);
res.on('close', () => {
logger.debug('[AskController] Request closed');
@@ -108,7 +118,6 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
progressCallback,
progressOptions: {
res,
text,
// parentMessageId: overrideParentMessageId || userMessageId,
},
};
@@ -121,7 +130,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
response.endpoint = endpointOption.endpoint;
const conversation = await getConvo(user, conversationId);
const { conversation = {} } = await client.responsePromise;
conversation.title =
conversation && !conversation.title ? null : conversation?.title || 'New Chat';
@@ -141,10 +150,18 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
});
res.end();
await saveMessage({ ...response, user });
await saveMessage(
req,
{ ...response, user },
{ context: 'api/server/controllers/AskController.js - response end' },
);
}
await saveMessage(userMessage);
if (!client.skipSaveUserMessage) {
await saveMessage(req, userMessage, {
context: 'api/server/controllers/AskController.js - don\'t skip saving user message',
});
}
if (addTitle && parentMessageId === Constants.NO_PARENT && newConvo) {
addTitle(req, {

View File
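
The AskController change above swaps per-chunk saveMessage calls for a short-lived cache entry keyed by the response message ID. A minimal sketch of that pattern in isolation; the Keyv instance is a stand-in for getLogStores(CacheKeys.MESSAGES):

const Keyv = require('keyv');
const throttle = require('lodash/throttle');

// Stand-in for the MESSAGES log store; any Keyv-compatible store works for the sketch.
const messageCache = new Keyv();
const FIVE_MINUTES = 5 * 60 * 1000;

function createProgressHandler(responseMessageId) {
  // Throttled so streaming progress is cached at most once every 3s instead of writing to the DB.
  return throttle(
    ({ text: partialText }) => {
      messageCache.set(responseMessageId, partialText, FIVE_MINUTES);
    },
    3000,
    { trailing: false },
  );
}

// On abort or reconnect, the partial text can be recovered from the cache instead of the database.
async function recoverPartialText(responseMessageId) {
  return (await messageCache.get(responseMessageId)) ?? '';
}

module.exports = { createProgressHandler, recoverPartialText };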

@@ -1,45 +1,29 @@
const crypto = require('crypto');
const cookies = require('cookie');
const jwt = require('jsonwebtoken');
const { Session, User } = require('~/models');
const {
registerUser,
resetPassword,
setAuthTokens,
requestPasswordReset,
} = require('~/server/services/AuthService');
const { Session, getUserById } = require('~/models');
const { logger } = require('~/config');
const registrationController = async (req, res) => {
try {
const response = await registerUser(req.body);
if (response.status === 200) {
const { status, user } = response;
let newUser = await User.findOne({ _id: user._id });
if (!newUser) {
newUser = new User(user);
await newUser.save();
}
const token = await setAuthTokens(user._id, res);
res.setHeader('Authorization', `Bearer ${token}`);
res.status(status).send({ user });
} else {
const { status, message } = response;
res.status(status).send({ message });
}
const { status, message } = response;
res.status(status).send({ message });
} catch (err) {
logger.error('[registrationController]', err);
return res.status(500).json({ message: err.message });
}
};
const getUserController = async (req, res) => {
return res.status(200).send(req.user);
};
const resetPasswordRequestController = async (req, res) => {
try {
const resetService = await requestPasswordReset(req.body.email);
const resetService = await requestPasswordReset(req);
if (resetService instanceof Error) {
return res.status(400).json(resetService);
} else {
@@ -77,7 +61,7 @@ const refreshController = async (req, res) => {
try {
const payload = jwt.verify(refreshToken, process.env.JWT_REFRESH_SECRET);
const user = await User.findOne({ _id: payload.id });
const user = await getUserById(payload.id, '-password -__v');
if (!user) {
return res.status(401).redirect('/login');
}
@@ -86,8 +70,7 @@ const refreshController = async (req, res) => {
if (process.env.NODE_ENV === 'CI') {
const token = await setAuthTokens(userId, res);
const userObj = user.toJSON();
return res.status(200).send({ token, user: userObj });
return res.status(200).send({ token, user });
}
// Hash the refresh token
@@ -98,8 +81,7 @@ const refreshController = async (req, res) => {
const session = await Session.findOne({ user: userId, refreshTokenHash: hashedToken });
if (session && session.expiration > new Date()) {
const token = await setAuthTokens(userId, res, session._id);
const userObj = user.toJSON();
res.status(200).send({ token, user: userObj });
res.status(200).send({ token, user });
} else if (req?.query?.retry) {
// Retrying from a refresh token request that failed (401)
res.status(403).send('No session found');
@@ -115,7 +97,6 @@ const refreshController = async (req, res) => {
};
module.exports = {
getUserController,
refreshController,
registrationController,
resetPasswordController,

View File

@@ -1,8 +1,9 @@
const throttle = require('lodash/throttle');
const { getResponseSender, EModelEndpoint } = require('librechat-data-provider');
const { getResponseSender, CacheKeys, Time } = require('librechat-data-provider');
const { createAbortController, handleAbortError } = require('~/server/middleware');
const { sendMessage, createOnProgress } = require('~/server/utils');
const { saveMessage, getConvo } = require('~/models');
const { getLogStores } = require('~/cache');
const { saveMessage } = require('~/models');
const { logger } = require('~/config');
const EditController = async (req, res, next, initializeClient) => {
@@ -27,6 +28,7 @@ const EditController = async (req, res, next, initializeClient) => {
});
let userMessage;
let userMessagePromise;
let promptTokens;
const sender = getResponseSender({
...endpointOption,
@@ -40,6 +42,8 @@ const EditController = async (req, res, next, initializeClient) => {
for (let key in data) {
if (key === 'userMessage') {
userMessage = data[key];
} else if (key === 'userMessagePromise') {
userMessagePromise = data[key];
} else if (key === 'responseMessageId') {
responseMessageId = data[key];
} else if (key === 'promptTokens') {
@@ -48,12 +52,14 @@ const EditController = async (req, res, next, initializeClient) => {
}
};
const unfinished = endpointOption.endpoint === EModelEndpoint.google ? false : true;
const messageCache = getLogStores(CacheKeys.MESSAGES);
const { onProgress: progressCallback, getPartialText } = createOnProgress({
generation,
onProgress: throttle(
({ text: partialText }) => {
saveMessage({
/*
const unfinished = endpointOption.endpoint === EModelEndpoint.google ? false : true;
{
messageId: responseMessageId,
sender,
conversationId,
@@ -64,7 +70,8 @@ const EditController = async (req, res, next, initializeClient) => {
isEdited: true,
error: false,
user,
});
} */
messageCache.set(responseMessageId, partialText, Time.FIVE_MINUTES);
},
3000,
{ trailing: false },
@@ -73,6 +80,7 @@ const EditController = async (req, res, next, initializeClient) => {
const getAbortData = () => ({
conversationId,
userMessagePromise,
messageId: responseMessageId,
sender,
parentMessageId: overrideParentMessageId ?? userMessageId,
@@ -81,7 +89,7 @@ const EditController = async (req, res, next, initializeClient) => {
promptTokens,
});
const { abortController, onStart } = createAbortController(req, res, getAbortData);
const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData);
res.on('close', () => {
logger.debug('[EditController] Request closed');
@@ -115,12 +123,11 @@ const EditController = async (req, res, next, initializeClient) => {
progressCallback,
progressOptions: {
res,
text,
// parentMessageId: overrideParentMessageId || userMessageId,
},
});
const conversation = await getConvo(user, conversationId);
const { conversation = {} } = await client.responsePromise;
conversation.title =
conversation && !conversation.title ? null : conversation?.title || 'New Chat';
@@ -138,7 +145,11 @@ const EditController = async (req, res, next, initializeClient) => {
});
res.end();
await saveMessage({ ...response, user });
await saveMessage(
req,
{ ...response, user },
{ context: 'api/server/controllers/EditController.js - response end' },
);
}
} catch (error) {
const partialText = getPartialText();

View File

@@ -1,11 +1,37 @@
const { updateUserPluginsService } = require('~/server/services/UserService');
const {
Session,
Balance,
getFiles,
deleteFiles,
deleteConvos,
deletePresets,
deleteMessages,
deleteUserById,
} = require('~/models');
const { updateUserPluginAuth, deleteUserPluginAuth } = require('~/server/services/PluginService');
const { updateUserPluginsService, deleteUserKey } = require('~/server/services/UserService');
const { verifyEmail, resendVerificationEmail } = require('~/server/services/AuthService');
const { processDeleteRequest } = require('~/server/services/Files/process');
const { deleteAllSharedLinks } = require('~/models/Share');
const { Transaction } = require('~/models/Transaction');
const { logger } = require('~/config');
const getUserController = async (req, res) => {
res.status(200).send(req.user);
};
const deleteUserFiles = async (req) => {
try {
const userFiles = await getFiles({ user: req.user.id });
await processDeleteRequest({
req,
files: userFiles,
});
} catch (error) {
logger.error('[deleteUserFiles]', error);
}
};
const updateUserPluginsController = async (req, res) => {
const { user } = req;
const { pluginKey, action, auth, isAssistantTool } = req.body;
@@ -49,11 +75,68 @@ const updateUserPluginsController = async (req, res) => {
res.status(200).send();
} catch (err) {
logger.error('[updateUserPluginsController]', err);
res.status(500).json({ message: err.message });
return res.status(500).json({ message: 'Something went wrong.' });
}
};
const deleteUserController = async (req, res) => {
const { user } = req;
try {
await deleteMessages({ user: user.id }); // delete user messages
await Session.deleteMany({ user: user.id }); // delete user sessions
await Transaction.deleteMany({ user: user.id }); // delete user transactions
await deleteUserKey({ userId: user.id, all: true }); // delete user keys
await Balance.deleteMany({ user: user._id }); // delete user balances
await deletePresets(user.id); // delete user presets
/* TODO: Delete Assistant Threads */
await deleteConvos(user.id); // delete user convos
await deleteUserPluginAuth(user.id, null, true); // delete user plugin auth
await deleteUserById(user.id); // delete user
await deleteAllSharedLinks(user.id); // delete user shared links
await deleteUserFiles(req); // delete user files
await deleteFiles(null, user.id); // delete database files in case of orphaned files from previous steps
/* TODO: queue job for cleaning actions and assistants of non-existent users */
logger.info(`User deleted account. Email: ${user.email} ID: ${user.id}`);
res.status(200).send({ message: 'User deleted' });
} catch (err) {
logger.error('[deleteUserController]', err);
return res.status(500).json({ message: 'Something went wrong.' });
}
};
const verifyEmailController = async (req, res) => {
try {
const verifyEmailService = await verifyEmail(req);
if (verifyEmailService instanceof Error) {
return res.status(400).json(verifyEmailService);
} else {
return res.status(200).json(verifyEmailService);
}
} catch (e) {
logger.error('[verifyEmailController]', e);
return res.status(500).json({ message: 'Something went wrong.' });
}
};
const resendVerificationController = async (req, res) => {
try {
const result = await resendVerificationEmail(req);
if (result instanceof Error) {
return res.status(400).json(result);
} else {
return res.status(200).json(result);
}
} catch (e) {
logger.error('[resendVerificationController]', e);
return res.status(500).json({ message: 'Something went wrong.' });
}
};
module.exports = {
getUserController,
deleteUserController,
verifyEmailController,
updateUserPluginsController,
resendVerificationController,
};

View File

@@ -120,21 +120,22 @@ const chatV1 = async (req, res) => {
? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.'
: ''
}`;
return sendResponse(res, messageData, errorMessage);
return sendResponse(req, res, messageData, errorMessage);
} else if (error?.message?.includes('string too long')) {
return sendResponse(
req,
res,
messageData,
'Message too long. The Assistants API has a limit of 32,768 characters per message. Please shorten it and try again.',
);
} else if (error?.message?.includes(ViolationTypes.TOKEN_BALANCE)) {
return sendResponse(res, messageData, error.message);
return sendResponse(req, res, messageData, error.message);
} else {
logger.error('[/assistants/chat/]', error);
}
if (!openai || !thread_id || !run_id) {
return sendResponse(res, messageData, defaultErrorMessage);
return sendResponse(req, res, messageData, defaultErrorMessage);
}
await sleep(2000);
@@ -221,10 +222,10 @@ const chatV1 = async (req, res) => {
};
} catch (error) {
logger.error('[/assistants/chat/] Error finalizing error process', error);
return sendResponse(res, messageData, 'The Assistant run failed');
return sendResponse(req, res, messageData, 'The Assistant run failed');
}
return sendResponse(res, finalEvent);
return sendResponse(req, res, finalEvent);
};
try {
@@ -382,6 +383,9 @@ const chatV1 = async (req, res) => {
return files;
};
/** @type {Promise<Run>|undefined} */
let userMessagePromise;
const initializeThread = async () => {
/** @type {[ undefined | MongoFile[]]}*/
const [processedFiles] = await Promise.all([addVisionPrompt(), getRequestFileIds()]);
@@ -438,7 +442,7 @@ const chatV1 = async (req, res) => {
previousMessages.push(requestMessage);
/* asynchronous */
saveUserMessage({ ...requestMessage, model });
userMessagePromise = saveUserMessage(req, { ...requestMessage, model });
conversation = {
conversationId,
@@ -582,7 +586,10 @@ const chatV1 = async (req, res) => {
});
res.end();
await saveAssistantMessage({ ...responseMessage, model });
if (userMessagePromise) {
await userMessagePromise;
}
await saveAssistantMessage(req, { ...responseMessage, model });
if (parentMessageId === Constants.NO_PARENT && !_thread_id) {
addTitle(req, {

View File

@@ -1,12 +1,12 @@
const { v4 } = require('uuid');
const {
Time,
Constants,
RunStatus,
CacheKeys,
ContentTypes,
ToolCallTypes,
EModelEndpoint,
ViolationTypes,
retrievalMimeTypes,
AssistantStreamEvents,
} = require('librechat-data-provider');
@@ -14,12 +14,12 @@ const {
initThread,
recordUsage,
saveUserMessage,
checkMessageGaps,
addThreadMetadata,
saveAssistantMessage,
} = require('~/server/services/Threads');
const { sendResponse, sendMessage, sleep, isEnabled, countTokens } = require('~/server/utils');
const { runAssistant, createOnTextProgress } = require('~/server/services/AssistantService');
const { sendMessage, sleep, isEnabled, countTokens } = require('~/server/utils');
const { createErrorHandler } = require('~/server/controllers/assistants/errors');
const validateAuthor = require('~/server/middleware/assistants/validateAuthor');
const { createRun, StreamRunManager } = require('~/server/services/Runs');
const { addTitle } = require('~/server/services/Endpoints/assistants');
@@ -44,7 +44,7 @@ const ten_minutes = 1000 * 60 * 10;
const chatV2 = async (req, res) => {
logger.debug('[/assistants/chat/] req.body', req.body);
/** @type {{ files: MongoFile[]}} */
/** @type {{files: MongoFile[]}} */
const {
text,
model,
@@ -90,139 +90,20 @@ const chatV2 = async (req, res) => {
/** @type {Run | undefined} - The completed run, undefined if incomplete */
let completedRun;
const handleError = async (error) => {
const defaultErrorMessage =
'The Assistant run failed to initialize. Try sending a message in a new conversation.';
const messageData = {
thread_id,
assistant_id,
conversationId,
parentMessageId,
sender: 'System',
user: req.user.id,
shouldSaveMessage: false,
messageId: responseMessageId,
endpoint,
};
const getContext = () => ({
openai,
run_id,
endpoint,
cacheKey,
thread_id,
completedRun,
assistant_id,
conversationId,
parentMessageId,
responseMessageId,
});
if (error.message === 'Run cancelled') {
return res.end();
} else if (error.message === 'Request closed' && completedRun) {
return;
} else if (error.message === 'Request closed') {
logger.debug('[/assistants/chat/] Request aborted on close');
} else if (/Files.*are invalid/.test(error.message)) {
const errorMessage = `Files are invalid, or may not have uploaded yet.${
endpoint === EModelEndpoint.azureAssistants
? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.'
: ''
}`;
return sendResponse(res, messageData, errorMessage);
} else if (error?.message?.includes('string too long')) {
return sendResponse(
res,
messageData,
'Message too long. The Assistants API has a limit of 32,768 characters per message. Please shorten it and try again.',
);
} else if (error?.message?.includes(ViolationTypes.TOKEN_BALANCE)) {
return sendResponse(res, messageData, error.message);
} else {
logger.error('[/assistants/chat/]', error);
}
if (!openai || !thread_id || !run_id) {
return sendResponse(res, messageData, defaultErrorMessage);
}
await sleep(2000);
try {
const status = await cache.get(cacheKey);
if (status === 'cancelled') {
logger.debug('[/assistants/chat/] Run already cancelled');
return res.end();
}
await cache.delete(cacheKey);
const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
logger.debug('[/assistants/chat/] Cancelled run:', cancelledRun);
} catch (error) {
logger.error('[/assistants/chat/] Error cancelling run', error);
}
await sleep(2000);
let run;
try {
run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
await recordUsage({
...run.usage,
model: run.model,
user: req.user.id,
conversationId,
});
} catch (error) {
logger.error('[/assistants/chat/] Error fetching or processing run', error);
}
let finalEvent;
try {
const runMessages = await checkMessageGaps({
openai,
run_id,
endpoint,
thread_id,
conversationId,
latestMessageId: responseMessageId,
});
const errorContentPart = {
text: {
value:
error?.message ?? 'There was an error processing your request. Please try again later.',
},
type: ContentTypes.ERROR,
};
if (!Array.isArray(runMessages[runMessages.length - 1]?.content)) {
runMessages[runMessages.length - 1].content = [errorContentPart];
} else {
const contentParts = runMessages[runMessages.length - 1].content;
for (let i = 0; i < contentParts.length; i++) {
const currentPart = contentParts[i];
/** @type {CodeToolCall | RetrievalToolCall | FunctionToolCall | undefined} */
const toolCall = currentPart?.[ContentTypes.TOOL_CALL];
if (
toolCall &&
toolCall?.function &&
!(toolCall?.function?.output || toolCall?.function?.output?.length)
) {
contentParts[i] = {
...currentPart,
[ContentTypes.TOOL_CALL]: {
...toolCall,
function: {
...toolCall.function,
output: 'error processing tool',
},
},
};
}
}
runMessages[runMessages.length - 1].content.push(errorContentPart);
}
finalEvent = {
final: true,
conversation: await getConvo(req.user.id, conversationId),
runMessages,
};
} catch (error) {
logger.error('[/assistants/chat/] Error finalizing error process', error);
return sendResponse(res, messageData, 'The Assistant run failed');
}
return sendResponse(res, finalEvent);
};
const handleError = createErrorHandler({ req, res, getContext });
try {
res.on('close', async () => {
@@ -365,6 +246,9 @@ const chatV2 = async (req, res) => {
}
};
/** @type {Promise<Run>|undefined} */
let userMessagePromise;
const initializeThread = async () => {
await getRequestFileIds();
@@ -407,7 +291,7 @@ const chatV2 = async (req, res) => {
previousMessages.push(requestMessage);
/* asynchronous */
saveUserMessage({ ...requestMessage, model });
userMessagePromise = saveUserMessage(req, { ...requestMessage, model });
conversation = {
conversationId,
@@ -489,6 +373,11 @@ const chatV2 = async (req, res) => {
},
};
/** @type {undefined | TAssistantEndpoint} */
const config = req.app.locals[endpoint] ?? {};
/** @type {undefined | TBaseEndpoint} */
const allConfig = req.app.locals.all;
const streamRunManager = new StreamRunManager({
req,
res,
@@ -498,6 +387,7 @@ const chatV2 = async (req, res) => {
attachedFileIds,
parentMessageId: userMessageId,
responseMessage: openai.responseMessage,
streamRate: allConfig?.streamRate ?? config.streamRate,
// streamOptions: {
// },
@@ -510,6 +400,16 @@ const chatV2 = async (req, res) => {
response = streamRunManager;
response.text = streamRunManager.intermediateText;
const messageCache = getLogStores(CacheKeys.MESSAGES);
messageCache.set(
responseMessageId,
{
complete: true,
text: response.text,
},
Time.FIVE_MINUTES,
);
};
await processRun();
@@ -552,7 +452,10 @@ const chatV2 = async (req, res) => {
});
res.end();
await saveAssistantMessage({ ...responseMessage, model });
if (userMessagePromise) {
await userMessagePromise;
}
await saveAssistantMessage(req, { ...responseMessage, model });
if (parentMessageId === Constants.NO_PARENT && !_thread_id) {
addTitle(req, {

View File

@@ -0,0 +1,193 @@
// errorHandler.js
const { sendResponse } = require('~/server/utils');
const { logger } = require('~/config');
const getLogStores = require('~/cache/getLogStores');
const { CacheKeys, ViolationTypes, ContentTypes } = require('librechat-data-provider');
const { getConvo } = require('~/models/Conversation');
const { recordUsage, checkMessageGaps } = require('~/server/services/Threads');
/**
* @typedef {Object} ErrorHandlerContext
* @property {OpenAIClient} openai - The OpenAI client
* @property {string} thread_id - The thread ID
* @property {string} run_id - The run ID
* @property {boolean} completedRun - Whether the run has completed
* @property {string} assistant_id - The assistant ID
* @property {string} conversationId - The conversation ID
* @property {string} parentMessageId - The parent message ID
* @property {string} responseMessageId - The response message ID
* @property {string} endpoint - The endpoint being used
* @property {string} cacheKey - The cache key for the current request
*/
/**
* @typedef {Object} ErrorHandlerDependencies
* @property {Express.Request} req - The Express request object
* @property {Express.Response} res - The Express response object
* @property {() => ErrorHandlerContext} getContext - Function to get the current context
* @property {string} [originPath] - The origin path for the error handler
*/
/**
* Creates an error handler function with the given dependencies
* @param {ErrorHandlerDependencies} dependencies - The dependencies for the error handler
* @returns {(error: Error) => Promise<void>} The error handler function
*/
const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/chat/' }) => {
const cache = getLogStores(CacheKeys.ABORT_KEYS);
/**
* Handles errors that occur during the chat process
* @param {Error} error - The error that occurred
* @returns {Promise<void>}
*/
return async (error) => {
const {
openai,
run_id,
endpoint,
cacheKey,
thread_id,
completedRun,
assistant_id,
conversationId,
parentMessageId,
responseMessageId,
} = getContext();
const defaultErrorMessage =
'The Assistant run failed to initialize. Try sending a message in a new conversation.';
const messageData = {
thread_id,
assistant_id,
conversationId,
parentMessageId,
sender: 'System',
user: req.user.id,
shouldSaveMessage: false,
messageId: responseMessageId,
endpoint,
};
if (error.message === 'Run cancelled') {
return res.end();
} else if (error.message === 'Request closed' && completedRun) {
return;
} else if (error.message === 'Request closed') {
logger.debug(`[${originPath}] Request aborted on close`);
} else if (/Files.*are invalid/.test(error.message)) {
const errorMessage = `Files are invalid, or may not have uploaded yet.${
endpoint === 'azureAssistants'
? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.'
: ''
}`;
return sendResponse(req, res, messageData, errorMessage);
} else if (error?.message?.includes('string too long')) {
return sendResponse(
req,
res,
messageData,
'Message too long. The Assistants API has a limit of 32,768 characters per message. Please shorten it and try again.',
);
} else if (error?.message?.includes(ViolationTypes.TOKEN_BALANCE)) {
return sendResponse(req, res, messageData, error.message);
} else {
logger.error(`[${originPath}]`, error);
}
if (!openai || !thread_id || !run_id) {
return sendResponse(req, res, messageData, defaultErrorMessage);
}
await new Promise((resolve) => setTimeout(resolve, 2000));
try {
const status = await cache.get(cacheKey);
if (status === 'cancelled') {
logger.debug(`[${originPath}] Run already cancelled`);
return res.end();
}
await cache.delete(cacheKey);
const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
logger.debug(`[${originPath}] Cancelled run:`, cancelledRun);
} catch (error) {
logger.error(`[${originPath}] Error cancelling run`, error);
}
await new Promise((resolve) => setTimeout(resolve, 2000));
let run;
try {
run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
await recordUsage({
...run.usage,
model: run.model,
user: req.user.id,
conversationId,
});
} catch (error) {
logger.error(`[${originPath}] Error fetching or processing run`, error);
}
let finalEvent;
try {
const runMessages = await checkMessageGaps({
openai,
run_id,
endpoint,
thread_id,
conversationId,
latestMessageId: responseMessageId,
});
const errorContentPart = {
text: {
value:
error?.message ?? 'There was an error processing your request. Please try again later.',
},
type: ContentTypes.ERROR,
};
if (!Array.isArray(runMessages[runMessages.length - 1]?.content)) {
runMessages[runMessages.length - 1].content = [errorContentPart];
} else {
const contentParts = runMessages[runMessages.length - 1].content;
for (let i = 0; i < contentParts.length; i++) {
const currentPart = contentParts[i];
/** @type {CodeToolCall | RetrievalToolCall | FunctionToolCall | undefined} */
const toolCall = currentPart?.[ContentTypes.TOOL_CALL];
if (
toolCall &&
toolCall?.function &&
!(toolCall?.function?.output || toolCall?.function?.output?.length)
) {
contentParts[i] = {
...currentPart,
[ContentTypes.TOOL_CALL]: {
...toolCall,
function: {
...toolCall.function,
output: 'error processing tool',
},
},
};
}
}
runMessages[runMessages.length - 1].content.push(errorContentPart);
}
finalEvent = {
final: true,
conversation: await getConvo(req.user.id, conversationId),
runMessages,
};
} catch (error) {
logger.error(`[${originPath}] Error finalizing error process`, error);
return sendResponse(req, res, messageData, 'The Assistant run failed');
}
return sendResponse(req, res, finalEvent);
};
};
module.exports = { createErrorHandler };
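
A hedged sketch of how a controller wires up the extracted handler; the state object and its fields are placeholders, while the require path matches the import used in chatV2 above:

const { createErrorHandler } = require('~/server/controllers/assistants/errors');

function buildChatErrorHandler(req, res, state) {
  // getContext is evaluated lazily, so values filled in later (run_id, thread_id, completedRun, ...)
  // are read at the time the error is handled.
  const getContext = () => ({
    openai: state.openai,
    run_id: state.run_id,
    endpoint: state.endpoint,
    cacheKey: state.cacheKey,
    thread_id: state.thread_id,
    completedRun: state.completedRun,
    assistant_id: state.assistant_id,
    conversationId: state.conversationId,
    parentMessageId: state.parentMessageId,
    responseMessageId: state.responseMessageId,
  });

  return createErrorHandler({ req, res, getContext, originPath: '/assistants/chat/' });
}

// Usage inside a controller:
//   const handleError = buildChatErrorHandler(req, res, state);
//   try { /* run the assistant */ } catch (error) { await handleError(error); }
module.exports = buildChatErrorHandler;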

View File

@@ -1,8 +1,9 @@
const {
EModelEndpoint,
CacheKeys,
defaultAssistantsVersion,
SystemRoles,
EModelEndpoint,
defaultOrderQuery,
defaultAssistantsVersion,
} = require('librechat-data-provider');
const {
initializeClient: initAzureClient,
@@ -227,7 +228,7 @@ const fetchAssistants = async ({ req, res, overrideEndpoint }) => {
body = await listAssistantsForAzure({ req, res, version, azureConfig, query });
}
if (req.user.role === 'ADMIN') {
if (req.user.role === SystemRoles.ADMIN) {
return body;
} else if (!req.app.locals[endpoint]) {
return body;

View File

@@ -1,26 +1,22 @@
const User = require('~/models/User');
const { setAuthTokens } = require('~/server/services/AuthService');
const { logger } = require('~/config');
const loginController = async (req, res) => {
try {
const user = await User.findById(req.user._id);
// If user doesn't exist, return error
if (!user) {
// typeof user !== User) { // this doesn't seem to resolve the User type ??
if (!req.user) {
return res.status(400).json({ message: 'Invalid credentials' });
}
const token = await setAuthTokens(user._id, res);
const { password: _, __v, ...user } = req.user;
user.id = user._id.toString();
const token = await setAuthTokens(req.user._id, res);
return res.status(200).send({ token, user });
} catch (err) {
logger.error('[loginController]', err);
return res.status(500).json({ message: 'Something went wrong' });
}
// Generic error messages are safer
return res.status(500).json({ message: 'Something went wrong' });
};
module.exports = {

View File

@@ -6,16 +6,16 @@ const axios = require('axios');
const express = require('express');
const passport = require('passport');
const mongoSanitize = require('express-mongo-sanitize');
const { jwtLogin, passportLogin } = require('~/strategies');
const { connectDb, indexSync } = require('~/lib/db');
const { isEnabled } = require('~/server/utils');
const { ldapLogin } = require('~/strategies');
const { logger } = require('~/config');
const validateImageRequest = require('./middleware/validateImageRequest');
const errorController = require('./controllers/ErrorController');
const { jwtLogin, passportLogin } = require('~/strategies');
const configureSocialLogins = require('./socialLogins');
const { connectDb, indexSync } = require('~/lib/db');
const AppService = require('./services/AppService');
const noIndex = require('./middleware/noIndex');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');
const { ldapLogin } = require('~/strategies');
const routes = require('./routes');
const { PORT, HOST, ALLOW_SOCIAL_LOGIN } = process.env ?? {};
@@ -61,7 +61,7 @@ const startServer = async () => {
passport.use(passportLogin());
// LDAP Auth
if (process.env.LDAP_URL && process.env.LDAP_BIND_DN && process.env.LDAP_USER_SEARCH_BASE) {
if (process.env.LDAP_URL && process.env.LDAP_USER_SEARCH_BASE) {
passport.use(ldapLogin);
}
@@ -81,6 +81,7 @@ const startServer = async () => {
app.use('/api/convos', routes.convos);
app.use('/api/presets', routes.presets);
app.use('/api/prompts', routes.prompts);
app.use('/api/categories', routes.categories);
app.use('/api/tokenizer', routes.tokenizer);
app.use('/api/endpoints', routes.endpoints);
app.use('/api/balance', routes.balance);
@@ -91,9 +92,11 @@ const startServer = async () => {
app.use('/api/files', await routes.files.initialize());
app.use('/images/', validateImageRequest, routes.staticRoute);
app.use('/api/share', routes.share);
app.use('/api/roles', routes.roles);
app.use('/api/tags', routes.tags);
app.use((req, res) => {
res.status(404).sendFile(path.join(app.locals.paths.dist, 'index.html'));
res.sendFile(path.join(app.locals.paths.dist, 'index.html'));
});
app.listen(port, host, () => {

View File

@@ -1,31 +1,39 @@
const { isAssistantsEndpoint } = require('librechat-data-provider');
const { sendMessage, sendError, countTokens, isEnabled } = require('~/server/utils');
const { truncateText, smartTruncateText } = require('~/app/clients/prompts');
const { saveMessage, getConvo, getConvoTitle } = require('~/models');
const clearPendingReq = require('~/cache/clearPendingReq');
const abortControllers = require('./abortControllers');
const { saveMessage, getConvo } = require('~/models');
const spendTokens = require('~/models/spendTokens');
const { abortRun } = require('./abortRun');
const { logger } = require('~/config');
async function abortMessage(req, res) {
let { abortKey, conversationId, endpoint } = req.body;
if (!abortKey && conversationId) {
abortKey = conversationId;
}
let { abortKey, endpoint } = req.body;
if (isAssistantsEndpoint(endpoint)) {
return await abortRun(req, res);
}
const conversationId = abortKey?.split(':')?.[0] ?? req.user.id;
if (!abortControllers.has(abortKey) && abortControllers.has(conversationId)) {
abortKey = conversationId;
}
if (!abortControllers.has(abortKey) && !res.headersSent) {
return res.status(204).send({ message: 'Request not found' });
}
const { abortController } = abortControllers.get(abortKey);
const { abortController } = abortControllers.get(abortKey) ?? {};
if (!abortController) {
return res.status(204).send({ message: 'Request not found' });
}
const finalEvent = await abortController.abortCompletion();
logger.debug('[abortMessage] Aborted request', { abortKey });
logger.debug(
`[abortMessage] ID: ${req.user.id} | ${req.user.email} | Aborted request: ` +
JSON.stringify({ abortKey }),
);
abortControllers.delete(abortKey);
if (res.headersSent && finalEvent) {
@@ -50,12 +58,35 @@ const handleAbort = () => {
};
};
const createAbortController = (req, res, getAbortData) => {
const createAbortController = (req, res, getAbortData, getReqData) => {
const abortController = new AbortController();
const { endpointOption } = req.body;
const onStart = (userMessage) => {
abortController.getAbortData = function () {
return getAbortData();
};
/**
* @param {TMessage} userMessage
* @param {string} responseMessageId
*/
const onStart = (userMessage, responseMessageId) => {
sendMessage(res, { message: userMessage, created: true });
const abortKey = userMessage?.conversationId ?? req.user.id;
const prevRequest = abortControllers.get(abortKey);
if (prevRequest && prevRequest?.abortController) {
const data = prevRequest.abortController.getAbortData();
getReqData({ userMessage: data?.userMessage });
const addedAbortKey = `${abortKey}:${responseMessageId}`;
abortControllers.set(addedAbortKey, { abortController, ...endpointOption });
res.on('finish', function () {
abortControllers.delete(addedAbortKey);
});
return;
}
abortControllers.set(abortKey, { abortController, ...endpointOption });
res.on('finish', function () {
@@ -65,7 +96,8 @@ const createAbortController = (req, res, getAbortData) => {
abortController.abortCompletion = async function () {
abortController.abort();
const { conversationId, userMessage, promptTokens, ...responseData } = getAbortData();
const { conversationId, userMessage, userMessagePromise, promptTokens, ...responseData } =
getAbortData();
const completionTokens = await countTokens(responseData?.text ?? '');
const user = req.user.id;
@@ -87,12 +119,26 @@ const createAbortController = (req, res, getAbortData) => {
{ promptTokens, completionTokens },
);
saveMessage({ ...responseMessage, user });
saveMessage(
req,
{ ...responseMessage, user },
{ context: 'api/server/middleware/abortMiddleware.js' },
);
let conversation;
if (userMessagePromise) {
const resolved = await userMessagePromise;
conversation = resolved?.conversation;
}
if (!conversation) {
conversation = await getConvo(req.user.id, conversationId);
}
return {
title: await getConvoTitle(user, conversationId),
title: conversation && !conversation.title ? null : conversation?.title || 'New Chat',
final: true,
conversation: await getConvo(user, conversationId),
conversation,
requestMessage: userMessage,
responseMessage: responseMessage,
};
@@ -151,7 +197,7 @@ const handleAbortError = async (res, req, error, data) => {
}
};
await sendError(res, options, callback);
await sendError(req, res, options, callback);
};
if (partialText && partialText.length > 5) {

View File
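
For reference, the abort-key scheme above lets two in-flight responses in the same conversation be aborted independently. A simplified, self-contained illustration of the key handling (not the actual middleware):

// Simplified model of the abortControllers map used in abortMiddleware.js.
const abortControllers = new Map();

function registerRequest({ conversationId, responseMessageId, abortController }) {
  // A second concurrent request for the same conversation gets a compound key,
  // so aborting one response does not cancel the other.
  const key = abortControllers.has(conversationId)
    ? `${conversationId}:${responseMessageId}`
    : conversationId;
  abortControllers.set(key, { abortController });
  return key;
}

function abortByKey(abortKey) {
  // Mirrors abortMessage: fall back to the conversation-level key when the compound key is absent.
  const conversationId = abortKey?.split(':')?.[0];
  const key = abortControllers.has(abortKey) ? abortKey : conversationId;
  const entry = abortControllers.get(key);
  if (!entry) {
    return false;
  }
  entry.abortController.abort();
  abortControllers.delete(key);
  return true;
}

module.exports = { registerRequest, abortByKey };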

@@ -19,7 +19,8 @@ const validateAssistant = async (req, res, next) => {
}
const { supportedIds, excludedIds } = assistantsConfig;
const error = { message: 'Assistant not supported' };
const error = { message: 'validateAssistant: Assistant not supported' };
if (supportedIds?.length && !supportedIds.includes(assistant_id)) {
return await handleAbortError(res, req, error, {
sender: 'System',

View File

@@ -1,3 +1,4 @@
const { SystemRoles } = require('librechat-data-provider');
const { getAssistant } = require('~/models/Assistant');
/**
@@ -11,7 +12,7 @@ const { getAssistant } = require('~/models/Assistant');
* @returns {Promise<void>}
*/
const validateAuthor = async ({ req, openai, overrideEndpoint, overrideAssistantId }) => {
if (req.user.role === 'ADMIN') {
if (req.user.role === SystemRoles.ADMIN) {
return;
}

View File

@@ -0,0 +1,28 @@
const { SystemRoles } = require('librechat-data-provider');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');
/**
* Checks if the user can delete their account
*
* @async
* @function
* @param {Object} req - Express request object
* @param {Object} res - Express response object
* @param {Function} next - Next middleware function
*
* @returns {Promise<function|Object>} - Calls the next middleware if the user is allowed to delete their account; otherwise responds with 403
*/
const canDeleteAccount = async (req, res, next = () => {}) => {
const { user } = req;
const { ALLOW_ACCOUNT_DELETION = true } = process.env;
if (user?.role === SystemRoles.ADMIN || isEnabled(ALLOW_ACCOUNT_DELETION)) {
return next();
} else {
logger.error(`[User] [Delete Account] [User cannot delete account] [User: ${user?.id}]`);
return res.status(403).send({ message: 'You do not have permission to delete this account' });
}
};
module.exports = canDeleteAccount;
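
A sketch of chaining the new middleware on the account-deletion route; the route path, router wiring, and controller path are assumptions:

const express = require('express');
const { requireJwtAuth, canDeleteAccount } = require('~/server/middleware');
const { deleteUserController } = require('~/server/controllers/UserController'); // assumed path

const router = express.Router();

// Admins can always delete; other users only when ALLOW_ACCOUNT_DELETION is enabled (default true).
router.delete('/delete', requireJwtAuth, canDeleteAccount, deleteUserController);

module.exports = router;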

View File

@@ -1,11 +1,11 @@
const Keyv = require('keyv');
const uap = require('ua-parser-js');
const { ViolationTypes } = require('librechat-data-provider');
const { isEnabled, removePorts } = require('../utils');
const { isEnabled, removePorts } = require('~/server/utils');
const keyvMongo = require('~/cache/keyvMongo');
const denyRequest = require('./denyRequest');
const { getLogStores } = require('~/cache');
const User = require('~/models/User');
const { findUser } = require('~/models');
const banCache = new Keyv({ store: keyvMongo, namespace: ViolationTypes.BAN, ttl: 0 });
const message = 'Your account has been temporarily banned due to violations of our service.';
@@ -55,7 +55,7 @@ const checkBan = async (req, res, next = () => {}) => {
let userId = req.user?.id ?? req.user?._id ?? null;
if (!userId && req?.body?.email) {
const user = await User.findOne({ email: req.body.email }, '_id').lean();
const user = await findUser({ email: req.body.email }, '_id');
userId = user?._id ? user._id.toString() : userId;
}

View File

@@ -41,10 +41,14 @@ const denyRequest = async (req, res, errorMessage) => {
const shouldSaveMessage = _convoId && parentMessageId && parentMessageId !== Constants.NO_PARENT;
if (shouldSaveMessage) {
await saveMessage({ ...userMessage, user: req.user.id });
await saveMessage(
req,
{ ...userMessage, user: req.user.id },
{ context: `api/server/middleware/denyRequest.js - ${responseText}` },
);
}
return await sendError(res, {
return await sendError(req, res, {
sender: getResponseSender(req.body),
messageId: crypto.randomUUID(),
conversationId,

View File

@@ -1,47 +1,45 @@
const abortMiddleware = require('./abortMiddleware');
const checkBan = require('./checkBan');
const checkDomainAllowed = require('./checkDomainAllowed');
const uaParser = require('./uaParser');
const setHeaders = require('./setHeaders');
const loginLimiter = require('./loginLimiter');
const validateModel = require('./validateModel');
const requireJwtAuth = require('./requireJwtAuth');
const requireLdapAuth = require('./requireLdapAuth');
const uploadLimiters = require('./uploadLimiters');
const registerLimiter = require('./registerLimiter');
const messageLimiters = require('./messageLimiters');
const requireLocalAuth = require('./requireLocalAuth');
const validateEndpoint = require('./validateEndpoint');
const concurrentLimiter = require('./concurrentLimiter');
const validateMessageReq = require('./validateMessageReq');
const buildEndpointOption = require('./buildEndpointOption');
const validatePasswordReset = require('./validatePasswordReset');
const validateRegistration = require('./validateRegistration');
const validateImageRequest = require('./validateImageRequest');
const buildEndpointOption = require('./buildEndpointOption');
const validateMessageReq = require('./validateMessageReq');
const checkDomainAllowed = require('./checkDomainAllowed');
const concurrentLimiter = require('./concurrentLimiter');
const validateEndpoint = require('./validateEndpoint');
const requireLocalAuth = require('./requireLocalAuth');
const canDeleteAccount = require('./canDeleteAccount');
const requireLdapAuth = require('./requireLdapAuth');
const abortMiddleware = require('./abortMiddleware');
const requireJwtAuth = require('./requireJwtAuth');
const validateModel = require('./validateModel');
const moderateText = require('./moderateText');
const setHeaders = require('./setHeaders');
const limiters = require('./limiters');
const uaParser = require('./uaParser');
const checkBan = require('./checkBan');
const noIndex = require('./noIndex');
const importLimiters = require('./importLimiters');
const roles = require('./roles');
module.exports = {
...uploadLimiters,
...abortMiddleware,
...messageLimiters,
...limiters,
...roles,
noIndex,
checkBan,
uaParser,
setHeaders,
loginLimiter,
moderateText,
validateModel,
requireJwtAuth,
requireLdapAuth,
registerLimiter,
requireLocalAuth,
canDeleteAccount,
validateEndpoint,
concurrentLimiter,
checkDomainAllowed,
validateMessageReq,
buildEndpointOption,
validateRegistration,
validateImageRequest,
validateModel,
moderateText,
noIndex,
...importLimiters,
checkDomainAllowed,
validatePasswordReset,
};

View File

@@ -0,0 +1,22 @@
const createTTSLimiters = require('./ttsLimiters');
const createSTTLimiters = require('./sttLimiters');
const loginLimiter = require('./loginLimiter');
const importLimiters = require('./importLimiters');
const uploadLimiters = require('./uploadLimiters');
const registerLimiter = require('./registerLimiter');
const messageLimiters = require('./messageLimiters');
const verifyEmailLimiter = require('./verifyEmailLimiter');
const resetPasswordLimiter = require('./resetPasswordLimiter');
module.exports = {
...uploadLimiters,
...importLimiters,
...messageLimiters,
loginLimiter,
registerLimiter,
createTTSLimiters,
createSTTLimiters,
verifyEmailLimiter,
resetPasswordLimiter,
};

View File

@@ -1,6 +1,6 @@
const rateLimit = require('express-rate-limit');
const { logViolation } = require('../../cache');
const { removePorts } = require('../utils');
const { removePorts } = require('~/server/utils');
const { logViolation } = require('~/cache');
const { LOGIN_WINDOW = 5, LOGIN_MAX = 7, LOGIN_VIOLATION_SCORE: score } = process.env;
const windowMs = LOGIN_WINDOW * 60 * 1000;

View File

@@ -1,6 +1,6 @@
const rateLimit = require('express-rate-limit');
const { logViolation } = require('../../cache');
const denyRequest = require('./denyRequest');
const denyRequest = require('~/server/middleware/denyRequest');
const { logViolation } = require('~/cache');
const {
MESSAGE_IP_MAX = 40,

View File

@@ -1,6 +1,6 @@
const rateLimit = require('express-rate-limit');
const { logViolation } = require('../../cache');
const { removePorts } = require('../utils');
const { removePorts } = require('~/server/utils');
const { logViolation } = require('~/cache');
const { REGISTER_WINDOW = 60, REGISTER_MAX = 5, REGISTRATION_VIOLATION_SCORE: score } = process.env;
const windowMs = REGISTER_WINDOW * 60 * 1000;

View File

@@ -0,0 +1,35 @@
const rateLimit = require('express-rate-limit');
const { ViolationTypes } = require('librechat-data-provider');
const { removePorts } = require('~/server/utils');
const { logViolation } = require('~/cache');
const {
RESET_PASSWORD_WINDOW = 2,
RESET_PASSWORD_MAX = 2,
RESET_PASSWORD_VIOLATION_SCORE: score,
} = process.env;
const windowMs = RESET_PASSWORD_WINDOW * 60 * 1000;
const max = RESET_PASSWORD_MAX;
const windowInMinutes = windowMs / 60000;
const message = `Too many attempts, please try again after ${windowInMinutes} minute(s)`;
const handler = async (req, res) => {
const type = ViolationTypes.RESET_PASSWORD_LIMIT;
const errorMessage = {
type,
max,
windowInMinutes,
};
await logViolation(req, res, type, errorMessage, score);
return res.status(429).json({ message });
};
const resetPasswordLimiter = rateLimit({
windowMs,
max,
handler,
keyGenerator: removePorts,
});
module.exports = resetPasswordLimiter;

View File

@@ -0,0 +1,35 @@
const rateLimit = require('express-rate-limit');
const { ViolationTypes } = require('librechat-data-provider');
const { removePorts } = require('~/server/utils');
const { logViolation } = require('~/cache');
const {
VERIFY_EMAIL_WINDOW = 2,
VERIFY_EMAIL_MAX = 2,
VERIFY_EMAIL_VIOLATION_SCORE: score,
} = process.env;
const windowMs = VERIFY_EMAIL_WINDOW * 60 * 1000;
const max = VERIFY_EMAIL_MAX;
const windowInMinutes = windowMs / 60000;
const message = `Too many attempts, please try again after ${windowInMinutes} minute(s)`;
const handler = async (req, res) => {
const type = ViolationTypes.VERIFY_EMAIL_LIMIT;
const errorMessage = {
type,
max,
windowInMinutes,
};
await logViolation(req, res, type, errorMessage, score);
return res.status(429).json({ message });
};
const verifyEmailLimiter = rateLimit({
windowMs,
max,
handler,
keyGenerator: removePorts,
});
module.exports = verifyEmailLimiter;
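
Both new limiters follow the same shape; a short sketch of mounting them on the relevant auth routes (route paths and handlers are illustrative, and the limiters index path is assumed):

const express = require('express');
const { resetPasswordLimiter, verifyEmailLimiter } = require('~/server/middleware/limiters');

const router = express.Router();

// Per-IP throttling: by default 2 attempts per 2-minute window, then a 429 and a logged violation.
router.post('/requestPasswordReset', resetPasswordLimiter, (req, res) => res.sendStatus(200));
router.post('/verify', verifyEmailLimiter, (req, res) => res.sendStatus(200));

module.exports = router;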

View File

@@ -13,7 +13,7 @@ const requireLdapAuth = (req, res, next) => {
console.log({
title: '(requireLdapAuth) Error: No user',
});
return res.status(422).send(info);
return res.status(404).send(info);
}
req.user = user;
next();

View File

@@ -21,7 +21,13 @@ const requireLocalAuth = (req, res, next) => {
log({
title: '(requireLocalAuth) Error: No user',
});
return res.status(422).send(info);
return res.status(404).send(info);
}
if (info && info.message) {
log({
title: '(requireLocalAuth) Error: ' + info.message,
});
return res.status(422).send({ message: info.message });
}
req.user = user;
next();

View File

@@ -0,0 +1,14 @@
const { SystemRoles } = require('librechat-data-provider');
function checkAdmin(req, res, next) {
try {
if (req.user.role !== SystemRoles.ADMIN) {
return res.status(403).json({ message: 'Forbidden' });
}
next();
} catch (error) {
res.status(500).json({ message: 'Internal Server Error' });
}
}
module.exports = checkAdmin;

View File

@@ -0,0 +1,52 @@
const { SystemRoles } = require('librechat-data-provider');
const { getRoleByName } = require('~/models/Role');
/**
* Middleware to check if a user has one or more required permissions, optionally based on `req.body` properties.
*
* @param {PermissionTypes} permissionType - The type of permission to check.
* @param {Permissions[]} permissions - The list of specific permissions to check.
* @param {Record<Permissions, string[]>} [bodyProps] - An optional object where keys are permissions and values are arrays of `req.body` properties to check.
* @returns {Function} Express middleware function.
*/
const generateCheckAccess = (permissionType, permissions, bodyProps = {}) => {
return async (req, res, next) => {
try {
const { user } = req;
if (!user) {
return res.status(401).json({ message: 'Authorization required' });
}
if (user.role === SystemRoles.ADMIN) {
return next();
}
const role = await getRoleByName(user.role);
if (role && role[permissionType]) {
const hasAnyPermission = permissions.some((permission) => {
if (role[permissionType][permission]) {
return true;
}
if (bodyProps[permission] && req.body) {
return bodyProps[permission].some((prop) =>
Object.prototype.hasOwnProperty.call(req.body, prop),
);
}
return false;
});
if (hasAnyPermission) {
return next();
}
}
return res.status(403).json({ message: 'Forbidden: Insufficient permissions' });
} catch (error) {
return res.status(500).json({ message: `Server error: ${error.message}` });
}
};
};
module.exports = generateCheckAccess;
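
A hedged sketch of using the new role middleware on a route; the enum members, handlers, and module path are assumptions for illustration (the roles index shown next re-exports both helpers):

const { PermissionTypes, Permissions } = require('librechat-data-provider');
const { generateCheckAccess, checkAdmin } = require('~/server/middleware/roles'); // assumed path

// Admins short-circuit to next(); other roles need at least one of the listed permissions
// (these enum members are illustrative, not taken from this changeset).
const checkPromptAccess = generateCheckAccess(PermissionTypes.PROMPTS, [
  Permissions.USE,
  Permissions.CREATE,
]);

// e.g. router.post('/prompts', requireJwtAuth, checkPromptAccess, createPromptHandler);
// e.g. router.get('/roles/:roleName', requireJwtAuth, checkAdmin, getRoleHandler);
module.exports = { checkPromptAccess };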

View File

@@ -0,0 +1,7 @@
const checkAdmin = require('./checkAdmin');
const generateCheckAccess = require('./generateCheckAccess');
module.exports = {
checkAdmin,
generateCheckAccess,
};

View File

@@ -1,7 +0,0 @@
const createTTSLimiters = require('./ttsLimiters');
const createSTTLimiters = require('./sttLimiters');
module.exports = {
createTTSLimiters,
createSTTLimiters,
};

View File

@@ -31,10 +31,14 @@ function validateImageRequest(req, res, next) {
return res.status(403).send('Access Denied');
}
if (req.path.includes(payload.id)) {
const fullPath = decodeURIComponent(req.originalUrl);
const pathPattern = new RegExp(`^/images/${payload.id}/[^/]+$`);
if (pathPattern.test(fullPath)) {
logger.debug('[validateImageRequest] Image request validated');
next();
} else {
logger.warn('[validateImageRequest] Invalid image path');
res.status(403).send('Access Denied');
}
}

View File
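
The stricter check above anchors the whole decoded URL rather than testing a substring; for illustration, what the pattern accepts and rejects for a given user ID:

// Illustration of the anchored pattern from validateImageRequest above.
const payloadId = '66a0f1c2e4b0a1b2c3d4e5f6'; // hypothetical user id from the signed payload
const pathPattern = new RegExp(`^/images/${payloadId}/[^/]+$`);

console.log(pathPattern.test(`/images/${payloadId}/avatar.png`)); // true: exactly one segment after the id
console.log(pathPattern.test(`/images/${payloadId}/../other/secret.png`)); // false: extra path segments
console.log(pathPattern.test(`/images/other-user/${payloadId}.png`)); // false: id not in the owner position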

@@ -1,4 +1,4 @@
const { getConvo } = require('../../models');
const { getConvo } = require('~/models');
// Middleware to validate conversationId and user relationship
const validateMessageReq = async (req, res, next) => {

View File

@@ -0,0 +1,13 @@
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');
function validatePasswordReset(req, res, next) {
if (isEnabled(process.env.ALLOW_PASSWORD_RESET)) {
next();
} else {
logger.warn(`Password reset attempt while not allowed. IP: ${req.ip}`);
res.status(403).send('Password reset is not allowed.');
}
}
module.exports = validatePasswordReset;

View File

@@ -1,6 +1,7 @@
const { isEnabled } = require('~/server/utils');
function validateRegistration(req, res, next) {
const setting = process.env.ALLOW_REGISTRATION?.toLowerCase();
if (setting === 'true') {
if (isEnabled(process.env.ALLOW_REGISTRATION)) {
next();
} else {
res.status(403).send('Registration is not allowed.');

View File

@@ -25,6 +25,7 @@ afterEach(() => {
delete process.env.DOMAIN_SERVER;
delete process.env.ALLOW_REGISTRATION;
delete process.env.ALLOW_SOCIAL_LOGIN;
delete process.env.ALLOW_PASSWORD_RESET;
delete process.env.LDAP_URL;
delete process.env.LDAP_BIND_DN;
delete process.env.LDAP_BIND_CREDENTIALS;
@@ -55,6 +56,7 @@ describe.skip('GET /', () => {
process.env.DOMAIN_SERVER = 'http://test-server.com';
process.env.ALLOW_REGISTRATION = 'true';
process.env.ALLOW_SOCIAL_LOGIN = 'true';
process.env.ALLOW_PASSWORD_RESET = 'true';
process.env.LDAP_URL = 'Test LDAP URL';
process.env.LDAP_BIND_DN = 'Test LDAP Bind DN';
process.env.LDAP_BIND_CREDENTIALS = 'Test LDAP Bind Credentials';
@@ -74,10 +76,13 @@ describe.skip('GET /', () => {
openidLoginEnabled: true,
openidLabel: 'Test OpenID',
openidImageUrl: 'http://test-server.com',
ldapLoginEnabled: true,
ldap: {
enabled: true,
},
serverDomain: 'http://test-server.com',
emailLoginEnabled: 'true',
registrationEnabled: 'true',
passwordResetEnabled: 'true',
socialLoginEnabled: 'true',
});
});

View File

@@ -0,0 +1,55 @@
const request = require('supertest');
const express = require('express');
const { getLdapConfig } = require('~/server/services/Config/ldap');
const { isEnabled } = require('~/server/utils');
jest.mock('~/server/services/Config/ldap');
jest.mock('~/server/utils');
const app = express();
// Mock the route handler
app.get('/api/config', (req, res) => {
const ldapConfig = getLdapConfig();
res.json({ ldap: ldapConfig });
});
describe('LDAP Config Tests', () => {
afterEach(() => {
jest.resetAllMocks();
});
it('should return LDAP config with username property when LDAP_LOGIN_USES_USERNAME is enabled', async () => {
getLdapConfig.mockReturnValue({ enabled: true, username: true });
isEnabled.mockReturnValue(true);
const response = await request(app).get('/api/config');
expect(response.statusCode).toBe(200);
expect(response.body.ldap).toEqual({
enabled: true,
username: true,
});
});
it('should return LDAP config without username property when LDAP_LOGIN_USES_USERNAME is not enabled', async () => {
getLdapConfig.mockReturnValue({ enabled: true });
isEnabled.mockReturnValue(false);
const response = await request(app).get('/api/config');
expect(response.statusCode).toBe(200);
expect(response.body.ldap).toEqual({
enabled: true,
});
});
it('should not return LDAP config when LDAP is not enabled', async () => {
getLdapConfig.mockReturnValue(undefined);
const response = await request(app).get('/api/config');
expect(response.statusCode).toBe(200);
expect(response.body.ldap).toBeUndefined();
});
});
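The tests above pin down the shape getLdapConfig is expected to return; a plausible sketch consistent with them and with the startup-config route later in this diff (the actual implementation in ~/server/services/Config/ldap may differ):

const { isEnabled } = require('~/server/utils');

function getLdapConfig() {
  // Mirrors the ldapAuth check in the auth routes: LDAP_URL and LDAP_USER_SEARCH_BASE
  // must both be set for LDAP to be considered enabled.
  if (!process.env.LDAP_URL || !process.env.LDAP_USER_SEARCH_BASE) {
    return undefined;
  }
  const config = { enabled: true };
  if (isEnabled(process.env.LDAP_LOGIN_USES_USERNAME)) {
    // Tells the client to prompt for a username instead of an email address.
    config.username = true;
  }
  return config;
}

module.exports = { getLdapConfig };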

View File

@@ -51,8 +51,8 @@ router.post('/', setHeaders, async (req, res) => {
});
if (!overrideParentMessageId) {
await saveMessage({ ...userMessage, user: req.user.id });
await saveConvo(req.user.id, {
await saveMessage(req, { ...userMessage, user: req.user.id });
await saveConvo(req, {
...userMessage,
...endpointOption,
conversationId,
@@ -93,7 +93,7 @@ const ask = async ({
const currentTimestamp = Date.now();
if (currentTimestamp - lastSavedTimestamp > 500) {
lastSavedTimestamp = currentTimestamp;
saveMessage({
saveMessage(req, {
messageId: responseMessageId,
sender: endpointOption?.jailbreak ? 'Sydney' : 'BingAI',
conversationId,
@@ -159,7 +159,7 @@ const ask = async ({
isCreatedByUser: false,
};
await saveMessage({ ...responseMessage, user });
await saveMessage(req, { ...responseMessage, user });
responseMessage.messageId = newResponseMessageId;
// STEP2 update the conversation
@@ -183,7 +183,7 @@ const ask = async ({
}
}
await saveConvo(user, conversationUpdate);
await saveConvo(req, conversationUpdate);
conversationId = newConversationId;
// STEP3 update the user message
@@ -192,7 +192,7 @@ const ask = async ({
// If response has parentMessageId, the fake userMessage.messageId should be updated to the real one.
if (!overrideParentMessageId) {
await saveMessage({
await saveMessage(req, {
...userMessage,
user,
messageId: userMessageId,
@@ -213,7 +213,7 @@ const ask = async ({
if (userParentMessageId == Constants.NO_PARENT) {
// const title = await titleConvo({ endpoint: endpointOption?.endpoint, text, response: responseMessage });
const title = await response.details.title;
await saveConvo(user, {
await saveConvo(req, {
conversationId: conversationId,
title,
});
@@ -229,7 +229,7 @@ const ask = async ({
isCreatedByUser: false,
text: `${getPartialMessage() ?? ''}\n\nError message: "${error.message}"`,
};
await saveMessage({ ...errorMessage, user });
await saveMessage(req, { ...errorMessage, user });
handleError(res, errorMessage);
}
};
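The change repeated through this and the following ask/edit route hunks is that saveMessage and saveConvo now take the Express req object as their first argument instead of a user id, with an optional metadata object carrying a context label. A minimal sketch of the new call shape (the wrapper function and the update object are illustrative, not part of this diff):

const { saveMessage } = require('~/models');
const { saveConvo } = require('~/models/Conversation');

// Hypothetical helper showing the new signatures used throughout these hunks.
async function persistTurn(req, userMessage, update) {
  await saveMessage(req, { ...userMessage, user: req.user.id });
  await saveConvo(req, update, { context: 'POST /api/convos/update' });
}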

View File

@@ -70,8 +70,8 @@ router.post('/', setHeaders, async (req, res) => {
});
if (!overrideParentMessageId) {
await saveMessage({ ...userMessage, user: req.user.id });
await saveConvo(req.user.id, {
await saveMessage(req, { ...userMessage, user: req.user.id });
await saveConvo(req, {
...userMessage,
...endpointOption,
conversationId,
@@ -118,7 +118,7 @@ const ask = async ({
const currentTimestamp = Date.now();
if (currentTimestamp - lastSavedTimestamp > 500) {
lastSavedTimestamp = currentTimestamp;
saveMessage({
saveMessage(req, {
messageId: responseMessageId,
sender: model,
conversationId,
@@ -197,7 +197,7 @@ const ask = async ({
isCreatedByUser: false,
};
await saveMessage({ ...responseMessage, user });
await saveMessage(req, { ...responseMessage, user });
responseMessage.messageId = newResponseMessageId;
let conversationUpdate = {
@@ -216,12 +216,12 @@ const ask = async ({
conversationUpdate.invocationId = response.invocationId;
}
await saveConvo(user, conversationUpdate);
await saveConvo(req, conversationUpdate);
userMessage.messageId = newUserMessageId;
// If response has parentMessageId, the fake userMessage.messageId should be updated to the real one.
if (!overrideParentMessageId) {
await saveMessage({
await saveMessage(req, {
...userMessage,
user,
messageId: userMessageId,
@@ -245,7 +245,7 @@ const ask = async ({
response: responseMessage,
});
await saveConvo(user, {
await saveConvo(req, {
conversationId: conversationId,
title,
});
@@ -266,7 +266,7 @@ const ask = async ({
isCreatedByUser: false,
};
saveMessage({ ...responseMessage, user });
saveMessage(req, { ...responseMessage, user });
return {
title: await getConvoTitle(user, conversationId),
@@ -288,7 +288,7 @@ const ask = async ({
model,
isCreatedByUser: false,
};
await saveMessage({ ...errorMessage, user });
await saveMessage(req, { ...errorMessage, user });
handleError(res, errorMessage);
}
}

View File

@@ -1,6 +1,6 @@
const express = require('express');
const AskController = require('~/server/controllers/AskController');
const { initializeClient } = require('~/server/services/Endpoints/google');
const { initializeClient, addTitle } = require('~/server/services/Endpoints/google');
const {
setHeaders,
handleAbort,
@@ -20,7 +20,7 @@ router.post(
buildEndpointOption,
setHeaders,
async (req, res, next) => {
await AskController(req, res, next, initializeClient);
await AskController(req, res, next, initializeClient, addTitle);
},
);

View File

@@ -1,10 +1,11 @@
const express = require('express');
const throttle = require('lodash/throttle');
const { getResponseSender, Constants } = require('librechat-data-provider');
const { getResponseSender, Constants, CacheKeys, Time } = require('librechat-data-provider');
const { initializeClient } = require('~/server/services/Endpoints/gptPlugins');
const { saveMessage, getConvoTitle, getConvo } = require('~/models');
const { sendMessage, createOnProgress } = require('~/server/utils');
const { addTitle } = require('~/server/services/Endpoints/openAI');
const { saveMessage, updateMessage } = require('~/models');
const { getLogStores } = require('~/cache');
const {
handleAbort,
createAbortController,
@@ -41,6 +42,7 @@ router.post(
logger.debug('[/ask/gptPlugins]', { text, conversationId, ...endpointOption });
let userMessage;
let userMessagePromise;
let promptTokens;
let userMessageId;
let responseMessageId;
@@ -58,6 +60,8 @@ router.post(
if (key === 'userMessage') {
userMessage = data[key];
userMessageId = data[key].messageId;
} else if (key === 'userMessagePromise') {
userMessagePromise = data[key];
} else if (key === 'responseMessageId') {
responseMessageId = data[key];
} else if (key === 'promptTokens') {
@@ -68,7 +72,15 @@ router.post(
}
};
const throttledSaveMessage = throttle(saveMessage, 3000, { trailing: false });
const messageCache = getLogStores(CacheKeys.MESSAGES);
const throttledCacheSet = throttle(
(text) => {
messageCache.set(responseMessageId, text, Time.FIVE_MINUTES);
},
3000,
{ trailing: false },
);
let streaming = null;
let timer = null;
@@ -82,18 +94,7 @@ router.post(
clearTimeout(timer);
}
throttledSaveMessage({
messageId: responseMessageId,
sender,
conversationId,
parentMessageId: overrideParentMessageId || userMessageId,
text: partialText,
model: endpointOption.modelOptions.model,
unfinished: true,
error: false,
plugins,
user,
});
throttledCacheSet(partialText);
streaming = new Promise((resolve) => {
timer = setTimeout(() => {
@@ -148,18 +149,10 @@ router.post(
}
};
const onChainEnd = () => {
saveMessage({ ...userMessage, user });
sendIntermediateMessage(res, {
plugins,
parentMessageId: userMessage.messageId,
messageId: responseMessageId,
});
};
const getAbortData = () => ({
sender,
conversationId,
userMessagePromise,
messageId: responseMessageId,
parentMessageId: overrideParentMessageId ?? userMessageId,
text: getPartialText(),
@@ -167,12 +160,27 @@ router.post(
userMessage,
promptTokens,
});
const { abortController, onStart } = createAbortController(req, res, getAbortData);
const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData);
try {
endpointOption.tools = await validateTools(user, endpointOption.tools);
const { client } = await initializeClient({ req, res, endpointOption });
const onChainEnd = () => {
if (!client.skipSaveUserMessage) {
saveMessage(
req,
{ ...userMessage, user },
{ context: 'api/server/routes/ask/gptPlugins.js - onChainEnd' },
);
}
sendIntermediateMessage(res, {
plugins,
parentMessageId: userMessage.messageId,
messageId: responseMessageId,
});
};
let response = await client.sendMessage(text, {
user,
conversationId,
@@ -189,7 +197,6 @@ router.post(
progressCallback,
progressOptions: {
res,
text,
// parentMessageId: overrideParentMessageId || userMessageId,
plugins,
},
@@ -202,13 +209,14 @@ router.post(
logger.debug('[/ask/gptPlugins]', response);
response.plugins = plugins.map((p) => ({ ...p, loading: false }));
await saveMessage({ ...response, user });
const { conversation = {} } = await client.responsePromise;
conversation.title =
conversation && !conversation.title ? null : conversation?.title || 'New Chat';
sendMessage(res, {
title: await getConvoTitle(user, conversationId),
title: conversation.title,
final: true,
conversation: await getConvo(user, conversationId),
conversation,
requestMessage: userMessage,
responseMessage: response,
});
@@ -221,6 +229,15 @@ router.post(
client,
});
}
response.plugins = plugins.map((p) => ({ ...p, loading: false }));
if (response.plugins?.length > 0) {
await updateMessage(
req,
{ ...response, user },
{ context: 'api/server/routes/ask/gptPlugins.js - save plugins used' },
);
}
} catch (error) {
const partialText = getPartialText();
handleAbortError(res, req, error, {

View File

@@ -1,26 +1,27 @@
const express = require('express');
const {
resetPasswordRequestController,
resetPasswordController,
refreshController,
registrationController,
} = require('../controllers/AuthController');
const { loginController } = require('../controllers/auth/LoginController');
const { logoutController } = require('../controllers/auth/LogoutController');
resetPasswordController,
resetPasswordRequestController,
} = require('~/server/controllers/AuthController');
const { loginController } = require('~/server/controllers/auth/LoginController');
const { logoutController } = require('~/server/controllers/auth/LogoutController');
const {
checkBan,
loginLimiter,
registerLimiter,
requireJwtAuth,
registerLimiter,
requireLdapAuth,
requireLocalAuth,
resetPasswordLimiter,
validateRegistration,
} = require('../middleware');
validatePasswordReset,
} = require('~/server/middleware');
const router = express.Router();
const ldapAuth =
!!process.env.LDAP_URL && !!process.env.LDAP_BIND_DN && !!process.env.LDAP_USER_SEARCH_BASE;
const ldapAuth = !!process.env.LDAP_URL && !!process.env.LDAP_USER_SEARCH_BASE;
//Local
router.post('/logout', requireJwtAuth, logoutController);
router.post(
@@ -32,7 +33,13 @@ router.post(
);
router.post('/refresh', refreshController);
router.post('/register', registerLimiter, checkBan, validateRegistration, registrationController);
router.post('/requestPasswordReset', resetPasswordRequestController);
router.post('/resetPassword', resetPasswordController);
router.post(
'/requestPasswordReset',
resetPasswordLimiter,
checkBan,
validatePasswordReset,
resetPasswordRequestController,
);
router.post('/resetPassword', checkBan, validatePasswordReset, resetPasswordController);
module.exports = router;

View File

@@ -0,0 +1,15 @@
const express = require('express');
const router = express.Router();
const { requireJwtAuth } = require('~/server/middleware');
const { getCategories } = require('~/models/Categories');
router.get('/', requireJwtAuth, async (req, res) => {
try {
const categories = await getCategories();
res.status(200).send(categories);
} catch (error) {
res.status(500).send({ message: 'Failed to retrieve categories', error: error.message });
}
});
module.exports = router;

View File

@@ -1,20 +1,41 @@
const express = require('express');
const { defaultSocialLogins } = require('librechat-data-provider');
const { CacheKeys, defaultSocialLogins } = require('librechat-data-provider');
const { getLdapConfig } = require('~/server/services/Config/ldap');
const { getProjectByName } = require('~/models/Project');
const { isEnabled } = require('~/server/utils');
const { getLogStores } = require('~/cache');
const { logger } = require('~/config');
const router = express.Router();
const emailLoginEnabled =
process.env.ALLOW_EMAIL_LOGIN === undefined || isEnabled(process.env.ALLOW_EMAIL_LOGIN);
const passwordResetEnabled = isEnabled(process.env.ALLOW_PASSWORD_RESET);
const sharedLinksEnabled =
process.env.ALLOW_SHARED_LINKS === undefined || isEnabled(process.env.ALLOW_SHARED_LINKS);
const publicSharedLinksEnabled =
sharedLinksEnabled &&
(process.env.ALLOW_SHARED_LINKS_PUBLIC === undefined ||
isEnabled(process.env.ALLOW_SHARED_LINKS_PUBLIC));
router.get('/', async function (req, res) {
const cache = getLogStores(CacheKeys.CONFIG_STORE);
const cachedStartupConfig = await cache.get(CacheKeys.STARTUP_CONFIG);
if (cachedStartupConfig) {
res.send(cachedStartupConfig);
return;
}
const isBirthday = () => {
const today = new Date();
return today.getMonth() === 1 && today.getDate() === 11;
};
const ldapLoginEnabled =
!!process.env.LDAP_URL && !!process.env.LDAP_BIND_DN && !!process.env.LDAP_USER_SEARCH_BASE;
const instanceProject = await getProjectByName('instance', '_id');
const ldap = getLdapConfig();
try {
/** @type {TStartupConfig} */
const payload = {
@@ -32,16 +53,16 @@ router.get('/', async function (req, res) {
!!process.env.OPENID_SESSION_SECRET,
openidLabel: process.env.OPENID_BUTTON_LABEL || 'Continue with OpenID',
openidImageUrl: process.env.OPENID_IMAGE_URL,
ldapLoginEnabled,
serverDomain: process.env.DOMAIN_SERVER || 'http://localhost:3080',
emailLoginEnabled,
registrationEnabled: !ldapLoginEnabled && isEnabled(process.env.ALLOW_REGISTRATION),
registrationEnabled: !ldap?.enabled && isEnabled(process.env.ALLOW_REGISTRATION),
socialLoginEnabled: isEnabled(process.env.ALLOW_SOCIAL_LOGIN),
emailEnabled:
(!!process.env.EMAIL_SERVICE || !!process.env.EMAIL_HOST) &&
!!process.env.EMAIL_USERNAME &&
!!process.env.EMAIL_PASSWORD &&
!!process.env.EMAIL_FROM,
passwordResetEnabled,
checkBalance: isEnabled(process.env.CHECK_BALANCE),
showBirthdayIcon:
isBirthday() ||
@@ -50,12 +71,21 @@ router.get('/', async function (req, res) {
helpAndFaqURL: process.env.HELP_AND_FAQ_URL || 'https://librechat.ai',
interface: req.app.locals.interfaceConfig,
modelSpecs: req.app.locals.modelSpecs,
sharedLinksEnabled,
publicSharedLinksEnabled,
analyticsGtmId: process.env.ANALYTICS_GTM_ID,
instanceProjectId: instanceProject._id.toString(),
};
if (ldap) {
payload.ldap = ldap;
}
if (typeof process.env.CUSTOM_FOOTER === 'string') {
payload.customFooter = process.env.CUSTOM_FOOTER;
}
await cache.set(CacheKeys.STARTUP_CONFIG, payload);
return res.status(200).send(payload);
} catch (err) {
logger.error('Error in startup config', err);

View File

@@ -3,12 +3,12 @@ const express = require('express');
const { CacheKeys } = require('librechat-data-provider');
const { initializeClient } = require('~/server/services/Endpoints/assistants');
const { getConvosByPage, deleteConvos, getConvo, saveConvo } = require('~/models/Conversation');
const { IMPORT_CONVERSATION_JOB_NAME } = require('~/server/utils/import/jobDefinition');
const { storage, importFileFilter } = require('~/server/routes/files/multer');
const requireJwtAuth = require('~/server/middleware/requireJwtAuth');
const { forkConversation } = require('~/server/utils/import/fork');
const { importConversations } = require('~/server/utils/import');
const { createImportLimiters } = require('~/server/middleware');
const jobScheduler = require('~/server/utils/jobScheduler');
const { updateTagsForConversation } = require('~/models/ConversationTag');
const getLogStores = require('~/cache/getLogStores');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');
@@ -31,8 +31,13 @@ router.get('/', async (req, res) => {
return res.status(400).json({ error: 'Invalid page size' });
}
const isArchived = req.query.isArchived === 'true';
const tags = req.query.tags
? Array.isArray(req.query.tags)
? req.query.tags
: [req.query.tags]
: undefined;
res.status(200).send(await getConvosByPage(req.user.id, pageNumber, pageSize, isArchived));
res.status(200).send(await getConvosByPage(req.user.id, pageNumber, pageSize, isArchived, tags));
});
router.get('/:conversationId', async (req, res) => {
@@ -105,7 +110,7 @@ router.post('/update', async (req, res) => {
const update = req.body.arg;
try {
const dbResponse = await saveConvo(req.user.id, update);
const dbResponse = await saveConvo(req, update, { context: 'POST /api/convos/update' });
res.status(201).json(dbResponse);
} catch (error) {
logger.error('Error updating conversation', error);
@@ -129,10 +134,9 @@ router.post(
upload.single('file'),
async (req, res) => {
try {
const filepath = req.file.path;
const job = await jobScheduler.now(IMPORT_CONVERSATION_JOB_NAME, filepath, req.user.id);
res.status(201).json({ message: 'Import started', jobId: job.id });
/* TODO: optimize to return imported conversations and add manually */
await importConversations({ filepath: req.file.path, requestUserId: req.user.id });
res.status(201).json({ message: 'Conversation(s) imported successfully' });
} catch (error) {
logger.error('Error processing file', error);
res.status(500).send('Error processing file');
@@ -169,24 +173,9 @@ router.post('/fork', async (req, res) => {
}
});
// Get the status of an import job for polling
router.get('/import/jobs/:jobId', async (req, res) => {
try {
const { jobId } = req.params;
const { userId, ...jobStatus } = await jobScheduler.getJobStatus(jobId);
if (!jobStatus) {
return res.status(404).json({ message: 'Job not found.' });
}
if (userId !== req.user.id) {
return res.status(403).json({ message: 'Unauthorized' });
}
res.json(jobStatus);
} catch (error) {
logger.error('Error getting job details', error);
res.status(500).send('Error getting job details');
}
router.put('/tags/:conversationId', async (req, res) => {
const tag = await updateTagsForConversation(req.user.id, req.params.conversationId, req.body);
res.status(200).json(tag);
});
module.exports = router;
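A hypothetical client-side call against the new tags endpoint above; the request body shape is a guess, since the route passes req.body straight through to updateTagsForConversation:

// Assumes a browser or Node 18+ environment with fetch, a known conversationId,
// and that authentication cookies/headers are already in place.
await fetch(`/api/convos/tags/${conversationId}`, {
  method: 'PUT',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify(['Saved', 'Research']),
});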

View File

@@ -1,19 +1,20 @@
const express = require('express');
const throttle = require('lodash/throttle');
const { getResponseSender } = require('librechat-data-provider');
const { getResponseSender, CacheKeys, Time } = require('librechat-data-provider');
const {
handleAbort,
createAbortController,
handleAbortError,
setHeaders,
handleAbort,
moderateText,
validateModel,
handleAbortError,
validateEndpoint,
buildEndpointOption,
moderateText,
createAbortController,
} = require('~/server/middleware');
const { sendMessage, createOnProgress, formatSteps, formatAction } = require('~/server/utils');
const { initializeClient } = require('~/server/services/Endpoints/gptPlugins');
const { saveMessage, getConvoTitle, getConvo } = require('~/models');
const { saveMessage, updateMessage } = require('~/models');
const { getLogStores } = require('~/cache');
const { validateTools } = require('~/app');
const { logger } = require('~/config');
@@ -49,6 +50,7 @@ router.post(
});
let userMessage;
let userMessagePromise;
let promptTokens;
const sender = getResponseSender({
...endpointOption,
@@ -68,6 +70,8 @@ router.post(
for (let key in data) {
if (key === 'userMessage') {
userMessage = data[key];
} else if (key === 'userMessagePromise') {
userMessagePromise = data[key];
} else if (key === 'responseMessageId') {
responseMessageId = data[key];
} else if (key === 'promptTokens') {
@@ -76,7 +80,15 @@ router.post(
}
};
const throttledSaveMessage = throttle(saveMessage, 3000, { trailing: false });
const messageCache = getLogStores(CacheKeys.MESSAGES);
const throttledCacheSet = throttle(
(text) => {
messageCache.set(responseMessageId, text, Time.FIVE_MINUTES);
},
3000,
{ trailing: false },
);
const {
onProgress: progressCallback,
sendIntermediateMessage,
@@ -87,42 +99,19 @@ router.post(
if (plugin.loading === true) {
plugin.loading = false;
}
throttledSaveMessage({
messageId: responseMessageId,
sender,
conversationId,
parentMessageId: overrideParentMessageId || userMessageId,
text: partialText,
model: endpointOption.modelOptions.model,
unfinished: true,
isEdited: true,
error: false,
user,
});
throttledCacheSet(partialText);
},
});
const onAgentAction = (action, start = false) => {
const formattedAction = formatAction(action);
plugin.inputs.push(formattedAction);
plugin.latest = formattedAction.plugin;
if (!start) {
saveMessage({ ...userMessage, user });
}
sendIntermediateMessage(res, {
plugin,
parentMessageId: userMessage.messageId,
messageId: responseMessageId,
});
// logger.debug('PLUGIN ACTION', formattedAction);
};
const onChainEnd = (data) => {
let { intermediateSteps: steps } = data;
plugin.outputs = steps && steps[0].action ? formatSteps(steps) : 'An error occurred.';
plugin.loading = false;
saveMessage({ ...userMessage, user });
saveMessage(
req,
{ ...userMessage, user },
{ context: 'api/server/routes/ask/gptPlugins.js - onChainEnd' },
);
sendIntermediateMessage(res, {
plugin,
parentMessageId: userMessage.messageId,
@@ -134,6 +123,7 @@ router.post(
const getAbortData = () => ({
sender,
conversationId,
userMessagePromise,
messageId: responseMessageId,
parentMessageId: overrideParentMessageId ?? userMessageId,
text: getPartialText(),
@@ -141,12 +131,31 @@ router.post(
userMessage,
promptTokens,
});
const { abortController, onStart } = createAbortController(req, res, getAbortData);
const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData);
try {
endpointOption.tools = await validateTools(user, endpointOption.tools);
const { client } = await initializeClient({ req, res, endpointOption });
const onAgentAction = (action, start = false) => {
const formattedAction = formatAction(action);
plugin.inputs.push(formattedAction);
plugin.latest = formattedAction.plugin;
if (!start && !client.skipSaveUserMessage) {
saveMessage(
req,
{ ...userMessage, user },
{ context: 'api/server/routes/ask/gptPlugins.js - onAgentAction' },
);
}
sendIntermediateMessage(res, {
plugin,
parentMessageId: userMessage.messageId,
messageId: responseMessageId,
});
// logger.debug('PLUGIN ACTION', formattedAction);
};
let response = await client.sendMessage(text, {
user,
generation,
@@ -164,7 +173,6 @@ router.post(
progressCallback,
progressOptions: {
res,
text,
plugin,
// parentMessageId: overrideParentMessageId || userMessageId,
},
@@ -176,17 +184,26 @@ router.post(
}
logger.debug('[/edit/gptPlugins] CLIENT RESPONSE', response);
response.plugin = { ...plugin, loading: false };
await saveMessage({ ...response, user });
const { conversation = {} } = await client.responsePromise;
conversation.title =
conversation && !conversation.title ? null : conversation?.title || 'New Chat';
sendMessage(res, {
title: await getConvoTitle(user, conversationId),
title: conversation.title,
final: true,
conversation: await getConvo(user, conversationId),
conversation,
requestMessage: userMessage,
responseMessage: response,
});
res.end();
response.plugin = { ...plugin, loading: false };
await updateMessage(
req,
{ ...response, user },
{ context: 'api/server/routes/edit/gptPlugins.js' },
);
} catch (error) {
const partialText = getPartialText();
handleAbortError(res, req, error, {

View File

@@ -1,13 +1,11 @@
const express = require('express');
const { uaParser, checkBan, requireJwtAuth, createFileLimiters } = require('~/server/middleware');
const { createTTSLimiters, createSTTLimiters } = require('~/server/middleware/speech');
const { createMulterInstance } = require('./multer');
const files = require('./files');
const images = require('./images');
const avatar = require('./avatar');
const stt = require('./stt');
const tts = require('./tts');
const speech = require('./speech');
const initialize = async () => {
const router = express.Router();
@@ -15,11 +13,8 @@ const initialize = async () => {
router.use(checkBan);
router.use(uaParser);
/* Important: stt/tts routes must be added before the upload limiters */
const { sttIpLimiter, sttUserLimiter } = createSTTLimiters();
const { ttsIpLimiter, ttsUserLimiter } = createTTSLimiters();
router.use('/stt', sttIpLimiter, sttUserLimiter, stt);
router.use('/tts', ttsIpLimiter, ttsUserLimiter, tts);
/* Important: speech route must be added before the upload limiters */
router.use('/speech', speech);
const upload = await createMulterInstance();
const { fileUploadIpLimiter, fileUploadUserLimiter } = createFileLimiters();

View File

@@ -0,0 +1,10 @@
const express = require('express');
const router = express.Router();
const { getCustomConfigSpeech } = require('~/server/services/Files/Audio');
router.get('/get', async (req, res) => {
await getCustomConfigSpeech(req, res);
});
module.exports = router;

View File

@@ -0,0 +1,17 @@
const express = require('express');
const { createTTSLimiters, createSTTLimiters } = require('~/server/middleware');
const stt = require('./stt');
const tts = require('./tts');
const customConfigSpeech = require('./customConfigSpeech');
const router = express.Router();
const { sttIpLimiter, sttUserLimiter } = createSTTLimiters();
const { ttsIpLimiter, ttsUserLimiter } = createTTSLimiters();
router.use('/stt', sttIpLimiter, sttUserLimiter, stt);
router.use('/tts', ttsIpLimiter, ttsUserLimiter, tts);
router.use('/config', customConfigSpeech);
module.exports = router;

Some files were not shown because too many files have changed in this diff.