Compare commits: v0.7.4-rc1...v0.7.4
115 Commits
| SHA1 |
|---|
| cf69b7ef85 |
| 3a5f57fd2b |
| 22c8b6f699 |
| 1ff4841603 |
| 6fead1005b |
| 8a9476a795 |
| 59ae2914aa |
| 473859b0e2 |
| e05a6d306d |
| 016ed866a3 |
| 6ea2628b56 |
| cf393b1308 |
| 5c99d93744 |
| b3821c1404 |
| 2bb0842650 |
| b390ba781f |
| 01a88991ab |
| 270c6d2350 |
| c2a79aee1b |
| 6879de0bf1 |
| 80773d0bce |
| 458dc9c88e |
| 3e0f95458f |
| b6fe7e5570 |
| e4ac42f034 |
| 020c4a1a0e |
| 5bbe424137 |
| 5baa95bd9a |
| 389b2a6cab |
| 11bfed7126 |
| 433d8f832a |
| 62854c91d3 |
| cd326c4cf0 |
| e15d0d354d |
| b8a6efa649 |
| 27ab00c6a9 |
| 51cd847606 |
| 4ffdefc2a8 |
| 2d5f704695 |
| 3fd25920d4 |
| d03f8285db |
| e565e0faab |
| d4d56281e3 |
| 14eff23b57 |
| 18fd8f1416 |
| ba9cb71245 |
| e5dfa06e6c |
| 0bd59c0efe |
| 344297021f |
| 620973436c |
| 422d1a2c91 |
| 2ad097647c |
| 9e7615f832 |
| ee4dd1b2e9 |
| f6125ccd59 |
| 1acd47a0f6 |
| 5d40d0a37a |
| 1c282d1517 |
| 73dbf3eb20 |
| 237a0de8b6 |
| d5782ac66c |
| d5d188eebf |
| 0a1d38e318 |
| 5ef71a7a36 |
| 326069d7a6 |
| 785430daf5 |
| 03fe361917 |
| b34a4ddac1 |
| 7d5b03dd98 |
| f959ee302c |
| cd00df69bb |
| a05e2c1dcc |
| 87bdbda10a |
| 605a8ae8c9 |
| a724635998 |
| 6c306a662c |
| 55f8d9910e |
| 7edb54889b |
| 71d9e841b1 |
| e76777d298 |
| 1edbfdbce2 |
| 1aad315de6 |
| 5d985746cb |
| 04654014b2 |
| 456793772b |
| a87d4e0b75 |
| a2fd975cd5 |
| 83619de158 |
| b8f2bee3fc |
| 81292bb4dd |
| ed5ee1f86f |
| 791b0139bc |
| 156c52e293 |
| eef894e608 |
| e2867eecc9 |
| dd563e0796 |
| c99cf1b4b1 |
| b5081bfe86 |
| aac01df80c |
| 24467dd626 |
| b2b469bd3d |
| cec2e57ee9 |
| a8c874267f |
| a53312bbd4 |
| ab74685476 |
| 015215b790 |
| 4e4de88faa |
| 3172381bad |
| 54b1095239 |
| 0424f8fe55 |
| 4319c62e66 |
| d3a0b862db |
| 5d8793c5d1 |
| 54db67449a |
| 0cd3c83328 |
59  .env.example

@@ -80,14 +80,13 @@ PROXY=
#============#
ANTHROPIC_API_KEY=user_provided
# ANTHROPIC_MODELS=claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_MODELS=claude-3-5-sonnet-20240620,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_REVERSE_PROXY=

#============#
# Azure #
#============#

# Note: these variables are DEPRECATED
# Use the `librechat.yaml` configuration for `azureOpenAI` instead
# You may also continue to use them if you opt out of using the `librechat.yaml` configuration

@@ -117,32 +116,38 @@ BINGAI_TOKEN=user_provided
GOOGLE_KEY=user_provided
# GOOGLE_REVERSE_PROXY=

# Gemini API
# Gemini API (AI Studio)
# GOOGLE_MODELS=gemini-1.5-flash-latest,gemini-1.0-pro,gemini-1.0-pro-001,gemini-1.0-pro-latest,gemini-1.0-pro-vision-latest,gemini-1.5-pro-latest,gemini-pro,gemini-pro-vision

# Vertex AI
# GOOGLE_MODELS=gemini-1.5-flash-preview-0514,gemini-1.5-pro-preview-0514,gemini-1.0-pro-vision-001,gemini-1.0-pro-002,gemini-1.0-pro-001,gemini-pro-vision,gemini-1.0-pro

# Google Gemini Safety Settings
# NOTE (Vertex AI): You do not have access to the BLOCK_NONE setting by default.
# To use this restricted HarmBlockThreshold setting, you will need to either:
# GOOGLE_TITLE_MODEL=gemini-pro

# Google Safety Settings
# NOTE: These settings apply to both Vertex AI and Gemini API (AI Studio)
#
# (a) Get access through an allowlist via your Google account team
# (b) Switch your account type to monthly invoiced billing following this instruction:
# https://cloud.google.com/billing/docs/how-to/invoiced-billing
# For Vertex AI:
# To use the BLOCK_NONE setting, you need either:
# (a) Access through an allowlist via your Google account team, or
# (b) Switch to monthly invoiced billing: https://cloud.google.com/billing/docs/how-to/invoiced-billing
#
# For Gemini API (AI Studio):
# BLOCK_NONE is available by default, no special account requirements.
#
# Available options: BLOCK_NONE, BLOCK_ONLY_HIGH, BLOCK_MEDIUM_AND_ABOVE, BLOCK_LOW_AND_ABOVE
#
# GOOGLE_SAFETY_SEXUALLY_EXPLICIT=BLOCK_ONLY_HIGH
# GOOGLE_SAFETY_HATE_SPEECH=BLOCK_ONLY_HIGH
# GOOGLE_SAFETY_HARASSMENT=BLOCK_ONLY_HIGH
# GOOGLE_SAFETY_DANGEROUS_CONTENT=BLOCK_ONLY_HIGH

#============#
# OpenAI #
#============#

OPENAI_API_KEY=user_provided
# OPENAI_MODELS=gpt-4o,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
# OPENAI_MODELS=gpt-4o,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k

DEBUG_OPENAI=false

@@ -164,7 +169,7 @@ DEBUG_OPENAI=false
ASSISTANTS_API_KEY=user_provided
# ASSISTANTS_BASE_URL=
# ASSISTANTS_MODELS=gpt-4o,gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview
# ASSISTANTS_MODELS=gpt-4o,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview

#==========================#
# Azure Assistants API #

@@ -186,7 +191,7 @@ ASSISTANTS_API_KEY=user_provided
# Plugins #
#============#

# PLUGIN_MODELS=gpt-4o,gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613
# PLUGIN_MODELS=gpt-4o,gpt-4o-mini,gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613

DEBUG_PLUGINS=true

@@ -259,7 +264,6 @@ MEILI_NO_ANALYTICS=true
MEILI_HOST=http://0.0.0.0:7700
MEILI_MASTER_KEY=DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFCt

#==================================================#
# Speech to Text & Text to Speech #
#==================================================#

@@ -267,6 +271,16 @@ MEILI_MASTER_KEY=DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFCt
STT_API_KEY=
TTS_API_KEY=

#==================================================#
# RAG #
#==================================================#
# More info: https://www.librechat.ai/docs/configuration/rag_api

# RAG_OPENAI_BASEURL=
# RAG_OPENAI_API_KEY=
# EMBEDDINGS_PROVIDER=openai
# EMBEDDINGS_MODEL=text-embedding-3-small

#===================================================#
# User System #
#===================================================#

@@ -372,6 +386,11 @@ LDAP_BIND_CREDENTIALS=
LDAP_USER_SEARCH_BASE=
LDAP_SEARCH_FILTER=mail={{username}}
LDAP_CA_CERT_PATH=
# LDAP_TLS_REJECT_UNAUTHORIZED=
# LDAP_LOGIN_USES_USERNAME=true
# LDAP_ID=
# LDAP_USERNAME=
# LDAP_FULL_NAME=

#========================#
# Email Password Reset #

@@ -406,6 +425,18 @@ FIREBASE_APP_ID=
ALLOW_SHARED_LINKS=true
ALLOW_SHARED_LINKS_PUBLIC=true

#==============================#
# Static File Cache Control #
#==============================#

# Leave commented out to use default of 1 month for max-age and 1 week for s-maxage
# NODE_ENV must be set to production for these to take effect
# STATIC_CACHE_MAX_AGE=604800
# STATIC_CACHE_S_MAX_AGE=259200

# If you have another service in front of your LibreChat doing compression, disable express based compression here
# DISABLE_COMPRESSION=true

#===================================================#
# UI #
#===================================================#
@@ -12,6 +12,7 @@ module.exports = {
'plugin:react-hooks/recommended',
'plugin:jest/recommended',
'prettier',
'plugin:jsx-a11y/recommended',
],
ignorePatterns: [
'client/dist/**/*',

@@ -32,7 +33,7 @@ module.exports = {
jsx: true,
},
},
plugins: ['react', 'react-hooks', '@typescript-eslint', 'import'],
plugins: ['react', 'react-hooks', '@typescript-eslint', 'import', 'jsx-a11y'],
rules: {
'react/react-in-jsx-scope': 'off',
'@typescript-eslint/ban-ts-comment': ['error', { 'ts-ignore': 'allow' }],

@@ -65,6 +66,7 @@ module.exports = {
'no-restricted-syntax': 'off',
'react/prop-types': ['off'],
'react/display-name': ['off'],
'no-nested-ternary': 'error',
'no-unused-vars': ['error', { varsIgnorePattern: '^_' }],
quotes: ['error', 'single'],
},

@@ -118,6 +120,8 @@ module.exports = {
],
rules: {
'@typescript-eslint/no-explicit-any': 'error',
'@typescript-eslint/no-unnecessary-condition': 'warn',
'@typescript-eslint/strict-boolean-expressions': 'warn',
},
},
{
17  .github/workflows/a11y.yml (vendored, new file)

@@ -0,0 +1,17 @@
name: Lint for accessibility issues

on:
  pull_request:
    paths:
      - 'client/src/**'

jobs:
  axe-linter:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4
      - uses: dequelabs/axe-linter-action@v1
        with:
          api_key: ${{ secrets.AXE_LINTER_API_KEY }}
          github_token: ${{ secrets.GITHUB_TOKEN }}
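The new workflow runs Deque's axe-linter on pull requests that touch `client/src`, complementing the `plugin:jsx-a11y/recommended` rules added to the ESLint config above. A minimal sketch of the kind of markup those rules flag; the component below is hypothetical, not from the LibreChat codebase:

```js
// Hypothetical React component illustrating what jsx-a11y/recommended reports.
import React from 'react';

// Flagged: jsx-a11y/alt-text (image without alternative text) and
// jsx-a11y/click-events-have-key-events (click handler with no keyboard handler).
export function Bad({ onOpen }) {
  return (
    <div onClick={onOpen}>
      <img src="/logo.png" />
    </div>
  );
}

// Passes: alt text is provided and the interactive element is a real button.
export function Good({ onOpen }) {
  return (
    <button type="button" onClick={onOpen}>
      <img src="/logo.png" alt="LibreChat logo" />
    </button>
  );
}
```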
41  .github/workflows/deploy-dev.yml (vendored, new file)

@@ -0,0 +1,41 @@
name: Update Test Server

on:
  workflow_run:
    workflows: ["Docker Dev Images Build"]
    types:
      - completed
  workflow_dispatch:

jobs:
  deploy:
    runs-on: ubuntu-latest
    if: |
      github.repository == 'danny-avila/LibreChat' &&
      (github.event_name == 'workflow_dispatch' || github.event.workflow_run.conclusion == 'success')
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Install SSH Key
        uses: shimataro/ssh-key-action@v2
        with:
          key: ${{ secrets.DO_SSH_PRIVATE_KEY }}
          known_hosts: ${{ secrets.DO_KNOWN_HOSTS }}

      - name: Run update script on DigitalOcean Droplet
        env:
          DO_HOST: ${{ secrets.DO_HOST }}
          DO_USER: ${{ secrets.DO_USER }}
        run: |
          ssh -o StrictHostKeyChecking=no ${DO_USER}@${DO_HOST} << EOF
            sudo -i -u danny bash << EEOF
              cd ~/LibreChat && \
              git fetch origin main && \
              npm run update:deployed && \
              git checkout do-deploy && \
              git rebase main && \
              npm run start:deployed && \
              echo "Update completed. Application should be running now."
          EEOF
          EOF
35  .github/workflows/helmcharts.yml (vendored, new file)

@@ -0,0 +1,35 @@
name: Build Helm Charts on Tag

# The workflow is triggered when a tag is pushed
on:
  push:
    tags:
      - "*"

jobs:
  release:
    permissions:
      contents: write
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Configure Git
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"

      - name: Install Helm
        uses: azure/setup-helm@v4
        env:
          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"

      - name: Run chart-releaser
        uses: helm/chart-releaser-action@v1.6.0
        with:
          charts_dir: helmchart
        env:
          CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
2  .gitignore (vendored)

@@ -11,6 +11,7 @@ logs
pids
*.pid
*.seed
.git

# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

@@ -45,6 +46,7 @@ api/node_modules/
client/node_modules/
bower_components/
*.d.ts
!vite-env.d.ts

# Floobits
.floo
@@ -1,4 +1,4 @@
# v0.7.3
# v0.7.4

# Base node image
FROM node:20-alpine AS node

@@ -1,4 +1,4 @@
# v0.7.3
# v0.7.4

# Build API, Client and Data Provider
FROM node:20-alpine AS base

@@ -38,6 +38,6 @@ ENV HOST=0.0.0.0
CMD ["node", "server/index.js"]

# Nginx setup
FROM nginx:1.21.1-alpine AS prod-stage
FROM nginx:1.27.0-alpine AS prod-stage
COPY ./client/nginx.conf /etc/nginx/conf.d/default.conf
CMD ["nginx", "-g", "daemon off;"]
@@ -27,7 +27,7 @@
</p>

<p align="center">
<a href="https://railway.app/template/b5k2mn?referralCode=myKrVZ">
<a href="https://railway.app/template/b5k2mn?referralCode=HI9hWz">
<img src="https://railway.app/button.svg" alt="Deploy on Railway" height="30">
</a>
<a href="https://zeabur.com/templates/0X2ZY8">

@@ -50,7 +50,7 @@
- 🔄 Edit, Resubmit, and Continue Messages with Conversation branching
- 🌿 Fork Messages & Conversations for Advanced Context control
- 💬 Multimodal Chat:
- Upload and analyze images with Claude 3, GPT-4 (including `gpt-4o`), and Gemini Vision 📸
- Upload and analyze images with Claude 3, GPT-4 (including `gpt-4o` and `gpt-4o-mini`), and Gemini Vision 📸
- Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, & Google. 🗃️
- Advanced Agents with Files, Code Interpreter, Tools, and API Actions 🔦
- Available through the [OpenAI Assistants API](https://platform.openai.com/docs/assistants/overview) 🌤️
@@ -2,8 +2,10 @@ const Anthropic = require('@anthropic-ai/sdk');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
getResponseSender,
Constants,
EModelEndpoint,
anthropicSettings,
getResponseSender,
validateVisionModel,
} = require('librechat-data-provider');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');

@@ -16,6 +18,7 @@ const {
} = require('./prompts');
const spendTokens = require('~/models/spendTokens');
const { getModelMaxTokens } = require('~/utils');
const { sleep } = require('~/server/utils');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');

@@ -29,6 +32,8 @@ function delayBeforeRetry(attempts, baseDelay = 1000) {
return new Promise((resolve) => setTimeout(resolve, baseDelay * attempts));
}

const { legacy } = anthropicSettings;

class AnthropicClient extends BaseClient {
constructor(apiKey, options = {}) {
super(apiKey, options);

@@ -61,15 +66,20 @@ class AnthropicClient extends BaseClient {
const modelOptions = this.options.modelOptions || {};
this.modelOptions = {
...modelOptions,
// set some good defaults (check for undefined in some cases because they may be 0)
model: modelOptions.model || 'claude-1',
temperature: typeof modelOptions.temperature === 'undefined' ? 1 : modelOptions.temperature, // 0 - 1, 1 is default
topP: typeof modelOptions.topP === 'undefined' ? 0.7 : modelOptions.topP, // 0 - 1, default: 0.7
topK: typeof modelOptions.topK === 'undefined' ? 40 : modelOptions.topK, // 1-40, default: 40
stop: modelOptions.stop, // no stop method for now
model: modelOptions.model || anthropicSettings.model.default,
};

this.isClaude3 = this.modelOptions.model.includes('claude-3');
this.isLegacyOutput = !this.modelOptions.model.includes('claude-3-5-sonnet');

if (
this.isLegacyOutput &&
this.modelOptions.maxOutputTokens &&
this.modelOptions.maxOutputTokens > legacy.maxOutputTokens.default
) {
this.modelOptions.maxOutputTokens = legacy.maxOutputTokens.default;
}

this.useMessages = this.isClaude3 || !!this.options.attachments;

this.defaultVisionModel = this.options.visionModel ?? 'claude-3-sonnet-20240229';

@@ -119,10 +129,11 @@ class AnthropicClient extends BaseClient {

/**
* Get the initialized Anthropic client.
* @param {Partial<Anthropic.ClientOptions>} requestOptions - The options for the client.
* @returns {Anthropic} The Anthropic client instance.
*/
getClient() {
/** @type {Anthropic.default.RequestOptions} */
getClient(requestOptions) {
/** @type {Anthropic.ClientOptions} */
const options = {
fetch: this.fetch,
apiKey: this.apiKey,

@@ -136,6 +147,12 @@ class AnthropicClient extends BaseClient {
options.baseURL = this.options.reverseProxyUrl;
}

if (requestOptions?.model && requestOptions.model.includes('claude-3-5-sonnet')) {
options.defaultHeaders = {
'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15',
};
}

return new Anthropic(options);
}
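In the `getClient` change above, requests that target a `claude-3-5-sonnet` model get the `anthropic-beta: max-tokens-3-5-sonnet-2024-07-15` header added to the SDK's default headers. A minimal standalone sketch of the same idea with `@anthropic-ai/sdk`; the model name comes from the .env.example diff above, and the 8192-token request is an assumption used only for illustration:

```js
// Standalone sketch, not the LibreChat client itself.
const Anthropic = require('@anthropic-ai/sdk');

const anthropic = new Anthropic({
  apiKey: process.env.ANTHROPIC_API_KEY,
  // Header value taken from the diff above; forwarded on every request.
  defaultHeaders: {
    'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15',
  },
});

async function main() {
  // With the beta header, 3.5 Sonnet accepts a larger max_tokens than the legacy default.
  const message = await anthropic.messages.create({
    model: 'claude-3-5-sonnet-20240620',
    max_tokens: 8192, // illustrative value
    messages: [{ role: 'user', content: 'Hello' }],
  });
  console.log(message.content);
}

main();
```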
@@ -556,8 +573,6 @@
}

logger.debug('modelOptions', { modelOptions });

const client = this.getClient();
const metadata = {
user_id: this.user,
};

@@ -585,7 +600,7 @@

if (this.useMessages) {
requestOptions.messages = payload;
requestOptions.max_tokens = maxOutputTokens || 1500;
requestOptions.max_tokens = maxOutputTokens || legacy.maxOutputTokens.default;
} else {
requestOptions.prompt = payload;
requestOptions.max_tokens_to_sample = maxOutputTokens || 1500;

@@ -605,12 +620,14 @@
};

const maxRetries = 3;
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
async function processResponse() {
let attempts = 0;

while (attempts < maxRetries) {
let response;
try {
const client = this.getClient(requestOptions);
response = await this.createResponse(client, requestOptions);

signal.addEventListener('abort', () => {

@@ -627,6 +644,8 @@
} else if (completion.completion) {
handleChunk(completion.completion);
}

await sleep(streamRate);
}

// Successful processing, exit loop

@@ -737,7 +756,11 @@
};

try {
const response = await this.createResponse(this.getClient(), requestOptions, true);
const response = await this.createResponse(
this.getClient(requestOptions),
requestOptions,
true,
);
let promptTokens = response?.usage?.input_tokens;
let completionTokens = response?.usage?.output_tokens;
if (!promptTokens) {
@@ -1,10 +1,11 @@
const crypto = require('crypto');
const fetch = require('node-fetch');
const { supportsBalanceCheck, Constants } = require('librechat-data-provider');
const { getConvo, getMessages, saveMessage, updateMessage, saveConvo } = require('~/models');
const { supportsBalanceCheck, Constants, CacheKeys, Time } = require('librechat-data-provider');
const { getMessages, saveMessage, updateMessage, saveConvo } = require('~/models');
const { addSpaceIfNeeded, isEnabled } = require('~/server/utils');
const checkBalance = require('~/models/checkBalance');
const { getFiles } = require('~/models/File');
const { getLogStores } = require('~/cache');
const TextStream = require('./TextStream');
const { logger } = require('~/config');

@@ -19,6 +20,14 @@ class BaseClient {
day: 'numeric',
});
this.fetch = this.fetch.bind(this);
/** @type {boolean} */
this.skipSaveConvo = false;
/** @type {boolean} */
this.skipSaveUserMessage = false;
/** @type {ClientDatabaseSavePromise} */
this.userMessagePromise;
/** @type {ClientDatabaseSavePromise} */
this.responsePromise;
}

setOptions() {

@@ -84,19 +93,45 @@ class BaseClient {
await stream.processTextStream(onProgress);
}

/**
* @returns {[string|undefined, string|undefined]}
*/
processOverideIds() {
/** @type {Record<string, string | undefined>} */
let { overrideConvoId, overrideUserMessageId } = this.options?.req?.body ?? {};
if (overrideConvoId) {
const [conversationId, index] = overrideConvoId.split(Constants.COMMON_DIVIDER);
overrideConvoId = conversationId;
if (index !== '0') {
this.skipSaveConvo = true;
}
}
if (overrideUserMessageId) {
const [userMessageId, index] = overrideUserMessageId.split(Constants.COMMON_DIVIDER);
overrideUserMessageId = userMessageId;
if (index !== '0') {
this.skipSaveUserMessage = true;
}
}

return [overrideConvoId, overrideUserMessageId];
}

async setMessageOptions(opts = {}) {
if (opts && opts.replaceOptions) {
this.setOptions(opts);
}

const [overrideConvoId, overrideUserMessageId] = this.processOverideIds();
const { isEdited, isContinued } = opts;
const user = opts.user ?? null;
this.user = user;
const saveOptions = this.getSaveOptions();
this.abortController = opts.abortController ?? new AbortController();
const conversationId = opts.conversationId ?? crypto.randomUUID();
const conversationId = overrideConvoId ?? opts.conversationId ?? crypto.randomUUID();
const parentMessageId = opts.parentMessageId ?? Constants.NO_PARENT;
const userMessageId = opts.overrideParentMessageId ?? crypto.randomUUID();
const userMessageId =
overrideUserMessageId ?? opts.overrideParentMessageId ?? crypto.randomUUID();
let responseMessageId = opts.responseMessageId ?? crypto.randomUUID();
let head = isEdited ? responseMessageId : parentMessageId;
this.currentMessages = (await this.loadHistory(conversationId, head)) ?? [];
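The `processOverideIds` hunk above splits an override ID on `Constants.COMMON_DIVIDER` and treats any index other than `'0'` as a signal to skip saving. A minimal sketch of that parsing rule; the `'::'` divider below is a placeholder, since the constant's actual value is not shown in this diff:

```js
// Minimal sketch of the override-ID handling; '::' stands in for
// Constants.COMMON_DIVIDER from librechat-data-provider.
const COMMON_DIVIDER = '::';

function parseOverrideId(overrideId) {
  const [id, index] = overrideId.split(COMMON_DIVIDER);
  // Mirrors the diff: anything other than index '0' marks the save to be skipped.
  return { id, skipSave: index !== '0' };
}

console.log(parseOverrideId('abc-123::0')); // { id: 'abc-123', skipSave: false }
console.log(parseOverrideId('abc-123::2')); // { id: 'abc-123', skipSave: true }
```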
@@ -160,7 +195,7 @@
}

if (typeof opts?.onStart === 'function') {
opts.onStart(userMessage);
opts.onStart(userMessage, responseMessageId);
}

return {

@@ -450,8 +485,13 @@
this.handleTokenCountMap(tokenCountMap);
}

if (!isEdited) {
await this.saveMessageToDatabase(userMessage, saveOptions, user);
if (!isEdited && !this.skipSaveUserMessage) {
this.userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user);
if (typeof opts?.getReqData === 'function') {
opts.getReqData({
userMessagePromise: this.userMessagePromise,
});
}
}

if (

@@ -500,15 +540,23 @@
const completionTokens = this.getTokenCount(completion);
await this.recordTokenUsage({ promptTokens, completionTokens });
}
await this.saveMessageToDatabase(responseMessage, saveOptions, user);
if (this.userMessagePromise) {
await this.userMessagePromise;
}
this.responsePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user);
const messageCache = getLogStores(CacheKeys.MESSAGES);
messageCache.set(
responseMessageId,
{
text: responseMessage.text,
complete: true,
},
Time.FIVE_MINUTES,
);
delete responseMessage.tokenCount;
return responseMessage;
}

async getConversation(conversationId, user = null) {
return await getConvo(user, conversationId);
}

async loadHistory(conversationId, parentMessageId = null) {
logger.debug('[BaseClient] Loading history:', { conversationId, parentMessageId });

@@ -563,22 +611,41 @@
* @param {string | null} user
*/
async saveMessageToDatabase(message, endpointOptions, user = null) {
await saveMessage({
...message,
endpoint: this.options.endpoint,
unfinished: false,
user,
});
await saveConvo(user, {
conversationId: message.conversationId,
endpoint: this.options.endpoint,
endpointType: this.options.endpointType,
...endpointOptions,
});
if (this.user && user !== this.user) {
throw new Error('User mismatch.');
}

const savedMessage = await saveMessage(
this.options.req,
{
...message,
endpoint: this.options.endpoint,
unfinished: false,
user,
},
{ context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveMessage' },
);

if (this.skipSaveConvo) {
return { message: savedMessage };
}

const conversation = await saveConvo(
this.options.req,
{
conversationId: message.conversationId,
endpoint: this.options.endpoint,
endpointType: this.options.endpointType,
...endpointOptions,
},
{ context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo' },
);

return { message: savedMessage, conversation };
}

async updateMessageInDatabase(message) {
await updateMessage(message);
await updateMessage(this.options.req, message);
}

/**
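With the change above, `saveMessageToDatabase` resolves to the saved message and, unless `skipSaveConvo` is set, the conversation, while callers hold onto the promise instead of awaiting it immediately. An illustrative sketch of how a caller might consume the new return shape; the function and variable names here are assumptions, not LibreChat API:

```js
// Illustrative only: `client` stands for any BaseClient subclass instance.
async function persistResponse(client, responseMessage, saveOptions, user) {
  // The save is kicked off without awaiting, mirroring this.responsePromise above.
  const savePromise = client.saveMessageToDatabase(responseMessage, saveOptions, user);

  // ...streaming to the user can continue here...

  const { message, conversation } = await savePromise;
  return {
    savedMessageId: message?.messageId,
    conversationId: conversation?.conversationId,
  };
}
```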
@@ -13,13 +13,20 @@ const {
endpointSettings,
EModelEndpoint,
VisionModes,
Constants,
AuthKeys,
} = require('librechat-data-provider');
const { encodeAndFormat } = require('~/server/services/Files/images');
const { formatMessage, createContextHandlers } = require('./prompts');
const { getModelMaxTokens } = require('~/utils');
const BaseClient = require('./BaseClient');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');
const {
formatMessage,
createContextHandlers,
titleInstruction,
truncateText,
} = require('./prompts');
const BaseClient = require('./BaseClient');

const loc = 'us-central1';
const publisher = 'google';

@@ -591,12 +598,16 @@ class GoogleClient extends BaseClient {
createLLM(clientOptions) {
const model = clientOptions.modelName ?? clientOptions.model;
if (this.project_id && this.isTextModel) {
logger.debug('Creating Google VertexAI client');
return new GoogleVertexAI(clientOptions);
} else if (this.project_id && this.isChatModel) {
logger.debug('Creating Chat Google VertexAI client');
return new ChatGoogleVertexAI(clientOptions);
} else if (this.project_id) {
logger.debug('Creating VertexAI client');
return new ChatVertexAI(clientOptions);
} else if (model.includes('1.5')) {
logger.debug('Creating GenAI client');
return new GenAI(this.apiKey).getGenerativeModel(
{
...clientOptions,

@@ -606,12 +617,14 @@ class GoogleClient extends BaseClient {
);
}

logger.debug('Creating Chat Google Generative AI client');
return new ChatGoogleGenerativeAI({ ...clientOptions, apiKey: this.apiKey });
}

async getCompletion(_payload, options = {}) {
const { onProgress, abortController } = options;
const { parameters, instances } = _payload;
const { onProgress, abortController } = options;
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
const { messages: _messages, context, examples: _examples } = instances?.[0] ?? {};

let examples;

@@ -664,6 +677,102 @@ class GoogleClient extends BaseClient {

const modelName = clientOptions.modelName ?? clientOptions.model ?? '';
if (modelName?.includes('1.5') && !this.project_id) {
const client = model;
const requestOptions = {
contents: _payload,
};

if (this.options?.promptPrefix?.length) {
requestOptions.systemInstruction = {
parts: [
{
text: this.options.promptPrefix,
},
],
};
}

requestOptions.safetySettings = _payload.safetySettings;

const delay = modelName.includes('flash') ? 8 : 14;
const result = await client.generateContentStream(requestOptions);
for await (const chunk of result.stream) {
const chunkText = chunk.text();
await this.generateTextStream(chunkText, onProgress, {
delay,
});
reply += chunkText;
await sleep(streamRate);
}
return reply;
}

const stream = await model.stream(messages, {
signal: abortController.signal,
timeout: 7000,
safetySettings: _payload.safetySettings,
});

let delay = this.options.streamRate || 8;

if (!this.options.streamRate) {
if (this.isGenerativeModel) {
delay = 12;
}
if (modelName.includes('flash')) {
delay = 5;
}
}

for await (const chunk of stream) {
const chunkText = chunk?.content ?? chunk;
await this.generateTextStream(chunkText, onProgress, {
delay,
});
reply += chunkText;
}

return reply;
}

/**
* Stripped-down logic for generating a title. This uses the non-streaming APIs, since the user does not see titles streaming
*/
async titleChatCompletion(_payload, options = {}) {
const { abortController } = options;
const { parameters, instances } = _payload;
const { messages: _messages, examples: _examples } = instances?.[0] ?? {};

let clientOptions = { ...parameters, maxRetries: 2 };

logger.debug('Initialized title client options');

if (this.project_id) {
clientOptions['authOptions'] = {
credentials: {
...this.serviceKey,
},
projectId: this.project_id,
};
}

if (!parameters) {
clientOptions = { ...clientOptions, ...this.modelOptions };
}

if (this.isGenerativeModel && !this.project_id) {
clientOptions.modelName = clientOptions.model;
delete clientOptions.model;
}

const model = this.createLLM(clientOptions);

let reply = '';
const messages = this.isTextModel ? _payload.trim() : _messages;

const modelName = clientOptions.modelName ?? clientOptions.model ?? '';
if (modelName?.includes('1.5') && !this.project_id) {
logger.debug('Identified titling model as 1.5 version');
/** @type {GenerativeModel} */
const client = model;
const requestOptions = {

@@ -683,38 +792,64 @@ class GoogleClient extends BaseClient {
const safetySettings = _payload.safetySettings;
requestOptions.safetySettings = safetySettings;

const delay = modelName.includes('flash') ? 8 : 14;
const result = await client.generateContentStream(requestOptions);
for await (const chunk of result.stream) {
const chunkText = chunk.text();
await this.generateTextStream(chunkText, onProgress, {
delay,
});
reply += chunkText;
}
const result = await client.generateContent(requestOptions);

reply = result.response?.text();

return reply;
} else {
logger.debug('Beginning titling');
const safetySettings = _payload.safetySettings;

const titleResponse = await model.invoke(messages, {
signal: abortController.signal,
timeout: 7000,
safetySettings: safetySettings,
});

reply = titleResponse.content;

return reply;
}
}

const safetySettings = _payload.safetySettings;
const stream = await model.stream(messages, {
signal: abortController.signal,
timeout: 7000,
safetySettings: safetySettings,
});
async titleConvo({ text, responseText = '' }) {
let title = 'New Chat';
const convo = `||>User:
"${truncateText(text)}"
||>Response:
"${JSON.stringify(truncateText(responseText))}"`;

let delay = this.isGenerativeModel ? 12 : 8;
if (modelName.includes('flash')) {
delay = 5;
let { prompt: payload } = await this.buildMessages([
{
text: `Please generate ${titleInstruction}

${convo}

||>Title:`,
isCreatedByUser: true,
author: this.userLabel,
},
]);

if (this.isVisionModel) {
logger.warn(
`Current vision model does not support titling without an attachment; falling back to default model ${settings.model.default}`,
);

payload.parameters = { ...payload.parameters, model: settings.model.default };
}
for await (const chunk of stream) {
const chunkText = chunk?.content ?? chunk;
await this.generateTextStream(chunkText, onProgress, {
delay,

try {
title = await this.titleChatCompletion(payload, {
abortController: new AbortController(),
onProgress: () => {},
});
reply += chunkText;
} catch (e) {
logger.error('[GoogleClient] There was an issue generating the title', e);
}

return reply;
logger.debug(`Title response: ${title}`);
return title;
}

getSaveOptions() {

@@ -733,38 +868,36 @@ class GoogleClient extends BaseClient {
}

async sendCompletion(payload, opts = {}) {
const modelName = payload.parameters?.model;

if (modelName && modelName.toLowerCase().includes('gemini')) {
const safetySettings = [
{
category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
threshold:
process.env.GOOGLE_SAFETY_SEXUALLY_EXPLICIT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
{
category: 'HARM_CATEGORY_HATE_SPEECH',
threshold: process.env.GOOGLE_SAFETY_HATE_SPEECH || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
{
category: 'HARM_CATEGORY_HARASSMENT',
threshold: process.env.GOOGLE_SAFETY_HARASSMENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
{
category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
threshold:
process.env.GOOGLE_SAFETY_DANGEROUS_CONTENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
];

payload.safetySettings = safetySettings;
}
payload.safetySettings = this.getSafetySettings();

let reply = '';
reply = await this.getCompletion(payload, opts);
return reply.trim();
}

getSafetySettings() {
return [
{
category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
threshold:
process.env.GOOGLE_SAFETY_SEXUALLY_EXPLICIT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
{
category: 'HARM_CATEGORY_HATE_SPEECH',
threshold: process.env.GOOGLE_SAFETY_HATE_SPEECH || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
{
category: 'HARM_CATEGORY_HARASSMENT',
threshold: process.env.GOOGLE_SAFETY_HARASSMENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
{
category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
threshold:
process.env.GOOGLE_SAFETY_DANGEROUS_CONTENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
];
}

/* TO-DO: Handle tokens with Google tokenization NOTE: these are required */
static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {
if (tokenizersCache[encoding]) {
@@ -1,7 +1,9 @@
const { z } = require('zod');
const axios = require('axios');
const { Ollama } = require('ollama');
const { Constants } = require('librechat-data-provider');
const { deriveBaseURL } = require('~/utils');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');

const ollamaPayloadSchema = z.object({

@@ -40,6 +42,7 @@ const getValidBase64 = (imageUrl) => {
class OllamaClient {
constructor(options = {}) {
const host = deriveBaseURL(options.baseURL ?? 'http://localhost:11434');
this.streamRate = options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
/** @type {Ollama} */
this.client = new Ollama({ host });
}

@@ -136,6 +139,8 @@ class OllamaClient {
stream.controller.abort();
break;
}

await sleep(this.streamRate);
}
}
// TODO: regular completion

@@ -27,7 +27,6 @@ const {
createContextHandlers,
} = require('./prompts');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { updateTokenWebsocket } = require('~/server/services/Files/Audio');
const { isEnabled, sleep } = require('~/server/utils');
const { handleOpenAIErrors } = require('./tools/util');
const spendTokens = require('~/models/spendTokens');

@@ -595,7 +594,6 @@ class OpenAIClient extends BaseClient {
payload,
(progressMessage) => {
if (progressMessage === '[DONE]') {
updateTokenWebsocket('[DONE]');
return;
}

@@ -1184,8 +1182,10 @@ ${convo}
});
}

const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;

if (this.message_file_map && this.isOllama) {
const ollamaClient = new OllamaClient({ baseURL });
const ollamaClient = new OllamaClient({ baseURL, streamRate });
return await ollamaClient.chatCompletion({
payload: modelOptions,
onProgress,

@@ -1223,8 +1223,6 @@ ${convo}
}
});

const azureDelay = this.modelOptions.model?.includes('gpt-4') ? 30 : 17;

for await (const chunk of stream) {
const token = chunk.choices[0]?.delta?.content || '';
intermediateReply += token;

@@ -1234,9 +1232,7 @@ ${convo}
break;
}

if (this.azure) {
await sleep(azureDelay);
}
await sleep(streamRate);
}

if (!UnexpectedRoleError) {
@@ -1,5 +1,6 @@
const OpenAIClient = require('./OpenAIClient');
const { CallbackManager } = require('langchain/callbacks');
const { CacheKeys, Time } = require('librechat-data-provider');
const { BufferMemory, ChatMessageHistory } = require('langchain/memory');
const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents');
const { addImages, buildErrorInput, buildPromptPrefix } = require('./output_parsers');

@@ -11,6 +12,7 @@ const { SelfReflectionTool } = require('./tools');
const { isEnabled } = require('~/server/utils');
const { extractBaseURL } = require('~/utils');
const { loadTools } = require('./tools/util');
const { getLogStores } = require('~/cache');
const { logger } = require('~/config');

class PluginsClient extends OpenAIClient {

@@ -220,6 +222,13 @@ class PluginsClient extends OpenAIClient {
}
}

/**
*
* @param {TMessage} responseMessage
* @param {Partial<TMessage>} saveOptions
* @param {string} user
* @returns
*/
async handleResponseMessage(responseMessage, saveOptions, user) {
const { output, errorMessage, ...result } = this.result;
logger.debug('[PluginsClient][handleResponseMessage] Output:', {

@@ -238,12 +247,32 @@ class PluginsClient extends OpenAIClient {
await this.recordTokenUsage(responseMessage);
}

await this.saveMessageToDatabase(responseMessage, saveOptions, user);
this.responsePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user);
const messageCache = getLogStores(CacheKeys.MESSAGES);
messageCache.set(
responseMessage.messageId,
{
text: responseMessage.text,
complete: true,
},
Time.FIVE_MINUTES,
);
delete responseMessage.tokenCount;
return { ...responseMessage, ...result };
}

async sendMessage(message, opts = {}) {
/** @type {{ filteredTools: string[], includedTools: string[] }} */
const { filteredTools = [], includedTools = [] } = this.options.req.app.locals;

if (includedTools.length > 0) {
const tools = this.options.tools.filter((plugin) => includedTools.includes(plugin));
this.options.tools = tools;
} else {
const tools = this.options.tools.filter((plugin) => !filteredTools.includes(plugin));
this.options.tools = tools;
}

// If a message is edited, no tools can be used.
const completionMode = this.options.tools.length === 0 || opts.isEdited;
if (completionMode) {

@@ -301,7 +330,15 @@ class PluginsClient extends OpenAIClient {
if (payload) {
this.currentMessages = payload;
}
await this.saveMessageToDatabase(userMessage, saveOptions, user);

if (!this.skipSaveUserMessage) {
this.userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user);
if (typeof opts?.getReqData === 'function') {
opts.getReqData({
userMessagePromise: this.userMessagePromise,
});
}
}

if (isEnabled(process.env.CHECK_BALANCE)) {
await checkBalance({
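In the `sendMessage` hunk above, `includedTools` acts as an allowlist that takes precedence; otherwise `filteredTools` is applied as a denylist. A minimal sketch of that selection rule with made-up tool names:

```js
// Illustrative sketch of the tool-selection precedence from the diff above.
function selectTools(tools, { filteredTools = [], includedTools = [] } = {}) {
  if (includedTools.length > 0) {
    // Allowlist wins: keep only explicitly included tools.
    return tools.filter((plugin) => includedTools.includes(plugin));
  }
  // Otherwise drop anything on the denylist.
  return tools.filter((plugin) => !filteredTools.includes(plugin));
}

const tools = ['calculator', 'browser', 'dalle'];
console.log(selectTools(tools, { includedTools: ['browser'] })); // ['browser']
console.log(selectTools(tools, { filteredTools: ['dalle'] }));   // ['calculator', 'browser']
```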
@@ -1,44 +1,3 @@
/*
module.exports = `You are ChatGPT, a Large Language model with useful tools.

Talk to the human and provide meaningful answers when questions are asked.

Use the tools when you need them, but use your own knowledge if you are confident of the answer. Keep answers short and concise.

A tool is not usually needed for creative requests, so do your best to answer them without tools.

Avoid repeating identical answers if it appears before. Only fulfill the human's requests, do not create extra steps beyond what the human has asked for.

Your input for 'Action' should be the name of tool used only.

Be honest. If you can't answer something, or a tool is not appropriate, say you don't know or answer to the best of your ability.

Attempt to fulfill the human's requests in as few actions as possible`;
*/

// module.exports = `You are ChatGPT, a highly knowledgeable and versatile large language model.

// Engage with the Human conversationally, providing concise and meaningful answers to questions. Utilize built-in tools when necessary, except for creative requests, where relying on your own knowledge is preferred. Aim for variety and avoid repetitive answers.

// For your 'Action' input, state the name of the tool used only, and honor user requests without adding extra steps. Always be honest; if you cannot provide an appropriate answer or tool, admit that or do your best.

// Strive to meet the user's needs efficiently with minimal actions.`;

// import {
// BasePromptTemplate,
// BaseStringPromptTemplate,
// SerializedBasePromptTemplate,
// renderTemplate,
// } from "langchain/prompts";

// prefix: `You are ChatGPT, a highly knowledgeable and versatile large language model.
// Your objective is to help users by understanding their intent and choosing the best action. Prioritize direct, specific responses. Use concise, varied answers and rely on your knowledge for creative tasks. Utilize tools when needed, and structure results for machine compatibility.
// prefix: `Objective: to comprehend human intentions based on user input and available tools. Goal: identify the best action to directly address the human's query. In your subsequent steps, you will utilize the chosen action. You may select multiple actions and list them in a meaningful order. Prioritize actions that directly relate to the user's query over general ones. Ensure that the generated thought is highly specific and explicit to best match the user's expectations. Construct the result in a manner that an online open-API would most likely expect. Provide concise and meaningful answers to human queries. Utilize tools when necessary. Relying on your own knowledge is preferred for creative requests. Aim for variety and avoid repetitive answers.

// # Available Actions & Tools:
// N/A: no suitable action, use your own knowledge.`,
// suffix: `Remember, all your responses MUST adhere to the described format and only respond if the format is followed. Output exactly with the requested format, avoiding any other text as this will be parsed by a machine. Following 'Action:', provide only one of the actions listed above. If a tool is not necessary, deduce this quickly and finish your response. Honor the human's requests without adding extra steps. Carry out tasks in the sequence written by the human. Always be honest; if you cannot provide an appropriate answer or tool, do your best with your own knowledge. Strive to meet the user's needs efficiently with minimal actions.`;

module.exports = {
'gpt3-v1': {
prefix: `Objective: Understand human intentions using user input and available tools. Goal: Identify the most suitable actions to directly address user queries.
@@ -60,10 +60,10 @@ function addImages(intermediateSteps, responseMessage) {
if (!observation || !observation.includes('![')) {
return;
}
const observedImagePath = observation.match(/!\[.*\]\([^)]*\)/g);
const observedImagePath = observation.match(/!\[[^(]*\]\([^)]*\)/g);
if (observedImagePath && !responseMessage.text.includes(observedImagePath[0])) {
responseMessage.text += '\n' + observation;
logger.debug('[addImages] added image from intermediateSteps:', observation);
responseMessage.text += '\n' + observedImagePath[0];
logger.debug('[addImages] added image from intermediateSteps:', observedImagePath[0]);
}
});
}
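The tightened pattern and the switch to appending `observedImagePath[0]` mean only the first image markdown is copied into the response, not the surrounding observation text. A small sketch comparing the two behaviors; the regexes are the ones from the diff, while the sample observation is made up:

```js
// Sample observation: the text around the image should not leak into the response.
const observation = `
Title: Test Document

Some text that should not be included.
`;

const oldPattern = /!\[.*\]\([^)]*\)/g;
const newPattern = /!\[[^(]*\]\([^)]*\)/g;

// Old behavior: when any match was found, the entire observation was appended.
const oldMatch = observation.match(oldPattern);
const oldAppend = oldMatch ? '\n' + observation : '';

// New behavior: only the first image markdown itself is appended.
const newMatch = observation.match(newPattern);
const newAppend = newMatch ? '\n' + newMatch[0] : '';

console.log(oldAppend.includes('should not be included')); // true
console.log(newAppend); // '\n'
```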
@@ -81,4 +81,62 @@ describe('addImages', () => {
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe(`${originalText}\n${imageMarkdown}`);
});

it('should extract only image markdowns when there is text between them', () => {
const markdownWithTextBetweenImages = `

Some text between images that should not be included.

More text that should be ignored.

`;
intermediateSteps.push({ observation: markdownWithTextBetweenImages });
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe('\n');
});

it('should only return the first image when multiple images are present', () => {
const markdownWithMultipleImages = `



`;
intermediateSteps.push({ observation: markdownWithMultipleImages });
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe('\n');
});

it('should not include any text or metadata surrounding the image markdown', () => {
const markdownWithMetadata = `
Title: Test Document
Author: John Doe

Some content after the image.
Vector values: [0.1, 0.2, 0.3]
`;
intermediateSteps.push({ observation: markdownWithMetadata });
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe('\n');
});

it('should handle complex markdown with multiple images and only return the first one', () => {
const complexMarkdown = `
# Document Title

## Section 1
Here's some text with an embedded image:


## Section 2
More text here...


### Subsection
Even more content

`;
intermediateSteps.push({ observation: complexMarkdown });
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe('\n');
});
});
@@ -1,4 +1,6 @@
const AnthropicClient = require('../AnthropicClient');
const { anthropicSettings } = require('librechat-data-provider');
const AnthropicClient = require('~/app/clients/AnthropicClient');

const HUMAN_PROMPT = '\n\nHuman:';
const AI_PROMPT = '\n\nAssistant:';

@@ -22,7 +24,7 @@ describe('AnthropicClient', () => {
const options = {
modelOptions: {
model,
temperature: 0.7,
temperature: anthropicSettings.temperature.default,
},
};
client = new AnthropicClient('test-api-key');

@@ -33,7 +35,42 @@ describe('AnthropicClient', () => {
it('should set the options correctly', () => {
expect(client.apiKey).toBe('test-api-key');
expect(client.modelOptions.model).toBe(model);
expect(client.modelOptions.temperature).toBe(0.7);
expect(client.modelOptions.temperature).toBe(anthropicSettings.temperature.default);
});

it('should set legacy maxOutputTokens for non-Claude-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-2',
maxOutputTokens: anthropicSettings.maxOutputTokens.default,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
});
it('should not set maxOutputTokens if not provided', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-3',
},
});
expect(client.modelOptions.maxOutputTokens).toBeUndefined();
});

it('should not set legacy maxOutputTokens for Claude-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-3-opus-20240229',
maxOutputTokens: anthropicSettings.legacy.maxOutputTokens.default,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
});
});

@@ -136,4 +173,57 @@ describe('AnthropicClient', () => {
expect(prompt).toContain('You are Claude-2');
});
});

describe('getClient', () => {
it('should set legacy maxOutputTokens for non-Claude-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-2',
maxOutputTokens: anthropicSettings.legacy.maxOutputTokens.default,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
});

it('should not set legacy maxOutputTokens for Claude-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-3-opus-20240229',
maxOutputTokens: anthropicSettings.legacy.maxOutputTokens.default,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
});

it('should add beta header for claude-3-5-sonnet model', () => {
const client = new AnthropicClient('test-api-key');
const modelOptions = {
model: 'claude-3-5-sonnet-20240307',
};
client.setOptions({ modelOptions });
const anthropicClient = client.getClient(modelOptions);
expect(anthropicClient._options.defaultHeaders).toBeDefined();
expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
'max-tokens-3-5-sonnet-2024-07-15',
);
});

it('should not add beta header for other models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-2',
},
});
const anthropicClient = client.getClient();
expect(anthropicClient.defaultHeaders).not.toHaveProperty('anthropic-beta');
});
});
});
@@ -1,7 +1,7 @@
const { Constants } = require('librechat-data-provider');
const { initializeFakeClient } = require('./FakeClient');

jest.mock('../../../lib/db/connectDb');
jest.mock('~/lib/db/connectDb');
jest.mock('~/models', () => ({
User: jest.fn(),
Key: jest.fn(),

@@ -576,7 +576,11 @@ describe('BaseClient', () => {
const onStart = jest.fn();
const opts = { onStart };
await TestClient.sendMessage('Hello, world!', opts);
expect(onStart).toHaveBeenCalledWith(expect.objectContaining({ text: 'Hello, world!' }));

expect(onStart).toHaveBeenCalledWith(
expect.objectContaining({ text: 'Hello, world!' }),
expect.any(String),
);
});

test('saveMessageToDatabase is called with the correct arguments', async () => {

@@ -627,5 +631,32 @@ describe('BaseClient', () => {
}),
);
});

test('userMessagePromise is awaited before saving response message', async () => {
// Mock the saveMessageToDatabase method
TestClient.saveMessageToDatabase = jest.fn().mockImplementation(() => {
return new Promise((resolve) => setTimeout(resolve, 100)); // Simulate a delay
});

// Send a message
const messagePromise = TestClient.sendMessage('Hello, world!');

// Wait a short time to ensure the user message save has started
await new Promise((resolve) => setTimeout(resolve, 50));

// Check that saveMessageToDatabase has been called once (for the user message)
expect(TestClient.saveMessageToDatabase).toHaveBeenCalledTimes(1);

// Wait for the message to be fully processed
await messagePromise;

// Check that saveMessageToDatabase has been called twice (once for user message, once for response)
expect(TestClient.saveMessageToDatabase).toHaveBeenCalledTimes(2);

// Check the order of calls
const calls = TestClient.saveMessageToDatabase.mock.calls;
expect(calls[0][0].isCreatedByUser).toBe(true); // First call should be for user message
expect(calls[1][0].isCreatedByUser).toBe(false); // Second call should be for response message
});
});
});
@@ -38,7 +38,12 @@ const run = async () => {
|
||||
"On the other hand, we denounce with righteous indignation and dislike men who are so beguiled and demoralized by the charms of pleasure of the moment, so blinded by desire, that they cannot foresee the pain and trouble that are bound to ensue; and equal blame belongs to those who fail in their duty through weakness of will, which is the same as saying through shrinking from toil and pain. These cases are perfectly simple and easy to distinguish. In a free hour, when our power of choice is untrammelled and when nothing prevents our being able to do what we like best, every pleasure is to be welcomed and every pain avoided. But in certain circumstances and owing to the claims of duty or the obligations of business it will frequently occur that pleasures have to be repudiated and annoyances accepted. The wise man therefore always holds in these matters to this principle of selection: he rejects pleasures to secure other greater pleasures, or else he endures pains to avoid worse pains."
|
||||
`;
|
||||
const model = 'gpt-3.5-turbo';
|
||||
const maxContextTokens = model === 'gpt-4' ? 8191 : model === 'gpt-4-32k' ? 32767 : 4095; // 1 less than maximum
|
||||
let maxContextTokens = 4095;
|
||||
if (model === 'gpt-4') {
|
||||
maxContextTokens = 8191;
|
||||
} else if (model === 'gpt-4-32k') {
|
||||
maxContextTokens = 32767;
|
||||
}
|
||||
const clientOptions = {
|
||||
reverseProxyUrl: process.env.OPENAI_REVERSE_PROXY || null,
|
||||
maxContextTokens,
|
||||
|
||||
@@ -194,6 +194,7 @@ describe('PluginsClient', () => {
|
||||
expect(client.getFunctionModelName('')).toBe('gpt-3.5-turbo');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Azure OpenAI tests specific to Plugins', () => {
|
||||
// TODO: add more tests for Azure OpenAI integration with Plugins
|
||||
// let client;
|
||||
@@ -220,4 +221,94 @@ describe('PluginsClient', () => {
|
||||
spy.mockRestore();
|
||||
});
|
||||
});
|
||||
|
||||
describe('sendMessage with filtered tools', () => {
|
||||
let TestAgent;
|
||||
const apiKey = 'fake-api-key';
|
||||
const mockTools = [{ name: 'tool1' }, { name: 'tool2' }, { name: 'tool3' }, { name: 'tool4' }];
|
||||
|
||||
beforeEach(() => {
|
||||
TestAgent = new PluginsClient(apiKey, {
|
||||
tools: mockTools,
|
||||
modelOptions: {
|
||||
model: 'gpt-3.5-turbo',
|
||||
temperature: 0,
|
||||
max_tokens: 2,
|
||||
},
|
||||
agentOptions: {
|
||||
model: 'gpt-3.5-turbo',
|
||||
},
|
||||
});
|
||||
|
||||
TestAgent.options.req = {
|
||||
app: {
|
||||
locals: {},
|
||||
},
|
||||
};
|
||||
|
||||
TestAgent.sendMessage = jest.fn().mockImplementation(async () => {
|
||||
const { filteredTools = [], includedTools = [] } = TestAgent.options.req.app.locals;
|
||||
|
||||
if (includedTools.length > 0) {
|
||||
const tools = TestAgent.options.tools.filter((plugin) =>
|
||||
includedTools.includes(plugin.name),
|
||||
);
|
||||
TestAgent.options.tools = tools;
|
||||
} else {
|
||||
const tools = TestAgent.options.tools.filter(
|
||||
(plugin) => !filteredTools.includes(plugin.name),
|
||||
);
|
||||
TestAgent.options.tools = tools;
|
||||
}
|
||||
|
||||
return {
|
||||
text: 'Mocked response',
|
||||
tools: TestAgent.options.tools,
|
||||
};
|
||||
});
|
||||
});
|
||||
|
||||
test('should filter out tools when filteredTools is provided', async () => {
|
||||
TestAgent.options.req.app.locals.filteredTools = ['tool1', 'tool3'];
|
||||
const response = await TestAgent.sendMessage('Test message');
|
||||
expect(response.tools).toHaveLength(2);
|
||||
expect(response.tools).toEqual(
|
||||
expect.arrayContaining([
|
||||
expect.objectContaining({ name: 'tool2' }),
|
||||
expect.objectContaining({ name: 'tool4' }),
|
||||
]),
|
||||
);
|
||||
});
|
||||
|
||||
test('should only include specified tools when includedTools is provided', async () => {
|
||||
TestAgent.options.req.app.locals.includedTools = ['tool2', 'tool4'];
|
||||
const response = await TestAgent.sendMessage('Test message');
|
||||
expect(response.tools).toHaveLength(2);
|
||||
expect(response.tools).toEqual(
|
||||
expect.arrayContaining([
|
||||
expect.objectContaining({ name: 'tool2' }),
|
||||
expect.objectContaining({ name: 'tool4' }),
|
||||
]),
|
||||
);
|
||||
});
|
||||
|
||||
test('should prioritize includedTools over filteredTools', async () => {
|
||||
TestAgent.options.req.app.locals.filteredTools = ['tool1', 'tool3'];
|
||||
TestAgent.options.req.app.locals.includedTools = ['tool1', 'tool2'];
|
||||
const response = await TestAgent.sendMessage('Test message');
|
||||
expect(response.tools).toHaveLength(2);
|
||||
expect(response.tools).toEqual(
|
||||
expect.arrayContaining([
|
||||
expect.objectContaining({ name: 'tool1' }),
|
||||
expect.objectContaining({ name: 'tool2' }),
|
||||
]),
|
||||
);
|
||||
});
|
||||
|
||||
test('should not modify tools when no filters are provided', async () => {
|
||||
const response = await TestAgent.sendMessage('Test message');
|
||||
expect(response.tools).toHaveLength(4);
|
||||
expect(response.tools).toEqual(expect.arrayContaining(mockTools));
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
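The behaviour mocked in the tests above, with includedTools taking precedence over filteredTools, can be expressed as a small standalone helper. This is an illustrative sketch only; the function name is not part of the codebase.

// Returns the subset of tools allowed for a request:
// an allow-list (includedTools) wins over a deny-list (filteredTools).
function selectTools(tools, { filteredTools = [], includedTools = [] } = {}) {
  if (includedTools.length > 0) {
    return tools.filter((tool) => includedTools.includes(tool.name));
  }
  return tools.filter((tool) => !filteredTools.includes(tool.name));
}

// Example: only tool1 and tool2 survive, even though tool1 is also in filteredTools.
selectTools(
  [{ name: 'tool1' }, { name: 'tool2' }, { name: 'tool3' }],
  { filteredTools: ['tool1', 'tool3'], includedTools: ['tool1', 'tool2'] },
);
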
@@ -12,9 +12,15 @@ class GoogleSearchResults extends Tool {
|
||||
this.envVarApiKey = 'GOOGLE_SEARCH_API_KEY';
|
||||
this.envVarSearchEngineId = 'GOOGLE_CSE_ID';
|
||||
this.override = fields.override ?? false;
|
||||
this.apiKey = fields.apiKey ?? getEnvironmentVariable(this.envVarApiKey);
|
||||
this.apiKey = fields[this.envVarApiKey] ?? getEnvironmentVariable(this.envVarApiKey);
|
||||
this.searchEngineId =
|
||||
fields.searchEngineId ?? getEnvironmentVariable(this.envVarSearchEngineId);
|
||||
fields[this.envVarSearchEngineId] ?? getEnvironmentVariable(this.envVarSearchEngineId);
|
||||
|
||||
if (!this.override && (!this.apiKey || !this.searchEngineId)) {
|
||||
throw new Error(
|
||||
`Missing ${this.envVarApiKey} or ${this.envVarSearchEngineId} environment variable.`,
|
||||
);
|
||||
}
|
||||
|
||||
this.kwargs = fields?.kwargs ?? {};
|
||||
this.name = 'google';
|
||||
|
||||
@@ -12,7 +12,7 @@ class TavilySearchResults extends Tool {
|
||||
this.envVar = 'TAVILY_API_KEY';
|
||||
/* Used to initialize the Tool without necessary variables. */
|
||||
this.override = fields.override ?? false;
|
||||
this.apiKey = fields.apiKey ?? this.getApiKey();
|
||||
this.apiKey = fields[this.envVar] ?? this.getApiKey();
|
||||
|
||||
this.kwargs = fields?.kwargs ?? {};
|
||||
this.name = 'tavily_search_results_json';
|
||||
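With the two constructor changes above, user-provided credentials for both search tools are now read from fields keyed by the environment-variable name rather than camelCase properties. A usage sketch with placeholder values (the new spec files below exercise the same shape):

// Keys must match the tools' environment-variable names after this change.
const googleSearch = new GoogleSearchResults({
  GOOGLE_SEARCH_API_KEY: 'user-supplied-key', // previously fields.apiKey
  GOOGLE_CSE_ID: 'user-supplied-engine-id', // previously fields.searchEngineId
});

const tavily = new TavilySearchResults({
  TAVILY_API_KEY: 'user-supplied-key', // previously fields.apiKey
});
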
@@ -82,7 +82,9 @@ class TavilySearchResults extends Tool {
|
||||
|
||||
const json = await response.json();
|
||||
if (!response.ok) {
|
||||
throw new Error(`Request failed with status ${response.status}: ${json.error}`);
|
||||
throw new Error(
|
||||
`Request failed with status ${response.status}: ${json?.detail?.error || json?.error}`,
|
||||
);
|
||||
}
|
||||
|
||||
return JSON.stringify(json);
|
||||
|
||||
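The widened error handling above accounts for Tavily error bodies that nest the message under detail. Roughly, either response shape now yields a readable message:

// { error: 'Invalid API key' }  or  { detail: { error: 'Invalid API key' } }
const message = json?.detail?.error || json?.error;
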
api/app/clients/tools/structured/specs/GoogleSearch.spec.js (new file, 50 lines)
@@ -0,0 +1,50 @@
|
||||
const GoogleSearch = require('../GoogleSearch');
|
||||
|
||||
jest.mock('node-fetch');
|
||||
jest.mock('@langchain/core/utils/env');
|
||||
|
||||
describe('GoogleSearch', () => {
|
||||
let originalEnv;
|
||||
const mockApiKey = 'mock_api';
|
||||
const mockSearchEngineId = 'mock_search_engine_id';
|
||||
|
||||
beforeAll(() => {
|
||||
originalEnv = { ...process.env };
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
jest.resetModules();
|
||||
process.env = {
|
||||
...originalEnv,
|
||||
GOOGLE_SEARCH_API_KEY: mockApiKey,
|
||||
GOOGLE_CSE_ID: mockSearchEngineId,
|
||||
};
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
jest.clearAllMocks();
|
||||
process.env = originalEnv;
|
||||
});
|
||||
|
||||
it('should use mockApiKey and mockSearchEngineId when environment variables are not set', () => {
|
||||
const instance = new GoogleSearch({
|
||||
GOOGLE_SEARCH_API_KEY: mockApiKey,
|
||||
GOOGLE_CSE_ID: mockSearchEngineId,
|
||||
});
|
||||
expect(instance.apiKey).toBe(mockApiKey);
|
||||
expect(instance.searchEngineId).toBe(mockSearchEngineId);
|
||||
});
|
||||
|
||||
it('should throw an error if GOOGLE_SEARCH_API_KEY or GOOGLE_CSE_ID is missing', () => {
|
||||
delete process.env.GOOGLE_SEARCH_API_KEY;
|
||||
expect(() => new GoogleSearch()).toThrow(
|
||||
'Missing GOOGLE_SEARCH_API_KEY or GOOGLE_CSE_ID environment variable.',
|
||||
);
|
||||
|
||||
process.env.GOOGLE_SEARCH_API_KEY = mockApiKey;
|
||||
delete process.env.GOOGLE_CSE_ID;
|
||||
expect(() => new GoogleSearch()).toThrow(
|
||||
'Missing GOOGLE_SEARCH_API_KEY or GOOGLE_CSE_ID environment variable.',
|
||||
);
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,38 @@
|
||||
const TavilySearchResults = require('../TavilySearchResults');
|
||||
|
||||
jest.mock('node-fetch');
|
||||
jest.mock('@langchain/core/utils/env');
|
||||
|
||||
describe('TavilySearchResults', () => {
|
||||
let originalEnv;
|
||||
const mockApiKey = 'mock_api_key';
|
||||
|
||||
beforeAll(() => {
|
||||
originalEnv = { ...process.env };
|
||||
});
|
||||
|
||||
beforeEach(() => {
|
||||
jest.resetModules();
|
||||
process.env = {
|
||||
...originalEnv,
|
||||
TAVILY_API_KEY: mockApiKey,
|
||||
};
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
jest.clearAllMocks();
|
||||
process.env = originalEnv;
|
||||
});
|
||||
|
||||
it('should throw an error if TAVILY_API_KEY is missing', () => {
|
||||
delete process.env.TAVILY_API_KEY;
|
||||
expect(() => new TavilySearchResults()).toThrow('Missing TAVILY_API_KEY environment variable.');
|
||||
});
|
||||
|
||||
it('should use mockApiKey when TAVILY_API_KEY is not set in the environment', () => {
|
||||
const instance = new TavilySearchResults({
|
||||
TAVILY_API_KEY: mockApiKey,
|
||||
});
|
||||
expect(instance.apiKey).toBe(mockApiKey);
|
||||
});
|
||||
});
|
||||
api/cache/getLogStores.js (vendored, 35 lines changed)
@@ -1,13 +1,11 @@
|
||||
const Keyv = require('keyv');
|
||||
const { CacheKeys, ViolationTypes } = require('librechat-data-provider');
|
||||
const { CacheKeys, ViolationTypes, Time } = require('librechat-data-provider');
|
||||
const { logFile, violationFile } = require('./keyvFiles');
|
||||
const { math, isEnabled } = require('~/server/utils');
|
||||
const keyvRedis = require('./keyvRedis');
|
||||
const keyvMongo = require('./keyvMongo');
|
||||
|
||||
const { BAN_DURATION, USE_REDIS } = process.env ?? {};
|
||||
const THIRTY_MINUTES = 1800000;
|
||||
const TEN_MINUTES = 600000;
|
||||
|
||||
const duration = math(BAN_DURATION, 7200000);
|
||||
|
||||
@@ -25,17 +23,25 @@ const config = isEnabled(USE_REDIS)
|
||||
? new Keyv({ store: keyvRedis })
|
||||
: new Keyv({ namespace: CacheKeys.CONFIG_STORE });
|
||||
|
||||
const audioRuns = isEnabled(USE_REDIS) // ttl: 30 minutes
|
||||
? new Keyv({ store: keyvRedis, ttl: TEN_MINUTES })
|
||||
: new Keyv({ namespace: CacheKeys.AUDIO_RUNS, ttl: TEN_MINUTES });
|
||||
const roles = isEnabled(USE_REDIS)
|
||||
? new Keyv({ store: keyvRedis })
|
||||
: new Keyv({ namespace: CacheKeys.ROLES });
|
||||
|
||||
const tokenConfig = isEnabled(USE_REDIS) // ttl: 30 minutes
|
||||
? new Keyv({ store: keyvRedis, ttl: THIRTY_MINUTES })
|
||||
: new Keyv({ namespace: CacheKeys.TOKEN_CONFIG, ttl: THIRTY_MINUTES });
|
||||
const audioRuns = isEnabled(USE_REDIS)
|
||||
? new Keyv({ store: keyvRedis, ttl: Time.TEN_MINUTES })
|
||||
: new Keyv({ namespace: CacheKeys.AUDIO_RUNS, ttl: Time.TEN_MINUTES });
|
||||
|
||||
const genTitle = isEnabled(USE_REDIS) // ttl: 2 minutes
|
||||
? new Keyv({ store: keyvRedis, ttl: 120000 })
|
||||
: new Keyv({ namespace: CacheKeys.GEN_TITLE, ttl: 120000 });
|
||||
const messages = isEnabled(USE_REDIS)
|
||||
? new Keyv({ store: keyvRedis, ttl: Time.FIVE_MINUTES })
|
||||
: new Keyv({ namespace: CacheKeys.MESSAGES, ttl: Time.FIVE_MINUTES });
|
||||
|
||||
const tokenConfig = isEnabled(USE_REDIS)
|
||||
? new Keyv({ store: keyvRedis, ttl: Time.THIRTY_MINUTES })
|
||||
: new Keyv({ namespace: CacheKeys.TOKEN_CONFIG, ttl: Time.THIRTY_MINUTES });
|
||||
|
||||
const genTitle = isEnabled(USE_REDIS)
|
||||
? new Keyv({ store: keyvRedis, ttl: Time.TWO_MINUTES })
|
||||
: new Keyv({ namespace: CacheKeys.GEN_TITLE, ttl: Time.TWO_MINUTES });
|
||||
|
||||
const modelQueries = isEnabled(process.env.USE_REDIS)
|
||||
? new Keyv({ store: keyvRedis })
|
||||
@@ -43,9 +49,10 @@ const modelQueries = isEnabled(process.env.USE_REDIS)
|
||||
|
||||
const abortKeys = isEnabled(USE_REDIS)
|
||||
? new Keyv({ store: keyvRedis })
|
||||
: new Keyv({ namespace: CacheKeys.ABORT_KEYS, ttl: 600000 });
|
||||
: new Keyv({ namespace: CacheKeys.ABORT_KEYS, ttl: Time.TEN_MINUTES });
|
||||
|
||||
const namespaces = {
|
||||
[CacheKeys.ROLES]: roles,
|
||||
[CacheKeys.CONFIG_STORE]: config,
|
||||
pending_req,
|
||||
[ViolationTypes.BAN]: new Keyv({ store: keyvMongo, namespace: CacheKeys.BANS, ttl: duration }),
|
||||
@@ -62,6 +69,7 @@ const namespaces = {
|
||||
registrations: createViolationInstance('registrations'),
|
||||
[ViolationTypes.TTS_LIMIT]: createViolationInstance(ViolationTypes.TTS_LIMIT),
|
||||
[ViolationTypes.STT_LIMIT]: createViolationInstance(ViolationTypes.STT_LIMIT),
|
||||
[ViolationTypes.CONVO_ACCESS]: createViolationInstance(ViolationTypes.CONVO_ACCESS),
|
||||
[ViolationTypes.FILE_UPLOAD_LIMIT]: createViolationInstance(ViolationTypes.FILE_UPLOAD_LIMIT),
|
||||
[ViolationTypes.VERIFY_EMAIL_LIMIT]: createViolationInstance(ViolationTypes.VERIFY_EMAIL_LIMIT),
|
||||
[ViolationTypes.RESET_PASSWORD_LIMIT]: createViolationInstance(
|
||||
@@ -76,6 +84,7 @@ const namespaces = {
|
||||
[CacheKeys.GEN_TITLE]: genTitle,
|
||||
[CacheKeys.MODEL_QUERIES]: modelQueries,
|
||||
[CacheKeys.AUDIO_RUNS]: audioRuns,
|
||||
[CacheKeys.MESSAGES]: messages,
|
||||
};
|
||||
|
||||
/**
|
||||
|
||||
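A brief usage sketch of the new MESSAGES cache (illustrative only, not part of this changeset); it assumes getLogStores(key) returns the Keyv instance registered in the namespaces map above, which is how the module is used elsewhere in the codebase.

const { CacheKeys } = require('librechat-data-provider');
const getLogStores = require('~/cache/getLogStores');

async function cacheExample() {
  const messagesCache = getLogStores(CacheKeys.MESSAGES);
  // Entries expire after Time.FIVE_MINUTES, whether backed by Redis or in-memory Keyv.
  await messagesCache.set('example-message-id', { text: 'partial stream' });
  return messagesCache.get('example-message-id');
}
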
@@ -109,6 +109,14 @@ const condenseArray = (item) => {
|
||||
* @returns {string} - The formatted log message.
|
||||
*/
|
||||
const debugTraverse = winston.format.printf(({ level, message, timestamp, ...metadata }) => {
|
||||
if (!message) {
|
||||
return `${timestamp} ${level}`;
|
||||
}
|
||||
|
||||
if (!message?.trim || typeof message !== 'string') {
|
||||
return `${timestamp} ${level}: ${JSON.stringify(message)}`;
|
||||
}
|
||||
|
||||
let msg = `${timestamp} ${level}: ${truncateLongStrings(message?.trim(), 150)}`;
|
||||
try {
|
||||
if (level !== 'debug') {
|
||||
|
||||
api/models/Categories.js (new file, 61 lines)
@@ -0,0 +1,61 @@
|
||||
const { logger } = require('~/config');
|
||||
// const { Categories } = require('./schema/categories');
|
||||
const options = [
|
||||
{
|
||||
label: '',
|
||||
value: '',
|
||||
},
|
||||
{
|
||||
label: 'idea',
|
||||
value: 'idea',
|
||||
},
|
||||
{
|
||||
label: 'travel',
|
||||
value: 'travel',
|
||||
},
|
||||
{
|
||||
label: 'teach_or_explain',
|
||||
value: 'teach_or_explain',
|
||||
},
|
||||
{
|
||||
label: 'write',
|
||||
value: 'write',
|
||||
},
|
||||
{
|
||||
label: 'shop',
|
||||
value: 'shop',
|
||||
},
|
||||
{
|
||||
label: 'code',
|
||||
value: 'code',
|
||||
},
|
||||
{
|
||||
label: 'misc',
|
||||
value: 'misc',
|
||||
},
|
||||
{
|
||||
label: 'roleplay',
|
||||
value: 'roleplay',
|
||||
},
|
||||
{
|
||||
label: 'finance',
|
||||
value: 'finance',
|
||||
},
|
||||
];
|
||||
|
||||
module.exports = {
|
||||
/**
|
||||
* Retrieves the categories asynchronously.
|
||||
* @returns {Promise<TGetCategoriesResponse>} An array of category objects.
|
||||
* @throws {Error} If there is an error retrieving the categories.
|
||||
*/
|
||||
getCategories: async () => {
|
||||
try {
|
||||
// const categories = await Categories.find();
|
||||
return options;
|
||||
} catch (error) {
|
||||
logger.error('Error getting categories', error);
|
||||
return [];
|
||||
}
|
||||
},
|
||||
};
|
||||
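Since the Mongoose query is commented out, getCategories currently resolves to the hard-coded options list, so callers can treat it as a simple async lookup. A short sketch:

const { getCategories } = require('~/models/Categories');

// Resolves to [{ label: '', value: '' }, { label: 'idea', value: 'idea' }, ...]
getCategories().then((categories) => {
  console.log(categories.map((c) => c.value).filter(Boolean));
});
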
@@ -2,6 +2,20 @@ const Conversation = require('./schema/convoSchema');
|
||||
const { getMessages, deleteMessages } = require('./Message');
|
||||
const logger = require('~/config/winston');
|
||||
|
||||
/**
|
||||
* Searches for a conversation by conversationId and returns a lean document with only conversationId and user.
|
||||
* @param {string} conversationId - The conversation's ID.
|
||||
* @returns {Promise<{conversationId: string, user: string} | null>} The conversation object with selected fields or null if not found.
|
||||
*/
|
||||
const searchConversation = async (conversationId) => {
|
||||
try {
|
||||
return await Conversation.findOne({ conversationId }, 'conversationId user').lean();
|
||||
} catch (error) {
|
||||
logger.error('[searchConversation] Error searching conversation', error);
|
||||
throw new Error('Error searching conversation');
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Retrieves a single conversation for a given user and conversation ID.
|
||||
* @param {string} user - The user's ID.
|
||||
@@ -19,20 +33,40 @@ const getConvo = async (user, conversationId) => {
|
||||
|
||||
module.exports = {
|
||||
Conversation,
|
||||
saveConvo: async (user, { conversationId, newConversationId, ...convo }) => {
|
||||
searchConversation,
|
||||
/**
|
||||
* Saves a conversation to the database.
|
||||
* @param {Object} req - The request object.
|
||||
* @param {string} conversationId - The conversation's ID.
|
||||
* @param {Object} metadata - Additional metadata to log for operation.
|
||||
* @returns {Promise<TConversation>} The conversation object.
|
||||
*/
|
||||
saveConvo: async (req, { conversationId, newConversationId, ...convo }, metadata) => {
|
||||
try {
|
||||
if (metadata && metadata?.context) {
|
||||
logger.debug(`[saveConvo] ${metadata.context}`);
|
||||
}
|
||||
const messages = await getMessages({ conversationId }, '_id');
|
||||
const update = { ...convo, messages, user };
|
||||
const update = { ...convo, messages, user: req.user.id };
|
||||
if (newConversationId) {
|
||||
update.conversationId = newConversationId;
|
||||
}
|
||||
|
||||
return await Conversation.findOneAndUpdate({ conversationId: conversationId, user }, update, {
|
||||
new: true,
|
||||
upsert: true,
|
||||
});
|
||||
const conversation = await Conversation.findOneAndUpdate(
|
||||
{ conversationId, user: req.user.id },
|
||||
update,
|
||||
{
|
||||
new: true,
|
||||
upsert: true,
|
||||
},
|
||||
);
|
||||
|
||||
return conversation.toObject();
|
||||
} catch (error) {
|
||||
logger.error('[saveConvo] Error saving conversation', error);
|
||||
if (metadata && metadata?.context) {
|
||||
logger.info(`[saveConvo] ${metadata.context}`);
|
||||
}
|
||||
return { message: 'Error saving conversation' };
|
||||
}
|
||||
},
|
||||
@@ -54,13 +88,16 @@ module.exports = {
|
||||
throw new Error('Failed to save conversations in bulk.');
|
||||
}
|
||||
},
|
||||
getConvosByPage: async (user, pageNumber = 1, pageSize = 25, isArchived = false) => {
|
||||
getConvosByPage: async (user, pageNumber = 1, pageSize = 25, isArchived = false, tags) => {
|
||||
const query = { user };
|
||||
if (isArchived) {
|
||||
query.isArchived = true;
|
||||
} else {
|
||||
query.$or = [{ isArchived: false }, { isArchived: { $exists: false } }];
|
||||
}
|
||||
if (Array.isArray(tags) && tags.length > 0) {
|
||||
query.tags = { $in: tags };
|
||||
}
|
||||
try {
|
||||
const totalConvos = (await Conversation.countDocuments(query)) || 1;
|
||||
const totalPages = Math.ceil(totalConvos / pageSize);
|
||||
|
||||
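A hedged sketch of the updated call shapes (illustrative only, not part of this changeset): saveConvo now takes the request object so it can scope writes to req.user.id and accepts an optional metadata context, and getConvosByPage gained a tags filter. The context string below is a placeholder.

const { saveConvo, getConvosByPage } = require('~/models/Conversation');

async function updateConvoExample(req) {
  // req.user is assumed to be populated by auth middleware.
  const conversation = await saveConvo(
    req,
    { conversationId: req.body.conversationId, title: 'New Chat' },
    { context: 'POST /api/convos/update (illustrative context string)' },
  );

  // Page 1 of non-archived conversations tagged 'research' for this user.
  const page = await getConvosByPage(req.user.id, 1, 25, false, ['research']);
  return { conversation, page };
}
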
api/models/ConversationTag.js (new file, 242 lines)
@@ -0,0 +1,242 @@
|
||||
const ConversationTag = require('./schema/conversationTagSchema');
|
||||
const Conversation = require('./schema/convoSchema');
|
||||
const logger = require('~/config/winston');
|
||||
|
||||
/**
|
||||
* Retrieves all conversation tags for a user.
|
||||
* @param {string} user - The user ID.
|
||||
* @returns {Promise<Array>} An array of conversation tags.
|
||||
*/
|
||||
const getConversationTags = async (user) => {
|
||||
try {
|
||||
return await ConversationTag.find({ user }).sort({ position: 1 }).lean();
|
||||
} catch (error) {
|
||||
logger.error('[getConversationTags] Error getting conversation tags', error);
|
||||
throw new Error('Error getting conversation tags');
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Creates a new conversation tag.
|
||||
* @param {string} user - The user ID.
|
||||
* @param {Object} data - The tag data.
|
||||
* @param {string} data.tag - The tag name.
|
||||
* @param {string} [data.description] - The tag description.
|
||||
* @param {boolean} [data.addToConversation] - Whether to add the tag to a conversation.
|
||||
* @param {string} [data.conversationId] - The conversation ID to add the tag to.
|
||||
* @returns {Promise<Object>} The created tag.
|
||||
*/
|
||||
const createConversationTag = async (user, data) => {
|
||||
try {
|
||||
const { tag, description, addToConversation, conversationId } = data;
|
||||
|
||||
const existingTag = await ConversationTag.findOne({ user, tag }).lean();
|
||||
if (existingTag) {
|
||||
return existingTag;
|
||||
}
|
||||
|
||||
const maxPosition = await ConversationTag.findOne({ user }).sort('-position').lean();
|
||||
const position = (maxPosition?.position || 0) + 1;
|
||||
|
||||
const newTag = await ConversationTag.findOneAndUpdate(
|
||||
{ tag, user },
|
||||
{
|
||||
tag,
|
||||
user,
|
||||
count: addToConversation ? 1 : 0,
|
||||
position,
|
||||
description,
|
||||
$setOnInsert: { createdAt: new Date() },
|
||||
},
|
||||
{
|
||||
new: true,
|
||||
upsert: true,
|
||||
lean: true,
|
||||
},
|
||||
);
|
||||
|
||||
if (addToConversation && conversationId) {
|
||||
await Conversation.findOneAndUpdate(
|
||||
{ user, conversationId },
|
||||
{ $addToSet: { tags: tag } },
|
||||
{ new: true },
|
||||
);
|
||||
}
|
||||
|
||||
return newTag;
|
||||
} catch (error) {
|
||||
logger.error('[createConversationTag] Error creating conversation tag', error);
|
||||
throw new Error('Error creating conversation tag');
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Updates an existing conversation tag.
|
||||
* @param {string} user - The user ID.
|
||||
* @param {string} oldTag - The current tag name.
|
||||
* @param {Object} data - The updated tag data.
|
||||
* @param {string} [data.tag] - The new tag name.
|
||||
* @param {string} [data.description] - The updated description.
|
||||
* @param {number} [data.position] - The new position.
|
||||
* @returns {Promise<Object>} The updated tag.
|
||||
*/
|
||||
const updateConversationTag = async (user, oldTag, data) => {
|
||||
try {
|
||||
const { tag: newTag, description, position } = data;
|
||||
|
||||
const existingTag = await ConversationTag.findOne({ user, tag: oldTag }).lean();
|
||||
if (!existingTag) {
|
||||
return null;
|
||||
}
|
||||
|
||||
if (newTag && newTag !== oldTag) {
|
||||
const tagAlreadyExists = await ConversationTag.findOne({ user, tag: newTag }).lean();
|
||||
if (tagAlreadyExists) {
|
||||
throw new Error('Tag already exists');
|
||||
}
|
||||
|
||||
await Conversation.updateMany({ user, tags: oldTag }, { $set: { 'tags.$': newTag } });
|
||||
}
|
||||
|
||||
const updateData = {};
|
||||
if (newTag) {
|
||||
updateData.tag = newTag;
|
||||
}
|
||||
if (description !== undefined) {
|
||||
updateData.description = description;
|
||||
}
|
||||
if (position !== undefined) {
|
||||
await adjustPositions(user, existingTag.position, position);
|
||||
updateData.position = position;
|
||||
}
|
||||
|
||||
return await ConversationTag.findOneAndUpdate({ user, tag: oldTag }, updateData, {
|
||||
new: true,
|
||||
lean: true,
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('[updateConversationTag] Error updating conversation tag', error);
|
||||
throw new Error('Error updating conversation tag');
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Adjusts positions of tags when a tag's position is changed.
|
||||
* @param {string} user - The user ID.
|
||||
* @param {number} oldPosition - The old position of the tag.
|
||||
* @param {number} newPosition - The new position of the tag.
|
||||
* @returns {Promise<void>}
|
||||
*/
|
||||
const adjustPositions = async (user, oldPosition, newPosition) => {
|
||||
if (oldPosition === newPosition) {
|
||||
return;
|
||||
}
|
||||
|
||||
const update = oldPosition < newPosition ? { $inc: { position: -1 } } : { $inc: { position: 1 } };
|
||||
|
||||
await ConversationTag.updateMany(
|
||||
{
|
||||
user,
|
||||
position: {
|
||||
$gt: Math.min(oldPosition, newPosition),
|
||||
$lte: Math.max(oldPosition, newPosition),
|
||||
},
|
||||
},
|
||||
update,
|
||||
);
|
||||
};
|
||||
|
||||
/**
|
||||
* Deletes a conversation tag.
|
||||
* @param {string} user - The user ID.
|
||||
* @param {string} tag - The tag to delete.
|
||||
* @returns {Promise<Object>} The deleted tag.
|
||||
*/
|
||||
const deleteConversationTag = async (user, tag) => {
|
||||
try {
|
||||
const deletedTag = await ConversationTag.findOneAndDelete({ user, tag }).lean();
|
||||
if (!deletedTag) {
|
||||
return null;
|
||||
}
|
||||
|
||||
await Conversation.updateMany({ user, tags: tag }, { $pull: { tags: tag } });
|
||||
|
||||
await ConversationTag.updateMany(
|
||||
{ user, position: { $gt: deletedTag.position } },
|
||||
{ $inc: { position: -1 } },
|
||||
);
|
||||
|
||||
return deletedTag;
|
||||
} catch (error) {
|
||||
logger.error('[deleteConversationTag] Error deleting conversation tag', error);
|
||||
throw new Error('Error deleting conversation tag');
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Updates tags for a specific conversation.
|
||||
* @param {string} user - The user ID.
|
||||
* @param {string} conversationId - The conversation ID.
|
||||
* @param {string[]} tags - The new set of tags for the conversation.
|
||||
* @returns {Promise<string[]>} The updated list of tags for the conversation.
|
||||
*/
|
||||
const updateTagsForConversation = async (user, conversationId, tags) => {
|
||||
try {
|
||||
const conversation = await Conversation.findOne({ user, conversationId }).lean();
|
||||
if (!conversation) {
|
||||
throw new Error('Conversation not found');
|
||||
}
|
||||
|
||||
const oldTags = new Set(conversation.tags);
|
||||
const newTags = new Set(tags);
|
||||
|
||||
const addedTags = [...newTags].filter((tag) => !oldTags.has(tag));
|
||||
const removedTags = [...oldTags].filter((tag) => !newTags.has(tag));
|
||||
|
||||
const bulkOps = [];
|
||||
|
||||
for (const tag of addedTags) {
|
||||
bulkOps.push({
|
||||
updateOne: {
|
||||
filter: { user, tag },
|
||||
update: { $inc: { count: 1 } },
|
||||
upsert: true,
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
for (const tag of removedTags) {
|
||||
bulkOps.push({
|
||||
updateOne: {
|
||||
filter: { user, tag },
|
||||
update: { $inc: { count: -1 } },
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
if (bulkOps.length > 0) {
|
||||
await ConversationTag.bulkWrite(bulkOps);
|
||||
}
|
||||
|
||||
const updatedConversation = (
|
||||
await Conversation.findOneAndUpdate(
|
||||
{ user, conversationId },
|
||||
{ $set: { tags: [...newTags] } },
|
||||
{ new: true },
|
||||
)
|
||||
).toObject();
|
||||
|
||||
return updatedConversation.tags;
|
||||
} catch (error) {
|
||||
logger.error('[updateTagsForConversation] Error updating tags', error);
|
||||
throw new Error('Error updating tags for conversation');
|
||||
}
|
||||
};
|
||||
|
||||
module.exports = {
|
||||
getConversationTags,
|
||||
createConversationTag,
|
||||
updateConversationTag,
|
||||
deleteConversationTag,
|
||||
updateTagsForConversation,
|
||||
};
|
||||
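A short usage sketch of the new bookmark/tag model (illustrative only, not part of this changeset); all IDs and tag names are placeholders.

const {
  createConversationTag,
  updateTagsForConversation,
} = require('~/models/ConversationTag');

async function tagExample(userId, conversationId) {
  // Creates the tag (appended at the end of the position order)
  // and attaches it to the conversation in one call.
  await createConversationTag(userId, {
    tag: 'research',
    description: 'Papers and notes',
    addToConversation: true,
    conversationId,
  });

  // Replaces the conversation's tag set; per-tag counts are adjusted via bulkWrite.
  return updateTagsForConversation(userId, conversationId, ['research', 'ideas']);
}
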
@@ -1,209 +1,342 @@
|
||||
const { z } = require('zod');
|
||||
const Message = require('./schema/messageSchema');
|
||||
const logger = require('~/config/winston');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
const idSchema = z.string().uuid();
|
||||
|
||||
/**
|
||||
* Saves a message in the database.
|
||||
*
|
||||
* @async
|
||||
* @function saveMessage
|
||||
* @param {Express.Request} req - The request object containing user information.
|
||||
* @param {Object} params - The message data object.
|
||||
* @param {string} params.endpoint - The endpoint where the message originated.
|
||||
* @param {string} params.iconURL - The URL of the sender's icon.
|
||||
* @param {string} params.messageId - The unique identifier for the message.
|
||||
* @param {string} params.newMessageId - The new unique identifier for the message (if applicable).
|
||||
* @param {string} params.conversationId - The identifier of the conversation.
|
||||
* @param {string} [params.parentMessageId] - The identifier of the parent message, if any.
|
||||
* @param {string} params.sender - The identifier of the sender.
|
||||
* @param {string} params.text - The text content of the message.
|
||||
* @param {boolean} params.isCreatedByUser - Indicates if the message was created by the user.
|
||||
* @param {string} [params.error] - Any error associated with the message.
|
||||
* @param {boolean} [params.unfinished] - Indicates if the message is unfinished.
|
||||
* @param {Object[]} [params.files] - An array of files associated with the message.
|
||||
* @param {boolean} [params.isEdited] - Indicates if the message was edited.
|
||||
* @param {string} [params.finish_reason] - Reason for finishing the message.
|
||||
* @param {number} [params.tokenCount] - The number of tokens in the message.
|
||||
* @param {string} [params.plugin] - Plugin associated with the message.
|
||||
* @param {string[]} [params.plugins] - An array of plugins associated with the message.
|
||||
* @param {string} [params.model] - The model used to generate the message.
|
||||
* @param {Object} [metadata] - Additional metadata for this operation
|
||||
* @param {string} [metadata.context] - The context of the operation
|
||||
* @returns {Promise<TMessage>} The updated or newly inserted message document.
|
||||
* @throws {Error} If there is an error in saving the message.
|
||||
*/
|
||||
async function saveMessage(req, params, metadata) {
|
||||
try {
|
||||
if (!req || !req.user || !req.user.id) {
|
||||
throw new Error('User not authenticated');
|
||||
}
|
||||
|
||||
const {
|
||||
text,
|
||||
error,
|
||||
model,
|
||||
files,
|
||||
plugin,
|
||||
sender,
|
||||
plugins,
|
||||
iconURL,
|
||||
endpoint,
|
||||
isEdited,
|
||||
messageId,
|
||||
unfinished,
|
||||
tokenCount,
|
||||
newMessageId,
|
||||
finish_reason,
|
||||
conversationId,
|
||||
parentMessageId,
|
||||
isCreatedByUser,
|
||||
} = params;
|
||||
|
||||
const validConvoId = idSchema.safeParse(conversationId);
|
||||
if (!validConvoId.success) {
|
||||
logger.warn(`Invalid conversation ID: ${conversationId}`);
|
||||
if (metadata && metadata?.context) {
|
||||
logger.info(`---\`saveMessage\` context: ${metadata.context}`);
|
||||
}
|
||||
|
||||
logger.info(`---Invalid conversation ID Params:
|
||||
|
||||
${JSON.stringify(params, null, 2)}
|
||||
|
||||
`);
|
||||
return;
|
||||
}
|
||||
|
||||
const update = {
|
||||
user: req.user.id,
|
||||
iconURL,
|
||||
endpoint,
|
||||
messageId: newMessageId || messageId,
|
||||
conversationId,
|
||||
parentMessageId,
|
||||
sender,
|
||||
text,
|
||||
isCreatedByUser,
|
||||
isEdited,
|
||||
finish_reason,
|
||||
error,
|
||||
unfinished,
|
||||
tokenCount,
|
||||
plugin,
|
||||
plugins,
|
||||
model,
|
||||
};
|
||||
|
||||
if (files) {
|
||||
update.files = files;
|
||||
}
|
||||
|
||||
const message = await Message.findOneAndUpdate({ messageId, user: req.user.id }, update, {
|
||||
upsert: true,
|
||||
new: true,
|
||||
});
|
||||
|
||||
return message.toObject();
|
||||
} catch (err) {
|
||||
logger.error('Error saving message:', err);
|
||||
if (metadata && metadata?.context) {
|
||||
logger.info(`---\`saveMessage\` context: ${metadata.context}`);
|
||||
}
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Saves multiple messages in the database in bulk.
|
||||
*
|
||||
* @async
|
||||
* @function bulkSaveMessages
|
||||
* @param {Object[]} messages - An array of message objects to save.
|
||||
* @returns {Promise<Object>} The result of the bulk write operation.
|
||||
* @throws {Error} If there is an error in saving messages in bulk.
|
||||
*/
|
||||
async function bulkSaveMessages(messages) {
|
||||
try {
|
||||
const bulkOps = messages.map((message) => ({
|
||||
updateOne: {
|
||||
filter: { messageId: message.messageId },
|
||||
update: message,
|
||||
upsert: true,
|
||||
},
|
||||
}));
|
||||
|
||||
const result = await Message.bulkWrite(bulkOps);
|
||||
return result;
|
||||
} catch (err) {
|
||||
logger.error('Error saving messages in bulk:', err);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Records a message in the database.
|
||||
*
|
||||
* @async
|
||||
* @function recordMessage
|
||||
* @param {Object} params - The message data object.
|
||||
* @param {string} params.user - The identifier of the user.
|
||||
* @param {string} params.endpoint - The endpoint where the message originated.
|
||||
* @param {string} params.messageId - The unique identifier for the message.
|
||||
* @param {string} params.conversationId - The identifier of the conversation.
|
||||
* @param {string} [params.parentMessageId] - The identifier of the parent message, if any.
|
||||
* @param {Partial<TMessage>} rest - Any additional properties from the TMessage typedef not explicitly listed.
|
||||
* @returns {Promise<Object>} The updated or newly inserted message document.
|
||||
* @throws {Error} If there is an error in saving the message.
|
||||
*/
|
||||
async function recordMessage({
|
||||
user,
|
||||
endpoint,
|
||||
messageId,
|
||||
conversationId,
|
||||
parentMessageId,
|
||||
...rest
|
||||
}) {
|
||||
try {
|
||||
// No parsing of convoId as may use threadId
|
||||
const message = {
|
||||
user,
|
||||
endpoint,
|
||||
messageId,
|
||||
conversationId,
|
||||
parentMessageId,
|
||||
...rest,
|
||||
};
|
||||
|
||||
return await Message.findOneAndUpdate({ user, messageId }, message, {
|
||||
upsert: true,
|
||||
new: true,
|
||||
});
|
||||
} catch (err) {
|
||||
logger.error('Error recording message:', err);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates the text of a message.
|
||||
*
|
||||
* @async
|
||||
* @function updateMessageText
|
||||
* @param {Object} params - The update data object.
|
||||
* @param {Object} req - The request object.
|
||||
* @param {string} params.messageId - The unique identifier for the message.
|
||||
* @param {string} params.text - The new text content of the message.
|
||||
* @returns {Promise<void>}
|
||||
* @throws {Error} If there is an error in updating the message text.
|
||||
*/
|
||||
async function updateMessageText(req, { messageId, text }) {
|
||||
try {
|
||||
await Message.updateOne({ messageId, user: req.user.id }, { text });
|
||||
} catch (err) {
|
||||
logger.error('Error updating message text:', err);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates a message.
|
||||
*
|
||||
* @async
|
||||
* @function updateMessage
|
||||
* @param {Object} message - The message object containing update data.
|
||||
* @param {Object} req - The request object.
|
||||
* @param {string} message.messageId - The unique identifier for the message.
|
||||
* @param {string} [message.text] - The new text content of the message.
|
||||
* @param {Object[]} [message.files] - The files associated with the message.
|
||||
* @param {boolean} [message.isCreatedByUser] - Indicates if the message was created by the user.
|
||||
* @param {string} [message.sender] - The identifier of the sender.
|
||||
* @param {number} [message.tokenCount] - The number of tokens in the message.
|
||||
* @param {Object} [metadata] - The operation metadata
|
||||
* @param {string} [metadata.context] - The operation metadata
|
||||
* @returns {Promise<TMessage>} The updated message document.
|
||||
* @throws {Error} If there is an error in updating the message or if the message is not found.
|
||||
*/
|
||||
async function updateMessage(req, message, metadata) {
|
||||
try {
|
||||
const { messageId, ...update } = message;
|
||||
update.isEdited = true;
|
||||
const updatedMessage = await Message.findOneAndUpdate(
|
||||
{ messageId, user: req.user.id },
|
||||
update,
|
||||
{
|
||||
new: true,
|
||||
},
|
||||
);
|
||||
|
||||
if (!updatedMessage) {
|
||||
throw new Error('Message not found or user not authorized.');
|
||||
}
|
||||
|
||||
return {
|
||||
messageId: updatedMessage.messageId,
|
||||
conversationId: updatedMessage.conversationId,
|
||||
parentMessageId: updatedMessage.parentMessageId,
|
||||
sender: updatedMessage.sender,
|
||||
text: updatedMessage.text,
|
||||
isCreatedByUser: updatedMessage.isCreatedByUser,
|
||||
tokenCount: updatedMessage.tokenCount,
|
||||
isEdited: true,
|
||||
};
|
||||
} catch (err) {
|
||||
logger.error('Error updating message:', err);
|
||||
if (metadata && metadata?.context) {
|
||||
logger.info(`---\`updateMessage\` context: ${metadata.context}`);
|
||||
}
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes messages in a conversation since a specific message.
|
||||
*
|
||||
* @async
|
||||
* @function deleteMessagesSince
|
||||
* @param {Object} params - The parameters object.
|
||||
* @param {Object} req - The request object.
|
||||
* @param {string} params.messageId - The unique identifier for the message.
|
||||
* @param {string} params.conversationId - The identifier of the conversation.
|
||||
* @returns {Promise<Number>} The number of deleted messages.
|
||||
* @throws {Error} If there is an error in deleting messages.
|
||||
*/
|
||||
async function deleteMessagesSince(req, { messageId, conversationId }) {
|
||||
try {
|
||||
const message = await Message.findOne({ messageId, user: req.user.id }).lean();
|
||||
|
||||
if (message) {
|
||||
const query = Message.find({ conversationId, user: req.user.id });
|
||||
return await query.deleteMany({
|
||||
createdAt: { $gt: message.createdAt },
|
||||
});
|
||||
}
|
||||
return undefined;
|
||||
} catch (err) {
|
||||
logger.error('Error deleting messages:', err);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves messages from the database.
|
||||
* @async
|
||||
* @function getMessages
|
||||
* @param {Record<string, unknown>} filter - The filter criteria.
|
||||
* @param {string | undefined} [select] - The fields to select.
|
||||
* @returns {Promise<TMessage[]>} The messages that match the filter criteria.
|
||||
* @throws {Error} If there is an error in retrieving messages.
|
||||
*/
|
||||
async function getMessages(filter, select) {
|
||||
try {
|
||||
if (select) {
|
||||
return await Message.find(filter).select(select).sort({ createdAt: 1 }).lean();
|
||||
}
|
||||
|
||||
return await Message.find(filter).sort({ createdAt: 1 }).lean();
|
||||
} catch (err) {
|
||||
logger.error('Error getting messages:', err);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes messages from the database.
|
||||
*
|
||||
* @async
|
||||
* @function deleteMessages
|
||||
* @param {Object} filter - The filter criteria to find messages to delete.
|
||||
* @returns {Promise<Object>} The metadata with count of deleted messages.
|
||||
* @throws {Error} If there is an error in deleting messages.
|
||||
*/
|
||||
async function deleteMessages(filter) {
|
||||
try {
|
||||
return await Message.deleteMany(filter);
|
||||
} catch (err) {
|
||||
logger.error('Error deleting messages:', err);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
Message,
|
||||
|
||||
async saveMessage({
|
||||
user,
|
||||
endpoint,
|
||||
iconURL,
|
||||
messageId,
|
||||
newMessageId,
|
||||
conversationId,
|
||||
parentMessageId,
|
||||
sender,
|
||||
text,
|
||||
isCreatedByUser,
|
||||
error,
|
||||
unfinished,
|
||||
files,
|
||||
isEdited,
|
||||
finish_reason,
|
||||
tokenCount,
|
||||
plugin,
|
||||
plugins,
|
||||
model,
|
||||
}) {
|
||||
try {
|
||||
const validConvoId = idSchema.safeParse(conversationId);
|
||||
if (!validConvoId.success) {
|
||||
return;
|
||||
}
|
||||
|
||||
const update = {
|
||||
user,
|
||||
iconURL,
|
||||
endpoint,
|
||||
messageId: newMessageId || messageId,
|
||||
conversationId,
|
||||
parentMessageId,
|
||||
sender,
|
||||
text,
|
||||
isCreatedByUser,
|
||||
isEdited,
|
||||
finish_reason,
|
||||
error,
|
||||
unfinished,
|
||||
tokenCount,
|
||||
plugin,
|
||||
plugins,
|
||||
model,
|
||||
};
|
||||
|
||||
if (files) {
|
||||
update.files = files;
|
||||
}
|
||||
// may also need to update the conversation here
|
||||
await Message.findOneAndUpdate({ messageId }, update, { upsert: true, new: true });
|
||||
|
||||
return {
|
||||
messageId,
|
||||
conversationId,
|
||||
parentMessageId,
|
||||
sender,
|
||||
text,
|
||||
isCreatedByUser,
|
||||
tokenCount,
|
||||
};
|
||||
} catch (err) {
|
||||
logger.error('Error saving message:', err);
|
||||
throw new Error('Failed to save message.');
|
||||
}
|
||||
},
|
||||
|
||||
async bulkSaveMessages(messages) {
|
||||
try {
|
||||
const bulkOps = messages.map((message) => ({
|
||||
updateOne: {
|
||||
filter: { messageId: message.messageId },
|
||||
update: message,
|
||||
upsert: true,
|
||||
},
|
||||
}));
|
||||
|
||||
const result = await Message.bulkWrite(bulkOps);
|
||||
return result;
|
||||
} catch (err) {
|
||||
logger.error('Error saving messages in bulk:', err);
|
||||
throw new Error('Failed to save messages in bulk.');
|
||||
}
|
||||
},
|
||||
|
||||
/**
|
||||
* Records a message in the database.
|
||||
*
|
||||
* @async
|
||||
* @function recordMessage
|
||||
* @param {Object} params - The message data object.
|
||||
* @param {string} params.user - The identifier of the user.
|
||||
* @param {string} params.endpoint - The endpoint where the message originated.
|
||||
* @param {string} params.messageId - The unique identifier for the message.
|
||||
* @param {string} params.conversationId - The identifier of the conversation.
|
||||
* @param {string} [params.parentMessageId] - The identifier of the parent message, if any.
|
||||
* @param {Partial<TMessage>} rest - Any additional properties from the TMessage typedef not explicitly listed.
|
||||
* @returns {Promise<Object>} The updated or newly inserted message document.
|
||||
* @throws {Error} If there is an error in saving the message.
|
||||
*/
|
||||
async recordMessage({ user, endpoint, messageId, conversationId, parentMessageId, ...rest }) {
|
||||
try {
|
||||
// No parsing of convoId as may use threadId
|
||||
const message = {
|
||||
user,
|
||||
endpoint,
|
||||
messageId,
|
||||
conversationId,
|
||||
parentMessageId,
|
||||
...rest,
|
||||
};
|
||||
|
||||
return await Message.findOneAndUpdate({ user, messageId }, message, {
|
||||
upsert: true,
|
||||
new: true,
|
||||
});
|
||||
} catch (err) {
|
||||
logger.error('Error saving message:', err);
|
||||
throw new Error('Failed to save message.');
|
||||
}
|
||||
},
|
||||
async updateMessageText({ messageId, text }) {
|
||||
try {
|
||||
await Message.updateOne({ messageId }, { text });
|
||||
} catch (err) {
|
||||
logger.error('Error updating message text:', err);
|
||||
throw new Error('Failed to update message text.');
|
||||
}
|
||||
},
|
||||
async updateMessage(message) {
|
||||
try {
|
||||
const { messageId, ...update } = message;
|
||||
update.isEdited = true;
|
||||
const updatedMessage = await Message.findOneAndUpdate({ messageId }, update, {
|
||||
new: true,
|
||||
});
|
||||
|
||||
if (!updatedMessage) {
|
||||
throw new Error('Message not found.');
|
||||
}
|
||||
|
||||
return {
|
||||
messageId: updatedMessage.messageId,
|
||||
conversationId: updatedMessage.conversationId,
|
||||
parentMessageId: updatedMessage.parentMessageId,
|
||||
sender: updatedMessage.sender,
|
||||
text: updatedMessage.text,
|
||||
isCreatedByUser: updatedMessage.isCreatedByUser,
|
||||
tokenCount: updatedMessage.tokenCount,
|
||||
isEdited: true,
|
||||
};
|
||||
} catch (err) {
|
||||
logger.error('Error updating message:', err);
|
||||
throw new Error('Failed to update message.');
|
||||
}
|
||||
},
|
||||
async deleteMessagesSince({ messageId, conversationId }) {
|
||||
try {
|
||||
const message = await Message.findOne({ messageId }).lean();
|
||||
|
||||
if (message) {
|
||||
return await Message.find({ conversationId }).deleteMany({
|
||||
createdAt: { $gt: message.createdAt },
|
||||
});
|
||||
}
|
||||
} catch (err) {
|
||||
logger.error('Error deleting messages:', err);
|
||||
throw new Error('Failed to delete messages.');
|
||||
}
|
||||
},
|
||||
|
||||
/**
|
||||
* Retrieves messages from the database.
|
||||
* @param {Record<string, unknown>} filter
|
||||
* @param {string | undefined} [select]
|
||||
* @returns
|
||||
*/
|
||||
async getMessages(filter, select) {
|
||||
try {
|
||||
if (select) {
|
||||
return await Message.find(filter).select(select).sort({ createdAt: 1 }).lean();
|
||||
}
|
||||
|
||||
return await Message.find(filter).sort({ createdAt: 1 }).lean();
|
||||
} catch (err) {
|
||||
logger.error('Error getting messages:', err);
|
||||
throw new Error('Failed to get messages.');
|
||||
}
|
||||
},
|
||||
|
||||
async deleteMessages(filter) {
|
||||
try {
|
||||
return await Message.deleteMany(filter);
|
||||
} catch (err) {
|
||||
logger.error('Error deleting messages:', err);
|
||||
throw new Error('Failed to delete messages.');
|
||||
}
|
||||
},
|
||||
saveMessage,
|
||||
bulkSaveMessages,
|
||||
recordMessage,
|
||||
updateMessageText,
|
||||
updateMessage,
|
||||
deleteMessagesSince,
|
||||
getMessages,
|
||||
deleteMessages,
|
||||
};
|
||||
|
||||
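The refactored message helpers are now request-scoped, so every write is filtered by req.user.id. A hedged sketch of the new call shape (illustrative only, not part of this changeset); the message ID and context string are placeholders.

const { saveMessage, deleteMessagesSince } = require('~/models/Message');

async function messageExample(req, conversationId) {
  // Throws 'User not authenticated' if req.user.id is missing.
  const saved = await saveMessage(
    req,
    {
      messageId: 'example-message-id',
      conversationId, // must be a valid UUID or the save is skipped
      sender: 'User',
      text: 'Hello, world!',
      isCreatedByUser: true,
    },
    { context: 'illustrative context string' },
  );

  // Only deletes messages belonging to req.user.id in this conversation.
  return deleteMessagesSince(req, { messageId: saved.messageId, conversationId });
}
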
api/models/Message.spec.js (new file, 239 lines)
@@ -0,0 +1,239 @@
|
||||
const mongoose = require('mongoose');
|
||||
const { v4: uuidv4 } = require('uuid');
|
||||
|
||||
jest.mock('mongoose');
|
||||
|
||||
const mockFindQuery = {
|
||||
select: jest.fn().mockReturnThis(),
|
||||
sort: jest.fn().mockReturnThis(),
|
||||
lean: jest.fn().mockReturnThis(),
|
||||
deleteMany: jest.fn().mockResolvedValue({ deletedCount: 1 }),
|
||||
};
|
||||
|
||||
const mockSchema = {
|
||||
findOneAndUpdate: jest.fn(),
|
||||
updateOne: jest.fn(),
|
||||
findOne: jest.fn(() => ({
|
||||
lean: jest.fn(),
|
||||
})),
|
||||
find: jest.fn(() => mockFindQuery),
|
||||
deleteMany: jest.fn(),
|
||||
};
|
||||
|
||||
mongoose.model.mockReturnValue(mockSchema);
|
||||
|
||||
jest.mock('~/models/schema/messageSchema', () => mockSchema);
|
||||
|
||||
jest.mock('~/config/winston', () => ({
|
||||
error: jest.fn(),
|
||||
}));
|
||||
|
||||
const {
|
||||
saveMessage,
|
||||
getMessages,
|
||||
updateMessage,
|
||||
deleteMessages,
|
||||
updateMessageText,
|
||||
deleteMessagesSince,
|
||||
} = require('~/models/Message');
|
||||
|
||||
describe('Message Operations', () => {
|
||||
let mockReq;
|
||||
let mockMessage;
|
||||
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
|
||||
mockReq = {
|
||||
user: { id: 'user123' },
|
||||
};
|
||||
|
||||
mockMessage = {
|
||||
messageId: 'msg123',
|
||||
conversationId: uuidv4(),
|
||||
text: 'Hello, world!',
|
||||
user: 'user123',
|
||||
};
|
||||
|
||||
mockSchema.findOneAndUpdate.mockResolvedValue({
|
||||
toObject: () => mockMessage,
|
||||
});
|
||||
});
|
||||
|
||||
describe('saveMessage', () => {
|
||||
it('should save a message for an authenticated user', async () => {
|
||||
const result = await saveMessage(mockReq, mockMessage);
|
||||
expect(result).toEqual(mockMessage);
|
||||
expect(mockSchema.findOneAndUpdate).toHaveBeenCalledWith(
|
||||
{ messageId: 'msg123', user: 'user123' },
|
||||
expect.objectContaining({ user: 'user123' }),
|
||||
expect.any(Object),
|
||||
);
|
||||
});
|
||||
|
||||
it('should throw an error for unauthenticated user', async () => {
|
||||
mockReq.user = null;
|
||||
await expect(saveMessage(mockReq, mockMessage)).rejects.toThrow('User not authenticated');
|
||||
});
|
||||
|
||||
it('should throw an error for invalid conversation ID', async () => {
|
||||
mockMessage.conversationId = 'invalid-id';
|
||||
await expect(saveMessage(mockReq, mockMessage)).resolves.toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('updateMessageText', () => {
|
||||
it('should update message text for the authenticated user', async () => {
|
||||
await updateMessageText(mockReq, { messageId: 'msg123', text: 'Updated text' });
|
||||
expect(mockSchema.updateOne).toHaveBeenCalledWith(
|
||||
{ messageId: 'msg123', user: 'user123' },
|
||||
{ text: 'Updated text' },
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('updateMessage', () => {
|
||||
it('should update a message for the authenticated user', async () => {
|
||||
mockSchema.findOneAndUpdate.mockResolvedValue(mockMessage);
|
||||
const result = await updateMessage(mockReq, { messageId: 'msg123', text: 'Updated text' });
|
||||
expect(result).toEqual(
|
||||
expect.objectContaining({
|
||||
messageId: 'msg123',
|
||||
text: 'Hello, world!',
|
||||
isEdited: true,
|
||||
}),
|
||||
);
|
||||
});
|
||||
|
||||
it('should throw an error if message is not found', async () => {
|
||||
mockSchema.findOneAndUpdate.mockResolvedValue(null);
|
||||
await expect(
|
||||
updateMessage(mockReq, { messageId: 'nonexistent', text: 'Test' }),
|
||||
).rejects.toThrow('Message not found or user not authorized.');
|
||||
});
|
||||
});
|
||||
|
||||
describe('deleteMessagesSince', () => {
|
||||
it('should delete messages only for the authenticated user', async () => {
|
||||
mockSchema.findOne().lean.mockResolvedValueOnce({ createdAt: new Date() });
|
||||
mockFindQuery.deleteMany.mockResolvedValueOnce({ deletedCount: 1 });
|
||||
const result = await deleteMessagesSince(mockReq, {
|
||||
messageId: 'msg123',
|
||||
conversationId: 'convo123',
|
||||
});
|
||||
expect(mockSchema.findOne).toHaveBeenCalledWith({ messageId: 'msg123', user: 'user123' });
|
||||
expect(mockSchema.find).not.toHaveBeenCalled();
|
||||
expect(result).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should return undefined if no message is found', async () => {
|
||||
mockSchema.findOne().lean.mockResolvedValueOnce(null);
|
||||
const result = await deleteMessagesSince(mockReq, {
|
||||
messageId: 'nonexistent',
|
||||
conversationId: 'convo123',
|
||||
});
|
||||
expect(result).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
describe('getMessages', () => {
|
||||
it('should retrieve messages with the correct filter', async () => {
|
||||
const filter = { conversationId: 'convo123' };
|
||||
await getMessages(filter);
|
||||
expect(mockSchema.find).toHaveBeenCalledWith(filter);
|
||||
expect(mockFindQuery.sort).toHaveBeenCalledWith({ createdAt: 1 });
|
||||
expect(mockFindQuery.lean).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
|
||||
describe('deleteMessages', () => {
|
||||
it('should delete messages with the correct filter', async () => {
|
||||
await deleteMessages({ user: 'user123' });
|
||||
expect(mockSchema.deleteMany).toHaveBeenCalledWith({ user: 'user123' });
|
||||
});
|
||||
});
|
||||
|
||||
describe('Conversation Hijacking Prevention', () => {
|
||||
it('should not allow editing a message in another user\'s conversation', async () => {
|
||||
const attackerReq = { user: { id: 'attacker123' } };
|
||||
const victimConversationId = 'victim-convo-123';
|
||||
const victimMessageId = 'victim-msg-123';
|
||||
|
||||
mockSchema.findOneAndUpdate.mockResolvedValue(null);
|
||||
|
||||
await expect(
|
||||
updateMessage(attackerReq, {
|
||||
messageId: victimMessageId,
|
||||
conversationId: victimConversationId,
|
||||
text: 'Hacked message',
|
||||
}),
|
||||
).rejects.toThrow('Message not found or user not authorized.');
|
||||
|
||||
expect(mockSchema.findOneAndUpdate).toHaveBeenCalledWith(
|
||||
{ messageId: victimMessageId, user: 'attacker123' },
|
||||
expect.anything(),
|
||||
expect.anything(),
|
||||
);
|
||||
});
|
||||
|
||||
it('should not allow deleting messages from another user\'s conversation', async () => {
|
||||
const attackerReq = { user: { id: 'attacker123' } };
|
||||
const victimConversationId = 'victim-convo-123';
|
||||
const victimMessageId = 'victim-msg-123';
|
||||
|
||||
mockSchema.findOne().lean.mockResolvedValueOnce(null); // Simulating message not found for this user
|
||||
const result = await deleteMessagesSince(attackerReq, {
|
||||
messageId: victimMessageId,
|
||||
conversationId: victimConversationId,
|
||||
});
|
||||
|
||||
expect(result).toBeUndefined();
|
||||
expect(mockSchema.findOne).toHaveBeenCalledWith({
|
||||
messageId: victimMessageId,
|
||||
user: 'attacker123',
|
||||
});
|
||||
});
|
||||
|
||||
it('should not allow inserting a new message into another user\'s conversation', async () => {
|
||||
const attackerReq = { user: { id: 'attacker123' } };
|
||||
const victimConversationId = uuidv4(); // Use a valid UUID
|
||||
|
||||
await expect(
|
||||
saveMessage(attackerReq, {
|
||||
conversationId: victimConversationId,
|
||||
text: 'Inserted malicious message',
|
||||
messageId: 'new-msg-123',
|
||||
}),
|
||||
).resolves.not.toThrow(); // It should not throw an error
|
||||
|
||||
// Check that the message was saved with the attacker's user ID
|
||||
expect(mockSchema.findOneAndUpdate).toHaveBeenCalledWith(
|
||||
{ messageId: 'new-msg-123', user: 'attacker123' },
|
||||
expect.objectContaining({
|
||||
user: 'attacker123',
|
||||
conversationId: victimConversationId,
|
||||
}),
|
||||
expect.anything(),
|
||||
);
|
||||
});
|
||||
|
||||
it('should allow retrieving messages from any conversation', async () => {
|
||||
const victimConversationId = 'victim-convo-123';
|
||||
|
||||
await getMessages({ conversationId: victimConversationId });
|
||||
|
||||
expect(mockSchema.find).toHaveBeenCalledWith({
|
||||
conversationId: victimConversationId,
|
||||
});
|
||||
|
||||
mockSchema.find.mockReturnValueOnce({
|
||||
select: jest.fn().mockReturnThis(),
|
||||
sort: jest.fn().mockReturnThis(),
|
||||
lean: jest.fn().mockResolvedValue([{ text: 'Test message' }]),
|
||||
});
|
||||
|
||||
const result = await getMessages({ conversationId: victimConversationId });
|
||||
expect(result).toEqual([{ text: 'Test message' }]);
|
||||
});
|
||||
});
|
||||
});
|
||||
api/models/Project.js (new file, 90 lines)
@@ -0,0 +1,90 @@
|
||||
const { model } = require('mongoose');
|
||||
const projectSchema = require('~/models/schema/projectSchema');
|
||||
|
||||
const Project = model('Project', projectSchema);
|
||||
|
||||
/**
|
||||
* Retrieve a project by ID and convert the found project document to a plain object.
|
||||
*
|
||||
* @param {string} projectId - The ID of the project to find and return as a plain object.
|
||||
* @param {string|string[]} [fieldsToSelect] - The fields to include or exclude in the returned document.
|
||||
* @returns {Promise<MongoProject>} A plain object representing the project document, or `null` if no project is found.
|
||||
*/
|
||||
const getProjectById = async function (projectId, fieldsToSelect = null) {
|
||||
const query = Project.findById(projectId);
|
||||
|
||||
if (fieldsToSelect) {
|
||||
query.select(fieldsToSelect);
|
||||
}
|
||||
|
||||
return await query.lean();
|
||||
};
|
||||
|
||||
/**
|
||||
* Retrieve a project by name and convert the found project document to a plain object.
|
||||
* If the project with the given name doesn't exist and the name is "instance", create it and return the lean version.
|
||||
*
|
||||
* @param {string} projectName - The name of the project to find or create.
|
||||
* @param {string|string[]} [fieldsToSelect] - The fields to include or exclude in the returned document.
|
||||
* @returns {Promise<MongoProject>} A plain object representing the project document.
|
||||
*/
|
||||
const getProjectByName = async function (projectName, fieldsToSelect = null) {
|
||||
const query = { name: projectName };
|
||||
const update = { $setOnInsert: { name: projectName } };
|
||||
const options = {
|
||||
new: true,
|
||||
upsert: projectName === 'instance',
|
||||
lean: true,
|
||||
select: fieldsToSelect,
|
||||
};
|
||||
|
||||
return await Project.findOneAndUpdate(query, update, options);
|
||||
};
|
||||
|
||||
/**
|
||||
* Add an array of prompt group IDs to a project's promptGroupIds array, ensuring uniqueness.
|
||||
*
|
||||
* @param {string} projectId - The ID of the project to update.
|
||||
* @param {string[]} promptGroupIds - The array of prompt group IDs to add to the project.
|
||||
* @returns {Promise<MongoProject>} The updated project document.
|
||||
*/
|
||||
const addGroupIdsToProject = async function (projectId, promptGroupIds) {
|
||||
return await Project.findByIdAndUpdate(
|
||||
projectId,
|
||||
{ $addToSet: { promptGroupIds: { $each: promptGroupIds } } },
|
||||
{ new: true },
|
||||
);
|
||||
};
|
||||
|
||||
/**
|
||||
* Remove an array of prompt group IDs from a project's promptGroupIds array.
|
||||
*
|
||||
* @param {string} projectId - The ID of the project to update.
|
||||
* @param {string[]} promptGroupIds - The array of prompt group IDs to remove from the project.
|
||||
* @returns {Promise<MongoProject>} The updated project document.
|
||||
*/
|
||||
const removeGroupIdsFromProject = async function (projectId, promptGroupIds) {
|
||||
return await Project.findByIdAndUpdate(
|
||||
projectId,
|
||||
{ $pull: { promptGroupIds: { $in: promptGroupIds } } },
|
||||
{ new: true },
|
||||
);
|
||||
};
|
||||
|
||||
/**
|
||||
* Remove a prompt group ID from all projects.
|
||||
*
|
||||
* @param {string} promptGroupId - The ID of the prompt group to remove from projects.
|
||||
* @returns {Promise<void>}
|
||||
*/
|
||||
const removeGroupFromAllProjects = async (promptGroupId) => {
|
||||
await Project.updateMany({}, { $pull: { promptGroupIds: promptGroupId } });
|
||||
};
|
||||
|
||||
module.exports = {
|
||||
getProjectById,
|
||||
getProjectByName,
|
||||
addGroupIdsToProject,
|
||||
removeGroupIdsFromProject,
|
||||
removeGroupFromAllProjects,
|
||||
};
|
||||
@@ -1,52 +1,528 @@
|
||||
const mongoose = require('mongoose');
|
||||
const { ObjectId } = require('mongodb');
|
||||
const { SystemRoles, SystemCategories } = require('librechat-data-provider');
|
||||
const {
|
||||
getProjectByName,
|
||||
addGroupIdsToProject,
|
||||
removeGroupIdsFromProject,
|
||||
removeGroupFromAllProjects,
|
||||
} = require('./Project');
|
||||
const { Prompt, PromptGroup } = require('./schema/promptSchema');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
const promptSchema = mongoose.Schema(
|
||||
{
|
||||
title: {
|
||||
type: String,
|
||||
required: true,
|
||||
/**
|
||||
* Create a pipeline for the aggregation to get prompt groups
|
||||
* @param {Object} query
|
||||
* @param {number} skip
|
||||
* @param {number} limit
|
||||
* @returns {[Object]} - The pipeline for the aggregation
|
||||
*/
|
||||
const createGroupPipeline = (query, skip, limit) => {
|
||||
return [
|
||||
{ $match: query },
|
||||
{ $sort: { createdAt: -1 } },
|
||||
{ $skip: skip },
|
||||
{ $limit: limit },
|
||||
{
|
||||
$lookup: {
|
||||
from: 'prompts',
|
||||
localField: 'productionId',
|
||||
foreignField: '_id',
|
||||
as: 'productionPrompt',
|
||||
},
|
||||
},
|
||||
prompt: {
|
||||
type: String,
|
||||
required: true,
|
||||
{ $unwind: { path: '$productionPrompt', preserveNullAndEmptyArrays: true } },
|
||||
{
|
||||
$project: {
|
||||
name: 1,
|
||||
numberOfGenerations: 1,
|
||||
oneliner: 1,
|
||||
category: 1,
|
||||
projectIds: 1,
|
||||
productionId: 1,
|
||||
author: 1,
|
||||
authorName: 1,
|
||||
createdAt: 1,
|
||||
updatedAt: 1,
|
||||
'productionPrompt.prompt': 1,
|
||||
// 'productionPrompt._id': 1,
|
||||
// 'productionPrompt.type': 1,
|
||||
},
|
||||
},
|
||||
category: {
|
||||
type: String,
|
||||
},
|
||||
},
|
||||
{ timestamps: true },
|
||||
);
|
||||
];
|
||||
};
|
||||
|
||||
const Prompt = mongoose.models.Prompt || mongoose.model('Prompt', promptSchema);
|
||||
/**
|
||||
* Create a pipeline for the aggregation to get all prompt groups
|
||||
* @param {Object} query
|
||||
* @param {Partial<MongoPromptGroup>} $project
|
||||
* @returns {[Object]} - The pipeline for the aggregation
|
||||
*/
|
||||
const createAllGroupsPipeline = (
|
||||
query,
|
||||
$project = {
|
||||
name: 1,
|
||||
oneliner: 1,
|
||||
category: 1,
|
||||
author: 1,
|
||||
authorName: 1,
|
||||
createdAt: 1,
|
||||
updatedAt: 1,
|
||||
command: 1,
|
||||
'productionPrompt.prompt': 1,
|
||||
},
|
||||
) => {
|
||||
return [
|
||||
{ $match: query },
|
||||
{ $sort: { createdAt: -1 } },
|
||||
{
|
||||
$lookup: {
|
||||
from: 'prompts',
|
||||
localField: 'productionId',
|
||||
foreignField: '_id',
|
||||
as: 'productionPrompt',
|
||||
},
|
||||
},
|
||||
{ $unwind: { path: '$productionPrompt', preserveNullAndEmptyArrays: true } },
|
||||
{
|
||||
$project,
|
||||
},
|
||||
];
|
||||
};
|
||||
|
||||
/**
|
||||
* Get all prompt groups with filters
|
||||
* @param {Object} req
|
||||
* @param {TPromptGroupsWithFilterRequest} filter
|
||||
* @returns {Promise<PromptGroupListResponse>}
|
||||
*/
|
||||
const getAllPromptGroups = async (req, filter) => {
|
||||
try {
|
||||
const { name, ...query } = filter;
|
||||
|
||||
if (!query.author) {
|
||||
throw new Error('Author is required');
|
||||
}
|
||||
|
||||
let searchShared = true;
|
||||
let searchSharedOnly = false;
|
||||
if (name) {
|
||||
query.name = new RegExp(name, 'i');
|
||||
}
|
||||
if (!query.category) {
|
||||
delete query.category;
|
||||
} else if (query.category === SystemCategories.MY_PROMPTS) {
|
||||
searchShared = false;
|
||||
delete query.category;
|
||||
} else if (query.category === SystemCategories.NO_CATEGORY) {
|
||||
query.category = '';
|
||||
} else if (query.category === SystemCategories.SHARED_PROMPTS) {
|
||||
searchSharedOnly = true;
|
||||
delete query.category;
|
||||
}
|
||||
|
||||
let combinedQuery = query;
|
||||
|
||||
if (searchShared) {
|
||||
const project = await getProjectByName('instance', 'promptGroupIds');
|
||||
if (project && project.promptGroupIds.length > 0) {
|
||||
const projectQuery = { _id: { $in: project.promptGroupIds }, ...query };
|
||||
delete projectQuery.author;
|
||||
combinedQuery = searchSharedOnly ? projectQuery : { $or: [projectQuery, query] };
|
||||
}
|
||||
}
|
||||
|
||||
const promptGroupsPipeline = createAllGroupsPipeline(combinedQuery);
|
||||
return await PromptGroup.aggregate(promptGroupsPipeline).exec();
|
||||
} catch (error) {
|
||||
console.error('Error getting all prompt groups', error);
|
||||
return { message: 'Error getting all prompt groups' };
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Get prompt groups with filters
|
||||
* @param {Object} req
|
||||
* @param {TPromptGroupsWithFilterRequest} filter
|
||||
* @returns {Promise<PromptGroupListResponse>}
|
||||
*/
|
||||
const getPromptGroups = async (req, filter) => {
|
||||
try {
|
||||
const { pageNumber = 1, pageSize = 10, name, ...query } = filter;
|
||||
|
||||
const validatedPageNumber = Math.max(parseInt(pageNumber, 10), 1);
|
||||
const validatedPageSize = Math.max(parseInt(pageSize, 10), 1);
|
||||
|
||||
if (!query.author) {
|
||||
throw new Error('Author is required');
|
||||
}
|
||||
|
||||
let searchShared = true;
|
||||
let searchSharedOnly = false;
|
||||
if (name) {
|
||||
query.name = new RegExp(name, 'i');
|
||||
}
|
||||
if (!query.category) {
|
||||
delete query.category;
|
||||
} else if (query.category === SystemCategories.MY_PROMPTS) {
|
||||
searchShared = false;
|
||||
delete query.category;
|
||||
} else if (query.category === SystemCategories.NO_CATEGORY) {
|
||||
query.category = '';
|
||||
} else if (query.category === SystemCategories.SHARED_PROMPTS) {
|
||||
searchSharedOnly = true;
|
||||
delete query.category;
|
||||
}
|
||||
|
||||
let combinedQuery = query;
|
||||
|
||||
if (searchShared) {
|
||||
// const projects = req.user.projects || []; // TODO: handle multiple projects
|
||||
const project = await getProjectByName('instance', 'promptGroupIds');
|
||||
if (project && project.promptGroupIds.length > 0) {
|
||||
const projectQuery = { _id: { $in: project.promptGroupIds }, ...query };
|
||||
delete projectQuery.author;
|
||||
combinedQuery = searchSharedOnly ? projectQuery : { $or: [projectQuery, query] };
|
||||
}
|
||||
}
|
||||
|
||||
const skip = (validatedPageNumber - 1) * validatedPageSize;
|
||||
const limit = validatedPageSize;
|
||||
|
||||
const promptGroupsPipeline = createGroupPipeline(combinedQuery, skip, limit);
|
||||
const totalPromptGroupsPipeline = [{ $match: combinedQuery }, { $count: 'total' }];
|
||||
|
||||
const [promptGroupsResults, totalPromptGroupsResults] = await Promise.all([
|
||||
PromptGroup.aggregate(promptGroupsPipeline).exec(),
|
||||
PromptGroup.aggregate(totalPromptGroupsPipeline).exec(),
|
||||
]);
|
||||
|
||||
const promptGroups = promptGroupsResults;
|
||||
const totalPromptGroups =
|
||||
totalPromptGroupsResults.length > 0 ? totalPromptGroupsResults[0].total : 0;
|
||||
|
||||
return {
|
||||
promptGroups,
|
||||
pageNumber: validatedPageNumber.toString(),
|
||||
pageSize: validatedPageSize.toString(),
|
||||
pages: Math.ceil(totalPromptGroups / validatedPageSize).toString(),
|
||||
};
|
||||
} catch (error) {
|
||||
console.error('Error getting prompt groups', error);
|
||||
return { message: 'Error getting prompt groups' };
|
||||
}
|
||||
};
|
||||
|
||||
module.exports = {
|
||||
savePrompt: async ({ title, prompt }) => {
|
||||
getPromptGroups,
|
||||
getAllPromptGroups,
|
||||
/**
|
||||
* Create a prompt and its respective group
|
||||
* @param {TCreatePromptRecord} saveData
|
||||
* @returns {Promise<TCreatePromptResponse>}
|
||||
*/
|
||||
createPromptGroup: async (saveData) => {
|
||||
try {
|
||||
await Prompt.create({
|
||||
title,
|
||||
prompt,
|
||||
});
|
||||
return { title, prompt };
|
||||
const { prompt, group, author, authorName } = saveData;
|
||||
|
||||
let newPromptGroup = await PromptGroup.findOneAndUpdate(
|
||||
{ ...group, author, authorName, productionId: null },
|
||||
{ $setOnInsert: { ...group, author, authorName, productionId: null } },
|
||||
{ new: true, upsert: true },
|
||||
)
|
||||
.lean()
|
||||
.select('-__v')
|
||||
.exec();
|
||||
|
||||
const newPrompt = await Prompt.findOneAndUpdate(
|
||||
{ ...prompt, author, groupId: newPromptGroup._id },
|
||||
{ $setOnInsert: { ...prompt, author, groupId: newPromptGroup._id } },
|
||||
{ new: true, upsert: true },
|
||||
)
|
||||
.lean()
|
||||
.select('-__v')
|
||||
.exec();
|
||||
|
||||
newPromptGroup = await PromptGroup.findByIdAndUpdate(
|
||||
newPromptGroup._id,
|
||||
{ productionId: newPrompt._id },
|
||||
{ new: true },
|
||||
)
|
||||
.lean()
|
||||
.select('-__v')
|
||||
.exec();
|
||||
|
||||
return {
|
||||
prompt: newPrompt,
|
||||
group: {
|
||||
...newPromptGroup,
|
||||
productionPrompt: { prompt: newPrompt.prompt },
|
||||
},
|
||||
};
|
||||
} catch (error) {
|
||||
logger.error('Error saving prompt group', error);
|
||||
throw new Error('Error saving prompt group');
|
||||
}
|
||||
},
|
||||
/**
|
||||
* Save a prompt
|
||||
* @param {TCreatePromptRecord} saveData
|
||||
* @returns {Promise<TCreatePromptResponse>}
|
||||
*/
|
||||
savePrompt: async (saveData) => {
|
||||
try {
|
||||
const { prompt, author } = saveData;
|
||||
const newPromptData = {
|
||||
...prompt,
|
||||
author,
|
||||
};
|
||||
|
||||
/** @type {TPrompt} */
|
||||
let newPrompt;
|
||||
try {
|
||||
newPrompt = await Prompt.create(newPromptData);
|
||||
} catch (error) {
|
||||
if (error?.message?.includes('groupId_1_version_1')) {
|
||||
await Prompt.db.collection('prompts').dropIndex('groupId_1_version_1');
|
||||
} else {
|
||||
throw error;
|
||||
}
|
||||
newPrompt = await Prompt.create(newPromptData);
|
||||
}
|
||||
|
||||
return { prompt: newPrompt };
|
||||
} catch (error) {
|
||||
logger.error('Error saving prompt', error);
|
||||
return { prompt: 'Error saving prompt' };
|
||||
return { message: 'Error saving prompt' };
|
||||
}
|
||||
},
|
||||
getPrompts: async (filter) => {
|
||||
try {
|
||||
return await Prompt.find(filter).lean();
|
||||
return await Prompt.find(filter).sort({ createdAt: -1 }).lean();
|
||||
} catch (error) {
|
||||
logger.error('Error getting prompts', error);
|
||||
return { prompt: 'Error getting prompts' };
|
||||
return { message: 'Error getting prompts' };
|
||||
}
|
||||
},
|
||||
deletePrompts: async (filter) => {
|
||||
getPrompt: async (filter) => {
|
||||
try {
|
||||
return await Prompt.deleteMany(filter);
|
||||
if (filter.groupId) {
|
||||
filter.groupId = new ObjectId(filter.groupId);
|
||||
}
|
||||
return await Prompt.findOne(filter).lean();
|
||||
} catch (error) {
|
||||
logger.error('Error deleting prompts', error);
|
||||
return { prompt: 'Error deleting prompts' };
|
||||
logger.error('Error getting prompt', error);
|
||||
return { message: 'Error getting prompt' };
|
||||
}
|
||||
},
|
||||
/**
|
||||
* Get prompt groups with filters
|
||||
* @param {TGetRandomPromptsRequest} filter
|
||||
* @returns {Promise<TGetRandomPromptsResponse>}
|
||||
*/
|
||||
getRandomPromptGroups: async (filter) => {
|
||||
try {
|
||||
const result = await PromptGroup.aggregate([
|
||||
{
|
||||
$match: {
|
||||
category: { $ne: '' },
|
||||
},
|
||||
},
|
||||
{
|
||||
$group: {
|
||||
_id: '$category',
|
||||
promptGroup: { $first: '$$ROOT' },
|
||||
},
|
||||
},
|
||||
{
|
||||
$replaceRoot: { newRoot: '$promptGroup' },
|
||||
},
|
||||
{
|
||||
$sample: { size: +filter.limit + +filter.skip },
|
||||
},
|
||||
{
|
||||
$skip: +filter.skip,
|
||||
},
|
||||
{
|
||||
$limit: +filter.limit,
|
||||
},
|
||||
]);
|
||||
return { prompts: result };
|
||||
} catch (error) {
|
||||
logger.error('Error getting prompt groups', error);
|
||||
return { message: 'Error getting prompt groups' };
|
||||
}
|
||||
},
|
||||
getPromptGroupsWithPrompts: async (filter) => {
|
||||
try {
|
||||
return await PromptGroup.findOne(filter)
|
||||
.populate({
|
||||
path: 'prompts',
|
||||
select: '-_id -__v -user',
|
||||
})
|
||||
.select('-_id -__v -user')
|
||||
.lean();
|
||||
} catch (error) {
|
||||
logger.error('Error getting prompt groups', error);
|
||||
return { message: 'Error getting prompt groups' };
|
||||
}
|
||||
},
|
||||
getPromptGroup: async (filter) => {
|
||||
try {
|
||||
return await PromptGroup.findOne(filter).lean();
|
||||
} catch (error) {
|
||||
logger.error('Error getting prompt group', error);
|
||||
return { message: 'Error getting prompt group' };
|
||||
}
|
||||
},
|
||||
/**
|
||||
* Deletes a prompt and its corresponding prompt group if it is the last prompt in the group.
|
||||
*
|
||||
* @param {Object} options - The options for deleting the prompt.
|
||||
* @param {ObjectId|string} options.promptId - The ID of the prompt to delete.
|
||||
* @param {ObjectId|string} options.groupId - The ID of the prompt's group.
|
||||
* @param {ObjectId|string} options.author - The ID of the prompt's author.
|
||||
* @param {string} options.role - The role of the prompt's author.
|
||||
* @return {Promise<TDeletePromptResponse>} An object containing the result of the deletion.
|
||||
* If the prompt was deleted successfully, the object will have a property 'prompt' with the value 'Prompt deleted successfully'.
|
||||
* If the prompt group was deleted successfully, the object will have a property 'promptGroup' with the message 'Prompt group deleted successfully' and id of the deleted group.
|
||||
* If there was an error deleting the prompt, the object will have a property 'message' with the value 'Error deleting prompt'.
|
||||
*/
|
||||
deletePrompt: async ({ promptId, groupId, author, role }) => {
|
||||
const query = { _id: promptId, groupId, author };
|
||||
if (role === SystemRoles.ADMIN) {
|
||||
delete query.author;
|
||||
}
|
||||
const { deletedCount } = await Prompt.deleteOne(query);
|
||||
if (deletedCount === 0) {
|
||||
throw new Error('Failed to delete the prompt');
|
||||
}
|
||||
|
||||
const remainingPrompts = await Prompt.find({ groupId })
|
||||
.select('_id')
|
||||
.sort({ createdAt: 1 })
|
||||
.lean();
|
||||
|
||||
if (remainingPrompts.length === 0) {
|
||||
await PromptGroup.deleteOne({ _id: groupId });
|
||||
await removeGroupFromAllProjects(groupId);
|
||||
|
||||
return {
|
||||
prompt: 'Prompt deleted successfully',
|
||||
promptGroup: {
|
||||
message: 'Prompt group deleted successfully',
|
||||
id: groupId,
|
||||
},
|
||||
};
|
||||
} else {
|
||||
const promptGroup = await PromptGroup.findById(groupId).lean();
|
||||
if (promptGroup.productionId.toString() === promptId.toString()) {
|
||||
await PromptGroup.updateOne(
|
||||
{ _id: groupId },
|
||||
{ productionId: remainingPrompts[remainingPrompts.length - 1]._id },
|
||||
);
|
||||
}
|
||||
|
||||
return { prompt: 'Prompt deleted successfully' };
|
||||
}
|
||||
},
|
||||
/**
|
||||
* Update prompt group
|
||||
* @param {Partial<MongoPromptGroup>} filter - Filter to find prompt group
|
||||
* @param {Partial<MongoPromptGroup>} data - Data to update
|
||||
* @returns {Promise<TUpdatePromptGroupResponse>}
|
||||
*/
|
||||
updatePromptGroup: async (filter, data) => {
|
||||
try {
|
||||
const updateOps = {};
|
||||
if (data.removeProjectIds) {
|
||||
for (const projectId of data.removeProjectIds) {
|
||||
await removeGroupIdsFromProject(projectId, [filter._id]);
|
||||
}
|
||||
|
||||
updateOps.$pull = { projectIds: { $in: data.removeProjectIds } };
|
||||
delete data.removeProjectIds;
|
||||
}
|
||||
|
||||
if (data.projectIds) {
|
||||
for (const projectId of data.projectIds) {
|
||||
await addGroupIdsToProject(projectId, [filter._id]);
|
||||
}
|
||||
|
||||
updateOps.$addToSet = { projectIds: { $each: data.projectIds } };
|
||||
delete data.projectIds;
|
||||
}
|
||||
|
||||
const updateData = { ...data, ...updateOps };
|
||||
const updatedDoc = await PromptGroup.findOneAndUpdate(filter, updateData, {
|
||||
new: true,
|
||||
upsert: false,
|
||||
});
|
||||
|
||||
if (!updatedDoc) {
|
||||
throw new Error('Prompt group not found');
|
||||
}
|
||||
|
||||
return updatedDoc;
|
||||
} catch (error) {
|
||||
logger.error('Error updating prompt group', error);
|
||||
return { message: 'Error updating prompt group' };
|
||||
}
|
||||
},
|
||||
/**
|
||||
* Function to make a prompt production based on its ID.
|
||||
* @param {String} promptId - The ID of the prompt to make production.
|
||||
* @returns {Object} The result of the production operation.
|
||||
*/
|
||||
makePromptProduction: async (promptId) => {
|
||||
try {
|
||||
const prompt = await Prompt.findById(promptId).lean();
|
||||
|
||||
if (!prompt) {
|
||||
throw new Error('Prompt not found');
|
||||
}
|
||||
|
||||
await PromptGroup.findByIdAndUpdate(
|
||||
prompt.groupId,
|
||||
{ productionId: prompt._id },
|
||||
{ new: true },
|
||||
)
|
||||
.lean()
|
||||
.exec();
|
||||
|
||||
return {
|
||||
message: 'Prompt production made successfully',
|
||||
};
|
||||
} catch (error) {
|
||||
logger.error('Error making prompt production', error);
|
||||
return { message: 'Error making prompt production' };
|
||||
}
|
||||
},
|
||||
updatePromptLabels: async (_id, labels) => {
|
||||
try {
|
||||
const response = await Prompt.updateOne({ _id }, { $set: { labels } });
|
||||
if (response.matchedCount === 0) {
|
||||
return { message: 'Prompt not found' };
|
||||
}
|
||||
return { message: 'Prompt labels updated successfully' };
|
||||
} catch (error) {
|
||||
logger.error('Error updating prompt labels', error);
|
||||
return { message: 'Error updating prompt labels' };
|
||||
}
|
||||
},
|
||||
deletePromptGroup: async (_id) => {
|
||||
try {
|
||||
const response = await PromptGroup.deleteOne({ _id });
|
||||
|
||||
if (response.deletedCount === 0) {
|
||||
return { promptGroup: 'Prompt group not found' };
|
||||
}
|
||||
|
||||
await Prompt.deleteMany({ groupId: new ObjectId(_id) });
|
||||
await removeGroupFromAllProjects(_id);
|
||||
return { promptGroup: 'Prompt group deleted successfully' };
|
||||
} catch (error) {
|
||||
logger.error('Error deleting prompt group', error);
|
||||
return { message: 'Error deleting prompt group' };
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
86
api/models/Role.js
Normal file
86
api/models/Role.js
Normal file
@@ -0,0 +1,86 @@
|
||||
const { SystemRoles, CacheKeys, roleDefaults } = require('librechat-data-provider');
|
||||
const getLogStores = require('~/cache/getLogStores');
|
||||
const Role = require('~/models/schema/roleSchema');
|
||||
|
||||
/**
|
||||
* Retrieve a role by name and convert the found role document to a plain object.
|
||||
* If the role with the given name doesn't exist and the name is a system defined role, create it and return the lean version.
|
||||
*
|
||||
* @param {string} roleName - The name of the role to find or create.
|
||||
* @param {string|string[]} [fieldsToSelect] - The fields to include or exclude in the returned document.
|
||||
* @returns {Promise<Object>} A plain object representing the role document.
|
||||
*/
|
||||
const getRoleByName = async function (roleName, fieldsToSelect = null) {
|
||||
try {
|
||||
const cache = getLogStores(CacheKeys.ROLES);
|
||||
const cachedRole = await cache.get(roleName);
|
||||
if (cachedRole) {
|
||||
return cachedRole;
|
||||
}
|
||||
let query = Role.findOne({ name: roleName });
|
||||
if (fieldsToSelect) {
|
||||
query = query.select(fieldsToSelect);
|
||||
}
|
||||
let role = await query.lean().exec();
|
||||
|
||||
if (!role && SystemRoles[roleName]) {
|
||||
role = roleDefaults[roleName];
|
||||
role = await new Role(role).save();
|
||||
await cache.set(roleName, role);
|
||||
return role.toObject();
|
||||
}
|
||||
await cache.set(roleName, role);
|
||||
return role;
|
||||
} catch (error) {
|
||||
throw new Error(`Failed to retrieve or create role: ${error.message}`);
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Update role values by name.
|
||||
*
|
||||
* @param {string} roleName - The name of the role to update.
|
||||
* @param {Partial<TRole>} updates - The fields to update.
|
||||
* @returns {Promise<TRole>} Updated role document.
|
||||
*/
|
||||
const updateRoleByName = async function (roleName, updates) {
|
||||
try {
|
||||
const cache = getLogStores(CacheKeys.ROLES);
|
||||
const role = await Role.findOneAndUpdate(
|
||||
{ name: roleName },
|
||||
{ $set: updates },
|
||||
{ new: true, lean: true },
|
||||
)
|
||||
.select('-__v')
|
||||
.lean()
|
||||
.exec();
|
||||
await cache.set(roleName, role);
|
||||
return role;
|
||||
} catch (error) {
|
||||
throw new Error(`Failed to update role: ${error.message}`);
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Initialize default roles in the system.
|
||||
* Creates the default roles (ADMIN, USER) if they don't exist in the database.
|
||||
*
|
||||
* @returns {Promise<void>}
|
||||
*/
|
||||
const initializeRoles = async function () {
|
||||
const defaultRoles = [SystemRoles.ADMIN, SystemRoles.USER];
|
||||
|
||||
for (const roleName of defaultRoles) {
|
||||
let role = await Role.findOne({ name: roleName }).select('name').lean();
|
||||
if (!role) {
|
||||
role = new Role(roleDefaults[roleName]);
|
||||
await role.save();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
module.exports = {
|
||||
getRoleByName,
|
||||
initializeRoles,
|
||||
updateRoleByName,
|
||||
};
|
||||
@@ -1,6 +1,6 @@
|
||||
const crypto = require('crypto');
|
||||
const mongoose = require('mongoose');
|
||||
const signPayload = require('~/server/services/signPayload');
|
||||
const { hashToken } = require('~/server/utils/crypto');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
const { REFRESH_TOKEN_EXPIRY } = process.env ?? {};
|
||||
@@ -39,8 +39,7 @@ sessionSchema.methods.generateRefreshToken = async function () {
|
||||
expirationTime: Math.floor((expiresIn - Date.now()) / 1000),
|
||||
});
|
||||
|
||||
const hash = crypto.createHash('sha256');
|
||||
this.refreshTokenHash = hash.update(refreshToken).digest('hex');
|
||||
this.refreshTokenHash = await hashToken(refreshToken);
|
||||
|
||||
await this.save();
|
||||
|
||||
|
||||
@@ -1,106 +1,252 @@
|
||||
const crypto = require('crypto');
|
||||
const { getMessages } = require('./Message');
|
||||
const { nanoid } = require('nanoid');
|
||||
const { Constants } = require('librechat-data-provider');
|
||||
const SharedLink = require('./schema/shareSchema');
|
||||
const { getMessages } = require('./Message');
|
||||
const logger = require('~/config/winston');
|
||||
|
||||
module.exports = {
|
||||
SharedLink,
|
||||
getSharedMessages: async (shareId) => {
|
||||
try {
|
||||
const share = await SharedLink.findOne({ shareId })
|
||||
.populate({
|
||||
path: 'messages',
|
||||
select: '-_id -__v -user',
|
||||
})
|
||||
.select('-_id -__v -user')
|
||||
.lean();
|
||||
/**
|
||||
* Anonymizes a conversation ID
|
||||
* @returns {string} The anonymized conversation ID
|
||||
*/
|
||||
function anonymizeConvoId() {
|
||||
return `convo_${nanoid()}`;
|
||||
}
|
||||
|
||||
if (!share || !share.conversationId || !share.isPublic) {
|
||||
return null;
|
||||
}
|
||||
/**
|
||||
* Anonymizes an assistant ID
|
||||
* @returns {string} The anonymized assistant ID
|
||||
*/
|
||||
function anonymizeAssistantId() {
|
||||
return `a_${nanoid()}`;
|
||||
}
|
||||
|
||||
return share;
|
||||
} catch (error) {
|
||||
logger.error('[getShare] Error getting share link', error);
|
||||
return { message: 'Error getting share link' };
|
||||
/**
|
||||
* Anonymizes a message ID
|
||||
* @param {string} id - The original message ID
|
||||
* @returns {string} The anonymized message ID
|
||||
*/
|
||||
function anonymizeMessageId(id) {
|
||||
return id === Constants.NO_PARENT ? id : `msg_${nanoid()}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Anonymizes a conversation object
|
||||
* @param {object} conversation - The conversation object
|
||||
* @returns {object} The anonymized conversation object
|
||||
*/
|
||||
function anonymizeConvo(conversation) {
|
||||
const newConvo = { ...conversation };
|
||||
if (newConvo.assistant_id) {
|
||||
newConvo.assistant_id = anonymizeAssistantId();
|
||||
}
|
||||
return newConvo;
|
||||
}
|
||||
|
||||
/**
|
||||
* Anonymizes messages in a conversation
|
||||
* @param {TMessage[]} messages - The original messages
|
||||
* @param {string} newConvoId - The new conversation ID
|
||||
* @returns {TMessage[]} The anonymized messages
|
||||
*/
|
||||
function anonymizeMessages(messages, newConvoId) {
|
||||
const idMap = new Map();
|
||||
return messages.map((message) => {
|
||||
const newMessageId = anonymizeMessageId(message.messageId);
|
||||
idMap.set(message.messageId, newMessageId);
|
||||
|
||||
const anonymizedMessage = Object.assign(message, {
|
||||
messageId: newMessageId,
|
||||
parentMessageId:
|
||||
idMap.get(message.parentMessageId) || anonymizeMessageId(message.parentMessageId),
|
||||
conversationId: newConvoId,
|
||||
});
|
||||
|
||||
if (anonymizedMessage.model && anonymizedMessage.model.startsWith('asst_')) {
|
||||
anonymizedMessage.model = anonymizeAssistantId();
|
||||
}
|
||||
},
|
||||
|
||||
getSharedLinks: async (user, pageNumber = 1, pageSize = 25, isPublic = true) => {
|
||||
const query = { user, isPublic };
|
||||
try {
|
||||
const totalConvos = (await SharedLink.countDocuments(query)) || 1;
|
||||
const totalPages = Math.ceil(totalConvos / pageSize);
|
||||
const shares = await SharedLink.find(query)
|
||||
return anonymizedMessage;
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves shared messages for a given share ID
|
||||
* @param {string} shareId - The share ID
|
||||
* @returns {Promise<object|null>} The shared conversation data or null if not found
|
||||
*/
|
||||
async function getSharedMessages(shareId) {
|
||||
try {
|
||||
const share = await SharedLink.findOne({ shareId })
|
||||
.populate({
|
||||
path: 'messages',
|
||||
select: '-_id -__v -user',
|
||||
})
|
||||
.select('-_id -__v -user')
|
||||
.lean();
|
||||
|
||||
if (!share || !share.conversationId || !share.isPublic) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const newConvoId = anonymizeConvoId();
|
||||
return Object.assign(share, {
|
||||
conversationId: newConvoId,
|
||||
messages: anonymizeMessages(share.messages, newConvoId),
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('[getShare] Error getting share link', error);
|
||||
throw new Error('Error getting share link');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves shared links for a user
|
||||
* @param {string} user - The user ID
|
||||
* @param {number} [pageNumber=1] - The page number
|
||||
* @param {number} [pageSize=25] - The page size
|
||||
* @param {boolean} [isPublic=true] - Whether to retrieve public links only
|
||||
* @returns {Promise<object>} The shared links and pagination data
|
||||
*/
|
||||
async function getSharedLinks(user, pageNumber = 1, pageSize = 25, isPublic = true) {
|
||||
const query = { user, isPublic };
|
||||
try {
|
||||
const [totalConvos, sharedLinks] = await Promise.all([
|
||||
SharedLink.countDocuments(query),
|
||||
SharedLink.find(query)
|
||||
.sort({ updatedAt: -1 })
|
||||
.skip((pageNumber - 1) * pageSize)
|
||||
.limit(pageSize)
|
||||
.select('-_id -__v -user')
|
||||
.lean();
|
||||
.lean(),
|
||||
]);
|
||||
|
||||
return { sharedLinks: shares, pages: totalPages, pageNumber, pageSize };
|
||||
} catch (error) {
|
||||
logger.error('[getShareByPage] Error getting shares', error);
|
||||
return { message: 'Error getting shares' };
|
||||
}
|
||||
},
|
||||
const totalPages = Math.ceil((totalConvos || 1) / pageSize);
|
||||
|
||||
createSharedLink: async (user, { conversationId, ...shareData }) => {
|
||||
return {
|
||||
sharedLinks,
|
||||
pages: totalPages,
|
||||
pageNumber,
|
||||
pageSize,
|
||||
};
|
||||
} catch (error) {
|
||||
logger.error('[getShareByPage] Error getting shares', error);
|
||||
throw new Error('Error getting shares');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a new shared link
|
||||
* @param {string} user - The user ID
|
||||
* @param {object} shareData - The share data
|
||||
* @param {string} shareData.conversationId - The conversation ID
|
||||
* @returns {Promise<object>} The created shared link
|
||||
*/
|
||||
async function createSharedLink(user, { conversationId, ...shareData }) {
|
||||
try {
|
||||
const share = await SharedLink.findOne({ conversationId }).select('-_id -__v -user').lean();
|
||||
if (share) {
|
||||
return share;
|
||||
}
|
||||
|
||||
try {
|
||||
const shareId = crypto.randomUUID();
|
||||
const messages = await getMessages({ conversationId });
|
||||
const update = { ...shareData, shareId, messages, user };
|
||||
return await SharedLink.findOneAndUpdate({ conversationId: conversationId, user }, update, {
|
||||
new: true,
|
||||
upsert: true,
|
||||
const newConvoId = anonymizeConvoId();
|
||||
const sharedConvo = anonymizeConvo(share);
|
||||
return Object.assign(sharedConvo, {
|
||||
conversationId: newConvoId,
|
||||
messages: anonymizeMessages(share.messages, newConvoId),
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('[saveShareMessage] Error saving conversation', error);
|
||||
return { message: 'Error saving conversation' };
|
||||
}
|
||||
},
|
||||
|
||||
updateSharedLink: async (user, { conversationId, ...shareData }) => {
|
||||
const shareId = nanoid();
|
||||
const messages = await getMessages({ conversationId });
|
||||
const update = { ...shareData, shareId, messages, user };
|
||||
const newShare = await SharedLink.findOneAndUpdate({ conversationId, user }, update, {
|
||||
new: true,
|
||||
upsert: true,
|
||||
}).lean();
|
||||
|
||||
const newConvoId = anonymizeConvoId();
|
||||
const sharedConvo = anonymizeConvo(newShare);
|
||||
return Object.assign(sharedConvo, {
|
||||
conversationId: newConvoId,
|
||||
messages: anonymizeMessages(newShare.messages, newConvoId),
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('[createSharedLink] Error creating shared link', error);
|
||||
throw new Error('Error creating shared link');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates an existing shared link
|
||||
* @param {string} user - The user ID
|
||||
* @param {object} shareData - The share data to update
|
||||
* @param {string} shareData.conversationId - The conversation ID
|
||||
* @returns {Promise<object>} The updated shared link
|
||||
*/
|
||||
async function updateSharedLink(user, { conversationId, ...shareData }) {
|
||||
try {
|
||||
const share = await SharedLink.findOne({ conversationId }).select('-_id -__v -user').lean();
|
||||
if (!share) {
|
||||
return { message: 'Share not found' };
|
||||
}
|
||||
// update messages to the latest
|
||||
|
||||
const messages = await getMessages({ conversationId });
|
||||
const update = { ...shareData, messages, user };
|
||||
return await SharedLink.findOneAndUpdate({ conversationId: conversationId, user }, update, {
|
||||
const updatedShare = await SharedLink.findOneAndUpdate({ conversationId, user }, update, {
|
||||
new: true,
|
||||
upsert: false,
|
||||
});
|
||||
},
|
||||
}).lean();
|
||||
|
||||
deleteSharedLink: async (user, { shareId }) => {
|
||||
const share = await SharedLink.findOne({ shareId, user });
|
||||
if (!share) {
|
||||
return { message: 'Share not found' };
|
||||
}
|
||||
return await SharedLink.findOneAndDelete({ shareId, user });
|
||||
},
|
||||
/**
|
||||
* Deletes all shared links for a specific user.
|
||||
* @param {string} user - The user ID.
|
||||
* @returns {Promise<{ message: string, deletedCount?: number }>} A result object indicating success or error message.
|
||||
*/
|
||||
deleteAllSharedLinks: async (user) => {
|
||||
try {
|
||||
const result = await SharedLink.deleteMany({ user });
|
||||
return {
|
||||
message: 'All shared links have been deleted successfully',
|
||||
deletedCount: result.deletedCount,
|
||||
};
|
||||
} catch (error) {
|
||||
logger.error('[deleteAllSharedLinks] Error deleting shared links', error);
|
||||
return { message: 'Error deleting shared links' };
|
||||
}
|
||||
},
|
||||
const newConvoId = anonymizeConvoId();
|
||||
const sharedConvo = anonymizeConvo(updatedShare);
|
||||
return Object.assign(sharedConvo, {
|
||||
conversationId: newConvoId,
|
||||
messages: anonymizeMessages(updatedShare.messages, newConvoId),
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('[updateSharedLink] Error updating shared link', error);
|
||||
throw new Error('Error updating shared link');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes a shared link
|
||||
* @param {string} user - The user ID
|
||||
* @param {object} params - The deletion parameters
|
||||
* @param {string} params.shareId - The share ID to delete
|
||||
* @returns {Promise<object>} The result of the deletion
|
||||
*/
|
||||
async function deleteSharedLink(user, { shareId }) {
|
||||
try {
|
||||
const result = await SharedLink.findOneAndDelete({ shareId, user });
|
||||
return result ? { message: 'Share deleted successfully' } : { message: 'Share not found' };
|
||||
} catch (error) {
|
||||
logger.error('[deleteSharedLink] Error deleting shared link', error);
|
||||
throw new Error('Error deleting shared link');
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes all shared links for a specific user
|
||||
* @param {string} user - The user ID
|
||||
* @returns {Promise<object>} The result of the deletion
|
||||
*/
|
||||
async function deleteAllSharedLinks(user) {
|
||||
try {
|
||||
const result = await SharedLink.deleteMany({ user });
|
||||
return {
|
||||
message: 'All shared links have been deleted successfully',
|
||||
deletedCount: result.deletedCount,
|
||||
};
|
||||
} catch (error) {
|
||||
logger.error('[deleteAllSharedLinks] Error deleting shared links', error);
|
||||
throw new Error('Error deleting shared links');
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
SharedLink,
|
||||
getSharedLinks,
|
||||
createSharedLink,
|
||||
updateSharedLink,
|
||||
deleteSharedLink,
|
||||
getSharedMessages,
|
||||
deleteAllSharedLinks,
|
||||
};
|
||||
|
||||
19
api/models/schema/categories.js
Normal file
19
api/models/schema/categories.js
Normal file
@@ -0,0 +1,19 @@
|
||||
const mongoose = require('mongoose');
|
||||
const Schema = mongoose.Schema;
|
||||
|
||||
const categoriesSchema = new Schema({
|
||||
label: {
|
||||
type: String,
|
||||
required: true,
|
||||
unique: true,
|
||||
},
|
||||
value: {
|
||||
type: String,
|
||||
required: true,
|
||||
unique: true,
|
||||
},
|
||||
});
|
||||
|
||||
const categories = mongoose.model('categories', categoriesSchema);
|
||||
|
||||
module.exports = { Categories: categories };
|
||||
31
api/models/schema/conversationTagSchema.js
Normal file
31
api/models/schema/conversationTagSchema.js
Normal file
@@ -0,0 +1,31 @@
|
||||
const mongoose = require('mongoose');
|
||||
|
||||
const conversationTagSchema = mongoose.Schema(
|
||||
{
|
||||
tag: {
|
||||
type: String,
|
||||
index: true,
|
||||
},
|
||||
user: {
|
||||
type: String,
|
||||
index: true,
|
||||
},
|
||||
description: {
|
||||
type: String,
|
||||
index: true,
|
||||
},
|
||||
count: {
|
||||
type: Number,
|
||||
default: 0,
|
||||
},
|
||||
position: {
|
||||
type: Number,
|
||||
default: 0,
|
||||
},
|
||||
},
|
||||
{ timestamps: true },
|
||||
);
|
||||
|
||||
conversationTagSchema.index({ tag: 1, user: 1 }, { unique: true });
|
||||
|
||||
module.exports = mongoose.model('ConversationTag', conversationTagSchema);
|
||||
@@ -42,6 +42,11 @@ const convoSchema = mongoose.Schema(
|
||||
invocationId: {
|
||||
type: Number,
|
||||
},
|
||||
tags: {
|
||||
type: [String],
|
||||
default: [],
|
||||
meiliIndex: true,
|
||||
},
|
||||
},
|
||||
{ timestamps: true },
|
||||
);
|
||||
@@ -56,6 +61,7 @@ if (process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY) {
|
||||
}
|
||||
|
||||
convoSchema.index({ createdAt: 1, updatedAt: 1 });
|
||||
convoSchema.index({ conversationId: 1, user: 1 }, { unique: true });
|
||||
|
||||
const Conversation = mongoose.models.Conversation || mongoose.model('Conversation', convoSchema);
|
||||
|
||||
|
||||
@@ -103,6 +103,10 @@ const conversationPreset = {
|
||||
spec: {
|
||||
type: String,
|
||||
},
|
||||
tags: {
|
||||
type: [String],
|
||||
default: [],
|
||||
},
|
||||
tools: { type: [{ type: String }], default: undefined },
|
||||
maxContextTokens: {
|
||||
type: Number,
|
||||
|
||||
@@ -129,7 +129,9 @@ if (process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY) {
|
||||
}
|
||||
|
||||
messageSchema.index({ createdAt: 1 });
|
||||
messageSchema.index({ messageId: 1, user: 1 }, { unique: true });
|
||||
|
||||
/** @type {mongoose.Model<TMessage>} */
|
||||
const Message = mongoose.models.Message || mongoose.model('Message', messageSchema);
|
||||
|
||||
module.exports = Message;
|
||||
|
||||
30
api/models/schema/projectSchema.js
Normal file
30
api/models/schema/projectSchema.js
Normal file
@@ -0,0 +1,30 @@
|
||||
const { Schema } = require('mongoose');
|
||||
|
||||
/**
|
||||
* @typedef {Object} MongoProject
|
||||
* @property {ObjectId} [_id] - MongoDB Document ID
|
||||
* @property {string} name - The name of the project
|
||||
* @property {ObjectId[]} promptGroupIds - Array of PromptGroup IDs associated with the project
|
||||
* @property {Date} [createdAt] - Date when the project was created (added by timestamps)
|
||||
* @property {Date} [updatedAt] - Date when the project was last updated (added by timestamps)
|
||||
*/
|
||||
|
||||
const projectSchema = new Schema(
|
||||
{
|
||||
name: {
|
||||
type: String,
|
||||
required: true,
|
||||
index: true,
|
||||
},
|
||||
promptGroupIds: {
|
||||
type: [Schema.Types.ObjectId],
|
||||
ref: 'PromptGroup',
|
||||
default: [],
|
||||
},
|
||||
},
|
||||
{
|
||||
timestamps: true,
|
||||
},
|
||||
);
|
||||
|
||||
module.exports = projectSchema;
|
||||
118
api/models/schema/promptSchema.js
Normal file
118
api/models/schema/promptSchema.js
Normal file
@@ -0,0 +1,118 @@
|
||||
const mongoose = require('mongoose');
|
||||
const { Constants } = require('librechat-data-provider');
|
||||
const Schema = mongoose.Schema;
|
||||
|
||||
/**
|
||||
* @typedef {Object} MongoPromptGroup
|
||||
* @property {ObjectId} [_id] - MongoDB Document ID
|
||||
* @property {string} name - The name of the prompt group
|
||||
* @property {ObjectId} author - The author of the prompt group
|
||||
* @property {ObjectId} [projectId=null] - The project ID of the prompt group
|
||||
* @property {ObjectId} [productionId=null] - The project ID of the prompt group
|
||||
* @property {string} authorName - The name of the author of the prompt group
|
||||
* @property {number} [numberOfGenerations=0] - Number of generations the prompt group has
|
||||
* @property {string} [oneliner=''] - Oneliner description of the prompt group
|
||||
* @property {string} [category=''] - Category of the prompt group
|
||||
* @property {string} [command] - Command for the prompt group
|
||||
* @property {Date} [createdAt] - Date when the prompt group was created (added by timestamps)
|
||||
* @property {Date} [updatedAt] - Date when the prompt group was last updated (added by timestamps)
|
||||
*/
|
||||
|
||||
const promptGroupSchema = new Schema(
|
||||
{
|
||||
name: {
|
||||
type: String,
|
||||
required: true,
|
||||
index: true,
|
||||
},
|
||||
numberOfGenerations: {
|
||||
type: Number,
|
||||
default: 0,
|
||||
},
|
||||
oneliner: {
|
||||
type: String,
|
||||
default: '',
|
||||
},
|
||||
category: {
|
||||
type: String,
|
||||
default: '',
|
||||
index: true,
|
||||
},
|
||||
projectIds: {
|
||||
type: [Schema.Types.ObjectId],
|
||||
ref: 'Project',
|
||||
index: true,
|
||||
},
|
||||
productionId: {
|
||||
type: Schema.Types.ObjectId,
|
||||
ref: 'Prompt',
|
||||
required: true,
|
||||
index: true,
|
||||
},
|
||||
author: {
|
||||
type: Schema.Types.ObjectId,
|
||||
ref: 'User',
|
||||
required: true,
|
||||
index: true,
|
||||
},
|
||||
authorName: {
|
||||
type: String,
|
||||
required: true,
|
||||
},
|
||||
command: {
|
||||
type: String,
|
||||
index: true,
|
||||
validate: {
|
||||
validator: function (v) {
|
||||
return v === undefined || v === null || v === '' || /^[a-z0-9-]+$/.test(v);
|
||||
},
|
||||
message: (props) =>
|
||||
`${props.value} is not a valid command. Only lowercase alphanumeric characters and highfins (') are allowed.`,
|
||||
},
|
||||
maxlength: [
|
||||
Constants.COMMANDS_MAX_LENGTH,
|
||||
`Command cannot be longer than ${Constants.COMMANDS_MAX_LENGTH} characters`,
|
||||
],
|
||||
},
|
||||
},
|
||||
{
|
||||
timestamps: true,
|
||||
},
|
||||
);
|
||||
|
||||
const PromptGroup = mongoose.model('PromptGroup', promptGroupSchema);
|
||||
|
||||
const promptSchema = new Schema(
|
||||
{
|
||||
groupId: {
|
||||
type: Schema.Types.ObjectId,
|
||||
ref: 'PromptGroup',
|
||||
required: true,
|
||||
index: true,
|
||||
},
|
||||
author: {
|
||||
type: Schema.Types.ObjectId,
|
||||
ref: 'User',
|
||||
required: true,
|
||||
},
|
||||
prompt: {
|
||||
type: String,
|
||||
required: true,
|
||||
},
|
||||
type: {
|
||||
type: String,
|
||||
enum: ['text', 'chat'],
|
||||
required: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
timestamps: true,
|
||||
},
|
||||
);
|
||||
|
||||
const Prompt = mongoose.model('Prompt', promptSchema);
|
||||
|
||||
promptSchema.index({ createdAt: 1, updatedAt: 1 });
|
||||
promptGroupSchema.index({ createdAt: 1, updatedAt: 1 });
|
||||
|
||||
module.exports = { Prompt, PromptGroup };
|
||||
29
api/models/schema/roleSchema.js
Normal file
29
api/models/schema/roleSchema.js
Normal file
@@ -0,0 +1,29 @@
|
||||
const { PermissionTypes, Permissions } = require('librechat-data-provider');
|
||||
const mongoose = require('mongoose');
|
||||
|
||||
const roleSchema = new mongoose.Schema({
|
||||
name: {
|
||||
type: String,
|
||||
required: true,
|
||||
unique: true,
|
||||
index: true,
|
||||
},
|
||||
[PermissionTypes.PROMPTS]: {
|
||||
[Permissions.SHARED_GLOBAL]: {
|
||||
type: Boolean,
|
||||
default: false,
|
||||
},
|
||||
[Permissions.USE]: {
|
||||
type: Boolean,
|
||||
default: true,
|
||||
},
|
||||
[Permissions.CREATE]: {
|
||||
type: Boolean,
|
||||
default: true,
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
const Role = mongoose.model('Role', roleSchema);
|
||||
|
||||
module.exports = Role;
|
||||
@@ -1,4 +1,5 @@
|
||||
const mongoose = require('mongoose');
|
||||
const { SystemRoles } = require('librechat-data-provider');
|
||||
|
||||
/**
|
||||
* @typedef {Object} MongoSession
|
||||
@@ -78,7 +79,7 @@ const userSchema = mongoose.Schema(
|
||||
},
|
||||
role: {
|
||||
type: String,
|
||||
default: 'USER',
|
||||
default: SystemRoles.USER,
|
||||
},
|
||||
googleId: {
|
||||
type: String,
|
||||
|
||||
@@ -1,36 +1,74 @@
|
||||
const { matchModelName } = require('../utils');
|
||||
const defaultRate = 6;
|
||||
|
||||
/** AWS Bedrock pricing */
|
||||
const bedrockValues = {
|
||||
'anthropic.claude-3-haiku-20240307-v1:0': { prompt: 0.25, completion: 1.25 },
|
||||
'anthropic.claude-3-sonnet-20240229-v1:0': { prompt: 3.0, completion: 15.0 },
|
||||
'anthropic.claude-3-opus-20240229-v1:0': { prompt: 15.0, completion: 75.0 },
|
||||
'anthropic.claude-3-5-sonnet-20240620-v1:0': { prompt: 3.0, completion: 15.0 },
|
||||
'anthropic.claude-v2:1': { prompt: 8.0, completion: 24.0 },
|
||||
'anthropic.claude-instant-v1': { prompt: 0.8, completion: 2.4 },
|
||||
'meta.llama2-13b-chat-v1': { prompt: 0.75, completion: 1.0 },
|
||||
'meta.llama2-70b-chat-v1': { prompt: 1.95, completion: 2.56 },
|
||||
'meta.llama3-8b-instruct-v1:0': { prompt: 0.3, completion: 0.6 },
|
||||
'meta.llama3-70b-instruct-v1:0': { prompt: 2.65, completion: 3.5 },
|
||||
'meta.llama3-1-8b-instruct-v1:0': { prompt: 0.3, completion: 0.6 },
|
||||
'meta.llama3-1-70b-instruct-v1:0': { prompt: 2.65, completion: 3.5 },
|
||||
'meta.llama3-1-405b-instruct-v1:0': { prompt: 5.32, completion: 16.0 },
|
||||
'mistral.mistral-7b-instruct-v0:2': { prompt: 0.15, completion: 0.2 },
|
||||
'mistral.mistral-small-2402-v1:0': { prompt: 0.15, completion: 0.2 },
|
||||
'mistral.mixtral-8x7b-instruct-v0:1': { prompt: 0.45, completion: 0.7 },
|
||||
'mistral.mistral-large-2402-v1:0': { prompt: 4.0, completion: 12.0 },
|
||||
'mistral.mistral-large-2407-v1:0': { prompt: 3.0, completion: 9.0 },
|
||||
'cohere.command-text-v14': { prompt: 1.5, completion: 2.0 },
|
||||
'cohere.command-light-text-v14': { prompt: 0.3, completion: 0.6 },
|
||||
'cohere.command-r-v1:0': { prompt: 0.5, completion: 1.5 },
|
||||
'cohere.command-r-plus-v1:0': { prompt: 3.0, completion: 15.0 },
|
||||
'ai21.j2-mid-v1': { prompt: 12.5, completion: 12.5 },
|
||||
'ai21.j2-ultra-v1': { prompt: 18.8, completion: 18.8 },
|
||||
'amazon.titan-text-lite-v1': { prompt: 0.15, completion: 0.2 },
|
||||
'amazon.titan-text-express-v1': { prompt: 0.2, completion: 0.6 },
|
||||
};
|
||||
|
||||
for (const [key, value] of Object.entries(bedrockValues)) {
|
||||
bedrockValues[`bedrock/${key}`] = value;
|
||||
}
|
||||
|
||||
/**
|
||||
* Mapping of model token sizes to their respective multipliers for prompt and completion.
|
||||
* The rates are 1 USD per 1M tokens.
|
||||
* @type {Object.<string, {prompt: number, completion: number}>}
|
||||
*/
|
||||
const tokenValues = {
|
||||
'8k': { prompt: 30, completion: 60 },
|
||||
'32k': { prompt: 60, completion: 120 },
|
||||
'4k': { prompt: 1.5, completion: 2 },
|
||||
'16k': { prompt: 3, completion: 4 },
|
||||
'gpt-3.5-turbo-1106': { prompt: 1, completion: 2 },
|
||||
'gpt-4o': { prompt: 5, completion: 15 },
|
||||
'gpt-4-1106': { prompt: 10, completion: 30 },
|
||||
'gpt-3.5-turbo-0125': { prompt: 0.5, completion: 1.5 },
|
||||
'claude-3-opus': { prompt: 15, completion: 75 },
|
||||
'claude-3-sonnet': { prompt: 3, completion: 15 },
|
||||
'claude-3-haiku': { prompt: 0.25, completion: 1.25 },
|
||||
'claude-2.1': { prompt: 8, completion: 24 },
|
||||
'claude-2': { prompt: 8, completion: 24 },
|
||||
'claude-': { prompt: 0.8, completion: 2.4 },
|
||||
'command-r-plus': { prompt: 3, completion: 15 },
|
||||
'command-r': { prompt: 0.5, completion: 1.5 },
|
||||
/* cohere doesn't have rates for the older command models,
|
||||
const tokenValues = Object.assign(
|
||||
{
|
||||
'8k': { prompt: 30, completion: 60 },
|
||||
'32k': { prompt: 60, completion: 120 },
|
||||
'4k': { prompt: 1.5, completion: 2 },
|
||||
'16k': { prompt: 3, completion: 4 },
|
||||
'gpt-3.5-turbo-1106': { prompt: 1, completion: 2 },
|
||||
'gpt-4o-2024-08-06': { prompt: 2.5, completion: 10 },
|
||||
'gpt-4o-mini': { prompt: 0.15, completion: 0.6 },
|
||||
'gpt-4o': { prompt: 5, completion: 15 },
|
||||
'gpt-4-1106': { prompt: 10, completion: 30 },
|
||||
'gpt-3.5-turbo-0125': { prompt: 0.5, completion: 1.5 },
|
||||
'claude-3-opus': { prompt: 15, completion: 75 },
|
||||
'claude-3-sonnet': { prompt: 3, completion: 15 },
|
||||
'claude-3-5-sonnet': { prompt: 3, completion: 15 },
|
||||
'claude-3-haiku': { prompt: 0.25, completion: 1.25 },
|
||||
'claude-2.1': { prompt: 8, completion: 24 },
|
||||
'claude-2': { prompt: 8, completion: 24 },
|
||||
'claude-': { prompt: 0.8, completion: 2.4 },
|
||||
'command-r-plus': { prompt: 3, completion: 15 },
|
||||
'command-r': { prompt: 0.5, completion: 1.5 },
|
||||
/* cohere doesn't have rates for the older command models,
|
||||
so this was from https://artificialanalysis.ai/models/command-light/providers */
|
||||
command: { prompt: 0.38, completion: 0.38 },
|
||||
// 'gemini-1.5': { prompt: 7, completion: 21 }, // May 2nd, 2024 pricing
|
||||
// 'gemini': { prompt: 0.5, completion: 1.5 }, // May 2nd, 2024 pricing
|
||||
'gemini-1.5': { prompt: 0, completion: 0 }, // currently free
|
||||
gemini: { prompt: 0, completion: 0 }, // currently free
|
||||
};
|
||||
command: { prompt: 0.38, completion: 0.38 },
|
||||
'gemini-1.5': { prompt: 7, completion: 21 }, // May 2nd, 2024 pricing
|
||||
gemini: { prompt: 0.5, completion: 1.5 }, // May 2nd, 2024 pricing
|
||||
},
|
||||
bedrockValues,
|
||||
);
|
||||
|
||||
/**
|
||||
* Retrieves the key associated with a given model name.
|
||||
@@ -53,6 +91,10 @@ const getValueKey = (model, endpoint) => {
|
||||
return 'gpt-3.5-turbo-1106';
|
||||
} else if (modelName.includes('gpt-3.5')) {
|
||||
return '4k';
|
||||
} else if (modelName.includes('gpt-4o-2024-08-06')) {
|
||||
return 'gpt-4o-2024-08-06';
|
||||
} else if (modelName.includes('gpt-4o-mini')) {
|
||||
return 'gpt-4o-mini';
|
||||
} else if (modelName.includes('gpt-4o')) {
|
||||
return 'gpt-4o';
|
||||
} else if (modelName.includes('gpt-4-vision')) {
|
||||
|
||||
@@ -48,6 +48,27 @@ describe('getValueKey', () => {
|
||||
expect(getValueKey('gpt-4o-turbo')).toBe('gpt-4o');
|
||||
expect(getValueKey('gpt-4o-0125')).toBe('gpt-4o');
|
||||
});
|
||||
|
||||
it('should return "gpt-4o-mini" for model type of "gpt-4o-mini"', () => {
|
||||
expect(getValueKey('gpt-4o-mini-2024-07-18')).toBe('gpt-4o-mini');
|
||||
expect(getValueKey('openai/gpt-4o-mini')).toBe('gpt-4o-mini');
|
||||
expect(getValueKey('gpt-4o-mini-0718')).toBe('gpt-4o-mini');
|
||||
expect(getValueKey('gpt-4o-2024-08-06-0718')).not.toBe('gpt-4o');
|
||||
});
|
||||
|
||||
it('should return "gpt-4o-2024-08-06" for model type of "gpt-4o-2024-08-06"', () => {
|
||||
expect(getValueKey('gpt-4o-2024-08-06-2024-07-18')).toBe('gpt-4o-2024-08-06');
|
||||
expect(getValueKey('openai/gpt-4o-2024-08-06')).toBe('gpt-4o-2024-08-06');
|
||||
expect(getValueKey('gpt-4o-2024-08-06-0718')).toBe('gpt-4o-2024-08-06');
|
||||
expect(getValueKey('gpt-4o-2024-08-06-0718')).not.toBe('gpt-4o');
|
||||
});
|
||||
|
||||
it('should return "claude-3-5-sonnet" for model type of "claude-3-5-sonnet-"', () => {
|
||||
expect(getValueKey('claude-3-5-sonnet-20240620')).toBe('claude-3-5-sonnet');
|
||||
expect(getValueKey('anthropic/claude-3-5-sonnet')).toBe('claude-3-5-sonnet');
|
||||
expect(getValueKey('claude-3-5-sonnet-turbo')).toBe('claude-3-5-sonnet');
|
||||
expect(getValueKey('claude-3-5-sonnet-0125')).toBe('claude-3-5-sonnet');
|
||||
});
|
||||
});
|
||||
|
||||
describe('getMultiplier', () => {
|
||||
@@ -102,6 +123,19 @@ describe('getMultiplier', () => {
|
||||
);
|
||||
});
|
||||
|
||||
it('should return the correct multiplier for gpt-4o-mini', () => {
|
||||
const valueKey = getValueKey('gpt-4o-mini-2024-07-18');
|
||||
expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(
|
||||
tokenValues['gpt-4o-mini'].prompt,
|
||||
);
|
||||
expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
|
||||
tokenValues['gpt-4o-mini'].completion,
|
||||
);
|
||||
expect(getMultiplier({ valueKey, tokenType: 'completion' })).not.toBe(
|
||||
tokenValues['gpt-4-1106'].completion,
|
||||
);
|
||||
});
|
||||
|
||||
it('should derive the valueKey from the model if not provided for new models', () => {
|
||||
expect(
|
||||
getMultiplier({ tokenType: 'prompt', model: 'gpt-3.5-turbo-1106-some-other-info' }),
|
||||
@@ -126,3 +160,68 @@ describe('getMultiplier', () => {
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('AWS Bedrock Model Tests', () => {
|
||||
const awsModels = [
|
||||
'anthropic.claude-3-haiku-20240307-v1:0',
|
||||
'anthropic.claude-3-sonnet-20240229-v1:0',
|
||||
'anthropic.claude-3-opus-20240229-v1:0',
|
||||
'anthropic.claude-3-5-sonnet-20240620-v1:0',
|
||||
'anthropic.claude-v2:1',
|
||||
'anthropic.claude-instant-v1',
|
||||
'meta.llama2-13b-chat-v1',
|
||||
'meta.llama2-70b-chat-v1',
|
||||
'meta.llama3-8b-instruct-v1:0',
|
||||
'meta.llama3-70b-instruct-v1:0',
|
||||
'meta.llama3-1-8b-instruct-v1:0',
|
||||
'meta.llama3-1-70b-instruct-v1:0',
|
||||
'meta.llama3-1-405b-instruct-v1:0',
|
||||
'mistral.mistral-7b-instruct-v0:2',
|
||||
'mistral.mistral-small-2402-v1:0',
|
||||
'mistral.mixtral-8x7b-instruct-v0:1',
|
||||
'mistral.mistral-large-2402-v1:0',
|
||||
'mistral.mistral-large-2407-v1:0',
|
||||
'cohere.command-text-v14',
|
||||
'cohere.command-light-text-v14',
|
||||
'cohere.command-r-v1:0',
|
||||
'cohere.command-r-plus-v1:0',
|
||||
'ai21.j2-mid-v1',
|
||||
'ai21.j2-ultra-v1',
|
||||
'amazon.titan-text-lite-v1',
|
||||
'amazon.titan-text-express-v1',
|
||||
];
|
||||
|
||||
it('should return the correct prompt multipliers for all models', () => {
|
||||
const results = awsModels.map((model) => {
|
||||
const multiplier = getMultiplier({ valueKey: model, tokenType: 'prompt' });
|
||||
return multiplier === tokenValues[model].prompt;
|
||||
});
|
||||
expect(results.every(Boolean)).toBe(true);
|
||||
});
|
||||
|
||||
it('should return the correct completion multipliers for all models', () => {
|
||||
const results = awsModels.map((model) => {
|
||||
const multiplier = getMultiplier({ valueKey: model, tokenType: 'completion' });
|
||||
return multiplier === tokenValues[model].completion;
|
||||
});
|
||||
expect(results.every(Boolean)).toBe(true);
|
||||
});
|
||||
|
||||
it('should return the correct prompt multipliers for all models with Bedrock prefix', () => {
|
||||
const results = awsModels.map((model) => {
|
||||
const modelName = `bedrock/${model}`;
|
||||
const multiplier = getMultiplier({ valueKey: modelName, tokenType: 'prompt' });
|
||||
return multiplier === tokenValues[model].prompt;
|
||||
});
|
||||
expect(results.every(Boolean)).toBe(true);
|
||||
});
|
||||
|
||||
it('should return the correct completion multipliers for all models with Bedrock prefix', () => {
|
||||
const results = awsModels.map((model) => {
|
||||
const modelName = `bedrock/${model}`;
|
||||
const multiplier = getMultiplier({ valueKey: modelName, tokenType: 'completion' });
|
||||
return multiplier === tokenValues[model].completion;
|
||||
});
|
||||
expect(results.every(Boolean)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -70,20 +70,11 @@ const createUser = async (data, disableTTL = true, returnUser = false) => {
|
||||
delete userData.expiresAt;
|
||||
}
|
||||
|
||||
try {
|
||||
const user = await User.create(userData);
|
||||
if (returnUser) {
|
||||
return user.toObject();
|
||||
}
|
||||
return user._id;
|
||||
} catch (error) {
|
||||
if (error.code === 11000) {
|
||||
// Duplicate key error code
|
||||
throw new Error(`User with \`_id\` ${data._id} already exists.`);
|
||||
} else {
|
||||
throw error;
|
||||
}
|
||||
const user = await User.create(userData);
|
||||
if (returnUser) {
|
||||
return user.toObject();
|
||||
}
|
||||
return user._id;
|
||||
};
|
||||
|
||||
/**
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@librechat/backend",
|
||||
"version": "0.7.3",
|
||||
"version": "0.7.4",
|
||||
"description": "",
|
||||
"scripts": {
|
||||
"start": "echo 'please run this from the root directory'",
|
||||
@@ -45,6 +45,7 @@
|
||||
"bcryptjs": "^2.4.3",
|
||||
"cheerio": "^1.0.0-rc.12",
|
||||
"cohere-ai": "^7.9.1",
|
||||
"compression": "^1.7.4",
|
||||
"connect-redis": "^7.1.0",
|
||||
"cookie": "^0.5.0",
|
||||
"cors": "^2.8.5",
|
||||
@@ -72,6 +73,7 @@
|
||||
"module-alias": "^2.2.3",
|
||||
"mongoose": "^7.1.1",
|
||||
"multer": "^1.4.5-lts.1",
|
||||
"nanoid": "^3.3.7",
|
||||
"nodejs-gpt": "^1.37.4",
|
||||
"nodemailer": "^6.9.4",
|
||||
"ollama": "^0.5.0",
|
||||
|
||||
@@ -1,8 +1,9 @@
const throttle = require('lodash/throttle');
const { getResponseSender, Constants, EModelEndpoint } = require('librechat-data-provider');
const { getResponseSender, Constants, CacheKeys, Time } = require('librechat-data-provider');
const { createAbortController, handleAbortError } = require('~/server/middleware');
const { sendMessage, createOnProgress } = require('~/server/utils');
const { saveMessage, getConvo } = require('~/models');
const { getLogStores } = require('~/cache');
const { saveMessage } = require('~/models');
const { logger } = require('~/config');

const AskController = async (req, res, next, initializeClient, addTitle) => {
@@ -18,6 +19,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
  logger.debug('[AskController]', { text, conversationId, ...endpointOption });

  let userMessage;
  let userMessagePromise;
  let promptTokens;
  let userMessageId;
  let responseMessageId;
@@ -34,6 +36,8 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
    if (key === 'userMessage') {
      userMessage = data[key];
      userMessageId = data[key].messageId;
    } else if (key === 'userMessagePromise') {
      userMessagePromise = data[key];
    } else if (key === 'responseMessageId') {
      responseMessageId = data[key];
    } else if (key === 'promptTokens') {
@@ -48,11 +52,13 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {

  try {
    const { client } = await initializeClient({ req, res, endpointOption });
    const unfinished = endpointOption.endpoint === EModelEndpoint.google ? false : true;
    const messageCache = getLogStores(CacheKeys.MESSAGES);
    const { onProgress: progressCallback, getPartialText } = createOnProgress({
      onProgress: throttle(
        ({ text: partialText }) => {
          saveMessage({
          /*
            const unfinished = endpointOption.endpoint === EModelEndpoint.google ? false : true;
            messageCache.set(responseMessageId, {
              messageId: responseMessageId,
              sender,
              conversationId,
@@ -62,7 +68,10 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
              unfinished,
              error: false,
              user,
            });
          }, Time.FIVE_MINUTES);
          */

          messageCache.set(responseMessageId, partialText, Time.FIVE_MINUTES);
        },
        3000,
        { trailing: false },
@@ -74,6 +83,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
    const getAbortData = () => ({
      sender,
      conversationId,
      userMessagePromise,
      messageId: responseMessageId,
      parentMessageId: overrideParentMessageId ?? userMessageId,
      text: getPartialText(),
@@ -81,7 +91,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
      promptTokens,
    });

    const { abortController, onStart } = createAbortController(req, res, getAbortData);
    const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData);

    res.on('close', () => {
      logger.debug('[AskController] Request closed');
@@ -108,7 +118,6 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
      progressCallback,
      progressOptions: {
        res,
        text,
        // parentMessageId: overrideParentMessageId || userMessageId,
      },
    };
@@ -121,7 +130,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {

    response.endpoint = endpointOption.endpoint;

    const conversation = await getConvo(user, conversationId);
    const { conversation = {} } = await client.responsePromise;
    conversation.title =
      conversation && !conversation.title ? null : conversation?.title || 'New Chat';

@@ -141,10 +150,18 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
      });
      res.end();

      await saveMessage({ ...response, user });
      await saveMessage(
        req,
        { ...response, user },
        { context: 'api/server/controllers/AskController.js - response end' },
      );
    }

    await saveMessage(userMessage);
    if (!client.skipSaveUserMessage) {
      await saveMessage(req, userMessage, {
        context: 'api/server/controllers/AskController.js - don\'t skip saving user message',
      });
    }

    if (addTitle && parentMessageId === Constants.NO_PARENT && newConvo) {
      addTitle(req, {
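The throttled progress handler above now caches only the partial response text keyed by responseMessageId, instead of persisting an unfinished message on every tick. A sketch of how such a cached value might be read back; the consuming code is not shown in this hunk, so treat this as an assumption:

// Sketch only: reading the cached partial text of a streaming response.
const { CacheKeys } = require('librechat-data-provider');
const { getLogStores } = require('~/cache');

const messageCache = getLogStores(CacheKeys.MESSAGES);
const partialText = await messageCache.get(responseMessageId);
if (partialText) {
  // e.g. recover the text of a generation that was aborted mid-stream
}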
@@ -1,4 +1,3 @@
const crypto = require('crypto');
const cookies = require('cookie');
const jwt = require('jsonwebtoken');
const {
@@ -7,6 +6,7 @@ const {
  setAuthTokens,
  requestPasswordReset,
} = require('~/server/services/AuthService');
const { hashToken } = require('~/server/utils/crypto');
const { Session, getUserById } = require('~/models');
const { logger } = require('~/config');

@@ -74,8 +74,7 @@ const refreshController = async (req, res) => {
  }

  // Hash the refresh token
  const hash = crypto.createHash('sha256');
  const hashedToken = hash.update(refreshToken).digest('hex');
  const hashedToken = await hashToken(refreshToken);

  // Find the session with the hashed refresh token
  const session = await Session.findOne({ user: userId, refreshTokenHash: hashedToken });
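The inline SHA-256 digest above is replaced by a shared hashToken helper from ~/server/utils/crypto; judging only from the code it replaces, the helper is presumably equivalent to this sketch:

// Sketch only: an assumed equivalent of the removed inline hashing, now centralized.
const crypto = require('crypto');

async function hashToken(token) {
  return crypto.createHash('sha256').update(token).digest('hex');
}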
@@ -1,8 +1,9 @@
|
||||
const throttle = require('lodash/throttle');
|
||||
const { getResponseSender, EModelEndpoint } = require('librechat-data-provider');
|
||||
const { getResponseSender, CacheKeys, Time } = require('librechat-data-provider');
|
||||
const { createAbortController, handleAbortError } = require('~/server/middleware');
|
||||
const { sendMessage, createOnProgress } = require('~/server/utils');
|
||||
const { saveMessage, getConvo } = require('~/models');
|
||||
const { getLogStores } = require('~/cache');
|
||||
const { saveMessage } = require('~/models');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
const EditController = async (req, res, next, initializeClient) => {
|
||||
@@ -27,6 +28,7 @@ const EditController = async (req, res, next, initializeClient) => {
|
||||
});
|
||||
|
||||
let userMessage;
|
||||
let userMessagePromise;
|
||||
let promptTokens;
|
||||
const sender = getResponseSender({
|
||||
...endpointOption,
|
||||
@@ -40,6 +42,8 @@ const EditController = async (req, res, next, initializeClient) => {
|
||||
for (let key in data) {
|
||||
if (key === 'userMessage') {
|
||||
userMessage = data[key];
|
||||
} else if (key === 'userMessagePromise') {
|
||||
userMessagePromise = data[key];
|
||||
} else if (key === 'responseMessageId') {
|
||||
responseMessageId = data[key];
|
||||
} else if (key === 'promptTokens') {
|
||||
@@ -48,12 +52,14 @@ const EditController = async (req, res, next, initializeClient) => {
|
||||
}
|
||||
};
|
||||
|
||||
const unfinished = endpointOption.endpoint === EModelEndpoint.google ? false : true;
|
||||
const messageCache = getLogStores(CacheKeys.MESSAGES);
|
||||
const { onProgress: progressCallback, getPartialText } = createOnProgress({
|
||||
generation,
|
||||
onProgress: throttle(
|
||||
({ text: partialText }) => {
|
||||
saveMessage({
|
||||
/*
|
||||
const unfinished = endpointOption.endpoint === EModelEndpoint.google ? false : true;
|
||||
{
|
||||
messageId: responseMessageId,
|
||||
sender,
|
||||
conversationId,
|
||||
@@ -64,7 +70,8 @@ const EditController = async (req, res, next, initializeClient) => {
|
||||
isEdited: true,
|
||||
error: false,
|
||||
user,
|
||||
});
|
||||
} */
|
||||
messageCache.set(responseMessageId, partialText, Time.FIVE_MINUTES);
|
||||
},
|
||||
3000,
|
||||
{ trailing: false },
|
||||
@@ -73,6 +80,7 @@ const EditController = async (req, res, next, initializeClient) => {
|
||||
|
||||
const getAbortData = () => ({
|
||||
conversationId,
|
||||
userMessagePromise,
|
||||
messageId: responseMessageId,
|
||||
sender,
|
||||
parentMessageId: overrideParentMessageId ?? userMessageId,
|
||||
@@ -81,7 +89,7 @@ const EditController = async (req, res, next, initializeClient) => {
|
||||
promptTokens,
|
||||
});
|
||||
|
||||
const { abortController, onStart } = createAbortController(req, res, getAbortData);
|
||||
const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData);
|
||||
|
||||
res.on('close', () => {
|
||||
logger.debug('[EditController] Request closed');
|
||||
@@ -115,12 +123,11 @@ const EditController = async (req, res, next, initializeClient) => {
|
||||
progressCallback,
|
||||
progressOptions: {
|
||||
res,
|
||||
text,
|
||||
// parentMessageId: overrideParentMessageId || userMessageId,
|
||||
},
|
||||
});
|
||||
|
||||
const conversation = await getConvo(user, conversationId);
|
||||
const { conversation = {} } = await client.responsePromise;
|
||||
conversation.title =
|
||||
conversation && !conversation.title ? null : conversation?.title || 'New Chat';
|
||||
|
||||
@@ -138,7 +145,11 @@ const EditController = async (req, res, next, initializeClient) => {
|
||||
});
|
||||
res.end();
|
||||
|
||||
await saveMessage({ ...response, user });
|
||||
await saveMessage(
|
||||
req,
|
||||
{ ...response, user },
|
||||
{ context: 'api/server/controllers/EditController.js - response end' },
|
||||
);
|
||||
}
|
||||
} catch (error) {
|
||||
const partialText = getPartialText();
|
||||
|
||||
@@ -120,21 +120,22 @@ const chatV1 = async (req, res) => {
|
||||
? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.'
|
||||
: ''
|
||||
}`;
|
||||
return sendResponse(res, messageData, errorMessage);
|
||||
return sendResponse(req, res, messageData, errorMessage);
|
||||
} else if (error?.message?.includes('string too long')) {
|
||||
return sendResponse(
|
||||
req,
|
||||
res,
|
||||
messageData,
|
||||
'Message too long. The Assistants API has a limit of 32,768 characters per message. Please shorten it and try again.',
|
||||
);
|
||||
} else if (error?.message?.includes(ViolationTypes.TOKEN_BALANCE)) {
|
||||
return sendResponse(res, messageData, error.message);
|
||||
return sendResponse(req, res, messageData, error.message);
|
||||
} else {
|
||||
logger.error('[/assistants/chat/]', error);
|
||||
}
|
||||
|
||||
if (!openai || !thread_id || !run_id) {
|
||||
return sendResponse(res, messageData, defaultErrorMessage);
|
||||
return sendResponse(req, res, messageData, defaultErrorMessage);
|
||||
}
|
||||
|
||||
await sleep(2000);
|
||||
@@ -221,10 +222,10 @@ const chatV1 = async (req, res) => {
|
||||
};
|
||||
} catch (error) {
|
||||
logger.error('[/assistants/chat/] Error finalizing error process', error);
|
||||
return sendResponse(res, messageData, 'The Assistant run failed');
|
||||
return sendResponse(req, res, messageData, 'The Assistant run failed');
|
||||
}
|
||||
|
||||
return sendResponse(res, finalEvent);
|
||||
return sendResponse(req, res, finalEvent);
|
||||
};
|
||||
|
||||
try {
|
||||
@@ -382,6 +383,9 @@ const chatV1 = async (req, res) => {
|
||||
return files;
|
||||
};
|
||||
|
||||
/** @type {Promise<Run>|undefined} */
|
||||
let userMessagePromise;
|
||||
|
||||
const initializeThread = async () => {
|
||||
/** @type {[ undefined | MongoFile[]]}*/
|
||||
const [processedFiles] = await Promise.all([addVisionPrompt(), getRequestFileIds()]);
|
||||
@@ -438,7 +442,7 @@ const chatV1 = async (req, res) => {
|
||||
previousMessages.push(requestMessage);
|
||||
|
||||
/* asynchronous */
|
||||
saveUserMessage({ ...requestMessage, model });
|
||||
userMessagePromise = saveUserMessage(req, { ...requestMessage, model });
|
||||
|
||||
conversation = {
|
||||
conversationId,
|
||||
@@ -582,7 +586,10 @@ const chatV1 = async (req, res) => {
|
||||
});
|
||||
res.end();
|
||||
|
||||
await saveAssistantMessage({ ...responseMessage, model });
|
||||
if (userMessagePromise) {
|
||||
await userMessagePromise;
|
||||
}
|
||||
await saveAssistantMessage(req, { ...responseMessage, model });
|
||||
|
||||
if (parentMessageId === Constants.NO_PARENT && !_thread_id) {
|
||||
addTitle(req, {
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
const { v4 } = require('uuid');
|
||||
const {
|
||||
Time,
|
||||
Constants,
|
||||
RunStatus,
|
||||
CacheKeys,
|
||||
ContentTypes,
|
||||
ToolCallTypes,
|
||||
EModelEndpoint,
|
||||
ViolationTypes,
|
||||
retrievalMimeTypes,
|
||||
AssistantStreamEvents,
|
||||
} = require('librechat-data-provider');
|
||||
@@ -14,12 +14,12 @@ const {
|
||||
initThread,
|
||||
recordUsage,
|
||||
saveUserMessage,
|
||||
checkMessageGaps,
|
||||
addThreadMetadata,
|
||||
saveAssistantMessage,
|
||||
} = require('~/server/services/Threads');
|
||||
const { sendResponse, sendMessage, sleep, isEnabled, countTokens } = require('~/server/utils');
|
||||
const { runAssistant, createOnTextProgress } = require('~/server/services/AssistantService');
|
||||
const { sendMessage, sleep, isEnabled, countTokens } = require('~/server/utils');
|
||||
const { createErrorHandler } = require('~/server/controllers/assistants/errors');
|
||||
const validateAuthor = require('~/server/middleware/assistants/validateAuthor');
|
||||
const { createRun, StreamRunManager } = require('~/server/services/Runs');
|
||||
const { addTitle } = require('~/server/services/Endpoints/assistants');
|
||||
@@ -44,7 +44,7 @@ const ten_minutes = 1000 * 60 * 10;
|
||||
const chatV2 = async (req, res) => {
|
||||
logger.debug('[/assistants/chat/] req.body', req.body);
|
||||
|
||||
/** @type {{ files: MongoFile[]}} */
|
||||
/** @type {{files: MongoFile[]}} */
|
||||
const {
|
||||
text,
|
||||
model,
|
||||
@@ -90,139 +90,20 @@ const chatV2 = async (req, res) => {
|
||||
/** @type {Run | undefined} - The completed run, undefined if incomplete */
|
||||
let completedRun;
|
||||
|
||||
const handleError = async (error) => {
|
||||
const defaultErrorMessage =
|
||||
'The Assistant run failed to initialize. Try sending a message in a new conversation.';
|
||||
const messageData = {
|
||||
thread_id,
|
||||
assistant_id,
|
||||
conversationId,
|
||||
parentMessageId,
|
||||
sender: 'System',
|
||||
user: req.user.id,
|
||||
shouldSaveMessage: false,
|
||||
messageId: responseMessageId,
|
||||
endpoint,
|
||||
};
|
||||
const getContext = () => ({
|
||||
openai,
|
||||
run_id,
|
||||
endpoint,
|
||||
cacheKey,
|
||||
thread_id,
|
||||
completedRun,
|
||||
assistant_id,
|
||||
conversationId,
|
||||
parentMessageId,
|
||||
responseMessageId,
|
||||
});
|
||||
|
||||
if (error.message === 'Run cancelled') {
|
||||
return res.end();
|
||||
} else if (error.message === 'Request closed' && completedRun) {
|
||||
return;
|
||||
} else if (error.message === 'Request closed') {
|
||||
logger.debug('[/assistants/chat/] Request aborted on close');
|
||||
} else if (/Files.*are invalid/.test(error.message)) {
|
||||
const errorMessage = `Files are invalid, or may not have uploaded yet.${
|
||||
endpoint === EModelEndpoint.azureAssistants
|
||||
? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.'
|
||||
: ''
|
||||
}`;
|
||||
return sendResponse(res, messageData, errorMessage);
|
||||
} else if (error?.message?.includes('string too long')) {
|
||||
return sendResponse(
|
||||
res,
|
||||
messageData,
|
||||
'Message too long. The Assistants API has a limit of 32,768 characters per message. Please shorten it and try again.',
|
||||
);
|
||||
} else if (error?.message?.includes(ViolationTypes.TOKEN_BALANCE)) {
|
||||
return sendResponse(res, messageData, error.message);
|
||||
} else {
|
||||
logger.error('[/assistants/chat/]', error);
|
||||
}
|
||||
|
||||
if (!openai || !thread_id || !run_id) {
|
||||
return sendResponse(res, messageData, defaultErrorMessage);
|
||||
}
|
||||
|
||||
await sleep(2000);
|
||||
|
||||
try {
|
||||
const status = await cache.get(cacheKey);
|
||||
if (status === 'cancelled') {
|
||||
logger.debug('[/assistants/chat/] Run already cancelled');
|
||||
return res.end();
|
||||
}
|
||||
await cache.delete(cacheKey);
|
||||
const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
|
||||
logger.debug('[/assistants/chat/] Cancelled run:', cancelledRun);
|
||||
} catch (error) {
|
||||
logger.error('[/assistants/chat/] Error cancelling run', error);
|
||||
}
|
||||
|
||||
await sleep(2000);
|
||||
|
||||
let run;
|
||||
try {
|
||||
run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
|
||||
await recordUsage({
|
||||
...run.usage,
|
||||
model: run.model,
|
||||
user: req.user.id,
|
||||
conversationId,
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error('[/assistants/chat/] Error fetching or processing run', error);
|
||||
}
|
||||
|
||||
let finalEvent;
|
||||
try {
|
||||
const runMessages = await checkMessageGaps({
|
||||
openai,
|
||||
run_id,
|
||||
endpoint,
|
||||
thread_id,
|
||||
conversationId,
|
||||
latestMessageId: responseMessageId,
|
||||
});
|
||||
|
||||
const errorContentPart = {
|
||||
text: {
|
||||
value:
|
||||
error?.message ?? 'There was an error processing your request. Please try again later.',
|
||||
},
|
||||
type: ContentTypes.ERROR,
|
||||
};
|
||||
|
||||
if (!Array.isArray(runMessages[runMessages.length - 1]?.content)) {
|
||||
runMessages[runMessages.length - 1].content = [errorContentPart];
|
||||
} else {
|
||||
const contentParts = runMessages[runMessages.length - 1].content;
|
||||
for (let i = 0; i < contentParts.length; i++) {
|
||||
const currentPart = contentParts[i];
|
||||
/** @type {CodeToolCall | RetrievalToolCall | FunctionToolCall | undefined} */
|
||||
const toolCall = currentPart?.[ContentTypes.TOOL_CALL];
|
||||
if (
|
||||
toolCall &&
|
||||
toolCall?.function &&
|
||||
!(toolCall?.function?.output || toolCall?.function?.output?.length)
|
||||
) {
|
||||
contentParts[i] = {
|
||||
...currentPart,
|
||||
[ContentTypes.TOOL_CALL]: {
|
||||
...toolCall,
|
||||
function: {
|
||||
...toolCall.function,
|
||||
output: 'error processing tool',
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
}
|
||||
runMessages[runMessages.length - 1].content.push(errorContentPart);
|
||||
}
|
||||
|
||||
finalEvent = {
|
||||
final: true,
|
||||
conversation: await getConvo(req.user.id, conversationId),
|
||||
runMessages,
|
||||
};
|
||||
} catch (error) {
|
||||
logger.error('[/assistants/chat/] Error finalizing error process', error);
|
||||
return sendResponse(res, messageData, 'The Assistant run failed');
|
||||
}
|
||||
|
||||
return sendResponse(res, finalEvent);
|
||||
};
|
||||
const handleError = createErrorHandler({ req, res, getContext });
|
||||
|
||||
try {
|
||||
res.on('close', async () => {
|
||||
@@ -365,6 +246,9 @@ const chatV2 = async (req, res) => {
|
||||
}
|
||||
};
|
||||
|
||||
/** @type {Promise<Run>|undefined} */
|
||||
let userMessagePromise;
|
||||
|
||||
const initializeThread = async () => {
|
||||
await getRequestFileIds();
|
||||
|
||||
@@ -407,7 +291,7 @@ const chatV2 = async (req, res) => {
|
||||
previousMessages.push(requestMessage);
|
||||
|
||||
/* asynchronous */
|
||||
saveUserMessage({ ...requestMessage, model });
|
||||
userMessagePromise = saveUserMessage(req, { ...requestMessage, model });
|
||||
|
||||
conversation = {
|
||||
conversationId,
|
||||
@@ -489,6 +373,11 @@ const chatV2 = async (req, res) => {
|
||||
},
|
||||
};
|
||||
|
||||
/** @type {undefined | TAssistantEndpoint} */
|
||||
const config = req.app.locals[endpoint] ?? {};
|
||||
/** @type {undefined | TBaseEndpoint} */
|
||||
const allConfig = req.app.locals.all;
|
||||
|
||||
const streamRunManager = new StreamRunManager({
|
||||
req,
|
||||
res,
|
||||
@@ -498,6 +387,7 @@ const chatV2 = async (req, res) => {
|
||||
attachedFileIds,
|
||||
parentMessageId: userMessageId,
|
||||
responseMessage: openai.responseMessage,
|
||||
streamRate: allConfig?.streamRate ?? config.streamRate,
|
||||
// streamOptions: {
|
||||
|
||||
// },
|
||||
@@ -510,6 +400,16 @@ const chatV2 = async (req, res) => {
|
||||
|
||||
response = streamRunManager;
|
||||
response.text = streamRunManager.intermediateText;
|
||||
|
||||
const messageCache = getLogStores(CacheKeys.MESSAGES);
|
||||
messageCache.set(
|
||||
responseMessageId,
|
||||
{
|
||||
complete: true,
|
||||
text: response.text,
|
||||
},
|
||||
Time.FIVE_MINUTES,
|
||||
);
|
||||
};
|
||||
|
||||
await processRun();
|
||||
@@ -552,7 +452,10 @@ const chatV2 = async (req, res) => {
|
||||
});
|
||||
res.end();
|
||||
|
||||
await saveAssistantMessage({ ...responseMessage, model });
|
||||
if (userMessagePromise) {
|
||||
await userMessagePromise;
|
||||
}
|
||||
await saveAssistantMessage(req, { ...responseMessage, model });
|
||||
|
||||
if (parentMessageId === Constants.NO_PARENT && !_thread_id) {
|
||||
addTitle(req, {
|
||||
|
||||
api/server/controllers/assistants/errors.js (new file, 193 lines)
@@ -0,0 +1,193 @@
|
||||
// errorHandler.js
|
||||
const { sendResponse } = require('~/server/utils');
|
||||
const { logger } = require('~/config');
|
||||
const getLogStores = require('~/cache/getLogStores');
|
||||
const { CacheKeys, ViolationTypes, ContentTypes } = require('librechat-data-provider');
|
||||
const { getConvo } = require('~/models/Conversation');
|
||||
const { recordUsage, checkMessageGaps } = require('~/server/services/Threads');
|
||||
|
||||
/**
|
||||
* @typedef {Object} ErrorHandlerContext
|
||||
* @property {OpenAIClient} openai - The OpenAI client
|
||||
* @property {string} thread_id - The thread ID
|
||||
* @property {string} run_id - The run ID
|
||||
* @property {boolean} completedRun - Whether the run has completed
|
||||
* @property {string} assistant_id - The assistant ID
|
||||
* @property {string} conversationId - The conversation ID
|
||||
* @property {string} parentMessageId - The parent message ID
|
||||
* @property {string} responseMessageId - The response message ID
|
||||
* @property {string} endpoint - The endpoint being used
|
||||
* @property {string} cacheKey - The cache key for the current request
|
||||
*/
|
||||
|
||||
/**
|
||||
* @typedef {Object} ErrorHandlerDependencies
|
||||
* @property {Express.Request} req - The Express request object
|
||||
* @property {Express.Response} res - The Express response object
|
||||
* @property {() => ErrorHandlerContext} getContext - Function to get the current context
|
||||
* @property {string} [originPath] - The origin path for the error handler
|
||||
*/
|
||||
|
||||
/**
|
||||
* Creates an error handler function with the given dependencies
|
||||
* @param {ErrorHandlerDependencies} dependencies - The dependencies for the error handler
|
||||
* @returns {(error: Error) => Promise<void>} The error handler function
|
||||
*/
|
||||
const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/chat/' }) => {
|
||||
const cache = getLogStores(CacheKeys.ABORT_KEYS);
|
||||
|
||||
/**
|
||||
* Handles errors that occur during the chat process
|
||||
* @param {Error} error - The error that occurred
|
||||
* @returns {Promise<void>}
|
||||
*/
|
||||
return async (error) => {
|
||||
const {
|
||||
openai,
|
||||
run_id,
|
||||
endpoint,
|
||||
cacheKey,
|
||||
thread_id,
|
||||
completedRun,
|
||||
assistant_id,
|
||||
conversationId,
|
||||
parentMessageId,
|
||||
responseMessageId,
|
||||
} = getContext();
|
||||
|
||||
const defaultErrorMessage =
|
||||
'The Assistant run failed to initialize. Try sending a message in a new conversation.';
|
||||
const messageData = {
|
||||
thread_id,
|
||||
assistant_id,
|
||||
conversationId,
|
||||
parentMessageId,
|
||||
sender: 'System',
|
||||
user: req.user.id,
|
||||
shouldSaveMessage: false,
|
||||
messageId: responseMessageId,
|
||||
endpoint,
|
||||
};
|
||||
|
||||
if (error.message === 'Run cancelled') {
|
||||
return res.end();
|
||||
} else if (error.message === 'Request closed' && completedRun) {
|
||||
return;
|
||||
} else if (error.message === 'Request closed') {
|
||||
logger.debug(`[${originPath}] Request aborted on close`);
|
||||
} else if (/Files.*are invalid/.test(error.message)) {
|
||||
const errorMessage = `Files are invalid, or may not have uploaded yet.${
|
||||
endpoint === 'azureAssistants'
|
||||
? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.'
|
||||
: ''
|
||||
}`;
|
||||
return sendResponse(req, res, messageData, errorMessage);
|
||||
} else if (error?.message?.includes('string too long')) {
|
||||
return sendResponse(
|
||||
req,
|
||||
res,
|
||||
messageData,
|
||||
'Message too long. The Assistants API has a limit of 32,768 characters per message. Please shorten it and try again.',
|
||||
);
|
||||
} else if (error?.message?.includes(ViolationTypes.TOKEN_BALANCE)) {
|
||||
return sendResponse(req, res, messageData, error.message);
|
||||
} else {
|
||||
logger.error(`[${originPath}]`, error);
|
||||
}
|
||||
|
||||
if (!openai || !thread_id || !run_id) {
|
||||
return sendResponse(req, res, messageData, defaultErrorMessage);
|
||||
}
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 2000));
|
||||
|
||||
try {
|
||||
const status = await cache.get(cacheKey);
|
||||
if (status === 'cancelled') {
|
||||
logger.debug(`[${originPath}] Run already cancelled`);
|
||||
return res.end();
|
||||
}
|
||||
await cache.delete(cacheKey);
|
||||
const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
|
||||
logger.debug(`[${originPath}] Cancelled run:`, cancelledRun);
|
||||
} catch (error) {
|
||||
logger.error(`[${originPath}] Error cancelling run`, error);
|
||||
}
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, 2000));
|
||||
|
||||
let run;
|
||||
try {
|
||||
run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
|
||||
await recordUsage({
|
||||
...run.usage,
|
||||
model: run.model,
|
||||
user: req.user.id,
|
||||
conversationId,
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error(`[${originPath}] Error fetching or processing run`, error);
|
||||
}
|
||||
|
||||
let finalEvent;
|
||||
try {
|
||||
const runMessages = await checkMessageGaps({
|
||||
openai,
|
||||
run_id,
|
||||
endpoint,
|
||||
thread_id,
|
||||
conversationId,
|
||||
latestMessageId: responseMessageId,
|
||||
});
|
||||
|
||||
const errorContentPart = {
|
||||
text: {
|
||||
value:
|
||||
error?.message ?? 'There was an error processing your request. Please try again later.',
|
||||
},
|
||||
type: ContentTypes.ERROR,
|
||||
};
|
||||
|
||||
if (!Array.isArray(runMessages[runMessages.length - 1]?.content)) {
|
||||
runMessages[runMessages.length - 1].content = [errorContentPart];
|
||||
} else {
|
||||
const contentParts = runMessages[runMessages.length - 1].content;
|
||||
for (let i = 0; i < contentParts.length; i++) {
|
||||
const currentPart = contentParts[i];
|
||||
/** @type {CodeToolCall | RetrievalToolCall | FunctionToolCall | undefined} */
|
||||
const toolCall = currentPart?.[ContentTypes.TOOL_CALL];
|
||||
if (
|
||||
toolCall &&
|
||||
toolCall?.function &&
|
||||
!(toolCall?.function?.output || toolCall?.function?.output?.length)
|
||||
) {
|
||||
contentParts[i] = {
|
||||
...currentPart,
|
||||
[ContentTypes.TOOL_CALL]: {
|
||||
...toolCall,
|
||||
function: {
|
||||
...toolCall.function,
|
||||
output: 'error processing tool',
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
}
|
||||
runMessages[runMessages.length - 1].content.push(errorContentPart);
|
||||
}
|
||||
|
||||
finalEvent = {
|
||||
final: true,
|
||||
conversation: await getConvo(req.user.id, conversationId),
|
||||
runMessages,
|
||||
};
|
||||
} catch (error) {
|
||||
logger.error(`[${originPath}] Error finalizing error process`, error);
|
||||
return sendResponse(req, res, messageData, 'The Assistant run failed');
|
||||
}
|
||||
|
||||
return sendResponse(req, res, finalEvent);
|
||||
};
|
||||
};
|
||||
|
||||
module.exports = { createErrorHandler };
|
||||
@@ -1,8 +1,9 @@
const {
  EModelEndpoint,
  CacheKeys,
  defaultAssistantsVersion,
  SystemRoles,
  EModelEndpoint,
  defaultOrderQuery,
  defaultAssistantsVersion,
} = require('librechat-data-provider');
const {
  initializeClient: initAzureClient,
@@ -227,7 +228,7 @@ const fetchAssistants = async ({ req, res, overrideEndpoint }) => {
    body = await listAssistantsForAzure({ req, res, version, azureConfig, query });
  }

  if (req.user.role === 'ADMIN') {
  if (req.user.role === SystemRoles.ADMIN) {
    return body;
  } else if (!req.app.locals[endpoint]) {
    return body;

@@ -4,6 +4,7 @@ require('module-alias')({ base: path.resolve(__dirname, '..') });
|
||||
const cors = require('cors');
|
||||
const axios = require('axios');
|
||||
const express = require('express');
|
||||
const compression = require('compression');
|
||||
const passport = require('passport');
|
||||
const mongoSanitize = require('express-mongo-sanitize');
|
||||
const { jwtLogin, passportLogin } = require('~/strategies');
|
||||
@@ -17,8 +18,9 @@ const configureSocialLogins = require('./socialLogins');
|
||||
const AppService = require('./services/AppService');
|
||||
const noIndex = require('./middleware/noIndex');
|
||||
const routes = require('./routes');
|
||||
const staticCache = require('./utils/staticCache');
|
||||
|
||||
const { PORT, HOST, ALLOW_SOCIAL_LOGIN } = process.env ?? {};
|
||||
const { PORT, HOST, ALLOW_SOCIAL_LOGIN, DISABLE_COMPRESSION } = process.env ?? {};
|
||||
|
||||
const port = Number(PORT) || 3080;
|
||||
const host = HOST || 'localhost';
|
||||
@@ -43,12 +45,16 @@ const startServer = async () => {
|
||||
app.use(express.json({ limit: '3mb' }));
|
||||
app.use(mongoSanitize());
|
||||
app.use(express.urlencoded({ extended: true, limit: '3mb' }));
|
||||
app.use(express.static(app.locals.paths.dist));
|
||||
app.use(express.static(app.locals.paths.fonts));
|
||||
app.use(express.static(app.locals.paths.assets));
|
||||
app.use(staticCache(app.locals.paths.dist));
|
||||
app.use(staticCache(app.locals.paths.fonts));
|
||||
app.use(staticCache(app.locals.paths.assets));
|
||||
app.set('trust proxy', 1); // trust first proxy
|
||||
app.use(cors());
|
||||
|
||||
if (DISABLE_COMPRESSION !== 'true') {
|
||||
app.use(compression());
|
||||
}
|
||||
|
||||
if (!ALLOW_SOCIAL_LOGIN) {
|
||||
console.warn(
|
||||
'Social logins are disabled. Set Environment Variable "ALLOW_SOCIAL_LOGIN" to true to enable them.',
|
||||
@@ -61,7 +67,7 @@ const startServer = async () => {
|
||||
passport.use(passportLogin());
|
||||
|
||||
// LDAP Auth
|
||||
if (process.env.LDAP_URL && process.env.LDAP_BIND_DN && process.env.LDAP_USER_SEARCH_BASE) {
|
||||
if (process.env.LDAP_URL && process.env.LDAP_USER_SEARCH_BASE) {
|
||||
passport.use(ldapLogin);
|
||||
}
|
||||
|
||||
@@ -81,6 +87,7 @@ const startServer = async () => {
|
||||
app.use('/api/convos', routes.convos);
|
||||
app.use('/api/presets', routes.presets);
|
||||
app.use('/api/prompts', routes.prompts);
|
||||
app.use('/api/categories', routes.categories);
|
||||
app.use('/api/tokenizer', routes.tokenizer);
|
||||
app.use('/api/endpoints', routes.endpoints);
|
||||
app.use('/api/balance', routes.balance);
|
||||
@@ -91,7 +98,9 @@ const startServer = async () => {
|
||||
app.use('/api/files', await routes.files.initialize());
|
||||
app.use('/images/', validateImageRequest, routes.staticRoute);
|
||||
app.use('/api/share', routes.share);
|
||||
app.use('/api/roles', routes.roles);
|
||||
|
||||
app.use('/api/tags', routes.tags);
|
||||
app.use((req, res) => {
|
||||
res.sendFile(path.join(app.locals.paths.dist, 'index.html'));
|
||||
});
|
||||
|
||||
@@ -1,31 +1,39 @@
|
||||
const { isAssistantsEndpoint } = require('librechat-data-provider');
|
||||
const { sendMessage, sendError, countTokens, isEnabled } = require('~/server/utils');
|
||||
const { truncateText, smartTruncateText } = require('~/app/clients/prompts');
|
||||
const { saveMessage, getConvo, getConvoTitle } = require('~/models');
|
||||
const clearPendingReq = require('~/cache/clearPendingReq');
|
||||
const abortControllers = require('./abortControllers');
|
||||
const { saveMessage, getConvo } = require('~/models');
|
||||
const spendTokens = require('~/models/spendTokens');
|
||||
const { abortRun } = require('./abortRun');
|
||||
const { logger } = require('~/config');
|
||||
|
||||
async function abortMessage(req, res) {
|
||||
let { abortKey, conversationId, endpoint } = req.body;
|
||||
|
||||
if (!abortKey && conversationId) {
|
||||
abortKey = conversationId;
|
||||
}
|
||||
let { abortKey, endpoint } = req.body;
|
||||
|
||||
if (isAssistantsEndpoint(endpoint)) {
|
||||
return await abortRun(req, res);
|
||||
}
|
||||
|
||||
const conversationId = abortKey?.split(':')?.[0] ?? req.user.id;
|
||||
|
||||
if (!abortControllers.has(abortKey) && abortControllers.has(conversationId)) {
|
||||
abortKey = conversationId;
|
||||
}
|
||||
|
||||
if (!abortControllers.has(abortKey) && !res.headersSent) {
|
||||
return res.status(204).send({ message: 'Request not found' });
|
||||
}
|
||||
|
||||
const { abortController } = abortControllers.get(abortKey);
|
||||
const { abortController } = abortControllers.get(abortKey) ?? {};
|
||||
if (!abortController) {
|
||||
return res.status(204).send({ message: 'Request not found' });
|
||||
}
|
||||
const finalEvent = await abortController.abortCompletion();
|
||||
logger.debug('[abortMessage] Aborted request', { abortKey });
|
||||
logger.debug(
|
||||
`[abortMessage] ID: ${req.user.id} | ${req.user.email} | Aborted request: ` +
|
||||
JSON.stringify({ abortKey }),
|
||||
);
|
||||
abortControllers.delete(abortKey);
|
||||
|
||||
if (res.headersSent && finalEvent) {
|
||||
@@ -50,12 +58,35 @@ const handleAbort = () => {
|
||||
};
|
||||
};
|
||||
|
||||
const createAbortController = (req, res, getAbortData) => {
|
||||
const createAbortController = (req, res, getAbortData, getReqData) => {
|
||||
const abortController = new AbortController();
|
||||
const { endpointOption } = req.body;
|
||||
const onStart = (userMessage) => {
|
||||
|
||||
abortController.getAbortData = function () {
|
||||
return getAbortData();
|
||||
};
|
||||
|
||||
/**
|
||||
* @param {TMessage} userMessage
|
||||
* @param {string} responseMessageId
|
||||
*/
|
||||
const onStart = (userMessage, responseMessageId) => {
|
||||
sendMessage(res, { message: userMessage, created: true });
|
||||
|
||||
const abortKey = userMessage?.conversationId ?? req.user.id;
|
||||
const prevRequest = abortControllers.get(abortKey);
|
||||
|
||||
if (prevRequest && prevRequest?.abortController) {
|
||||
const data = prevRequest.abortController.getAbortData();
|
||||
getReqData({ userMessage: data?.userMessage });
|
||||
const addedAbortKey = `${abortKey}:${responseMessageId}`;
|
||||
abortControllers.set(addedAbortKey, { abortController, ...endpointOption });
|
||||
res.on('finish', function () {
|
||||
abortControllers.delete(addedAbortKey);
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
abortControllers.set(abortKey, { abortController, ...endpointOption });
|
||||
|
||||
res.on('finish', function () {
|
||||
@@ -65,7 +96,8 @@ const createAbortController = (req, res, getAbortData) => {
|
||||
|
||||
abortController.abortCompletion = async function () {
|
||||
abortController.abort();
|
||||
const { conversationId, userMessage, promptTokens, ...responseData } = getAbortData();
|
||||
const { conversationId, userMessage, userMessagePromise, promptTokens, ...responseData } =
|
||||
getAbortData();
|
||||
const completionTokens = await countTokens(responseData?.text ?? '');
|
||||
const user = req.user.id;
|
||||
|
||||
@@ -87,12 +119,26 @@ const createAbortController = (req, res, getAbortData) => {
|
||||
{ promptTokens, completionTokens },
|
||||
);
|
||||
|
||||
saveMessage({ ...responseMessage, user });
|
||||
saveMessage(
|
||||
req,
|
||||
{ ...responseMessage, user },
|
||||
{ context: 'api/server/middleware/abortMiddleware.js' },
|
||||
);
|
||||
|
||||
let conversation;
|
||||
if (userMessagePromise) {
|
||||
const resolved = await userMessagePromise;
|
||||
conversation = resolved?.conversation;
|
||||
}
|
||||
|
||||
if (!conversation) {
|
||||
conversation = await getConvo(req.user.id, conversationId);
|
||||
}
|
||||
|
||||
return {
|
||||
title: await getConvoTitle(user, conversationId),
|
||||
title: conversation && !conversation.title ? null : conversation?.title || 'New Chat',
|
||||
final: true,
|
||||
conversation: await getConvo(user, conversationId),
|
||||
conversation,
|
||||
requestMessage: userMessage,
|
||||
responseMessage: responseMessage,
|
||||
};
|
||||
@@ -151,7 +197,7 @@ const handleAbortError = async (res, req, error, data) => {
|
||||
}
|
||||
};
|
||||
|
||||
await sendError(res, options, callback);
|
||||
await sendError(req, res, options, callback);
|
||||
};
|
||||
|
||||
if (partialText && partialText.length > 5) {
|
||||
|
||||
@@ -19,7 +19,8 @@ const validateAssistant = async (req, res, next) => {
  }

  const { supportedIds, excludedIds } = assistantsConfig;
  const error = { message: 'Assistant not supported' };
  const error = { message: 'validateAssistant: Assistant not supported' };

  if (supportedIds?.length && !supportedIds.includes(assistant_id)) {
    return await handleAbortError(res, req, error, {
      sender: 'System',
@@ -1,3 +1,4 @@
const { SystemRoles } = require('librechat-data-provider');
const { getAssistant } = require('~/models/Assistant');

/**
@@ -11,7 +12,7 @@ const { getAssistant } = require('~/models/Assistant');
 * @returns {Promise<void>}
 */
const validateAuthor = async ({ req, openai, overrideEndpoint, overrideAssistantId }) => {
  if (req.user.role === 'ADMIN') {
  if (req.user.role === SystemRoles.ADMIN) {
    return;
  }

@@ -1,3 +1,4 @@
const { SystemRoles } = require('librechat-data-provider');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');

@@ -16,7 +17,7 @@ const { logger } = require('~/config');
const canDeleteAccount = async (req, res, next = () => {}) => {
  const { user } = req;
  const { ALLOW_ACCOUNT_DELETION = true } = process.env;
  if (user?.role === 'ADMIN' || isEnabled(ALLOW_ACCOUNT_DELETION)) {
  if (user?.role === SystemRoles.ADMIN || isEnabled(ALLOW_ACCOUNT_DELETION)) {
    return next();
  } else {
    logger.error(`[User] [Delete Account] [User cannot delete account] [User: ${user?.id}]`);
@@ -1,5 +1,7 @@
const clearPendingReq = require('../../cache/clearPendingReq');
const { logViolation, getLogStores } = require('../../cache');
const { Time } = require('librechat-data-provider');
const clearPendingReq = require('~/cache/clearPendingReq');
const { logViolation, getLogStores } = require('~/cache');
const { isEnabled } = require('~/server/utils');
const denyRequest = require('./denyRequest');

const {
@@ -7,7 +9,6 @@ const {
  CONCURRENT_MESSAGE_MAX = 1,
  CONCURRENT_VIOLATION_SCORE: score,
} = process.env ?? {};
const ttl = 1000 * 60 * 1;

/**
 * Middleware to limit concurrent requests for a user.
@@ -38,7 +39,7 @@ const concurrentLimiter = async (req, res, next) => {
  const limit = Math.max(CONCURRENT_MESSAGE_MAX, 1);
  const type = 'concurrent';

  const key = `${USE_REDIS ? namespace : ''}:${userId}`;
  const key = `${isEnabled(USE_REDIS) ? namespace : ''}:${userId}`;
  const pendingRequests = +((await cache.get(key)) ?? 0);

  if (pendingRequests >= limit) {
@@ -51,7 +52,7 @@ const concurrentLimiter = async (req, res, next) => {
    await logViolation(req, res, type, errorMessage, score);
    return await denyRequest(req, res, errorMessage);
  } else {
    await cache.set(key, pendingRequests + 1, ttl);
    await cache.set(key, pendingRequests + 1, Time.ONE_MINUTE);
  }

  // Ensure the requests are removed from the store once the request is done

@@ -41,10 +41,14 @@ const denyRequest = async (req, res, errorMessage) => {
  const shouldSaveMessage = _convoId && parentMessageId && parentMessageId !== Constants.NO_PARENT;

  if (shouldSaveMessage) {
    await saveMessage({ ...userMessage, user: req.user.id });
    await saveMessage(
      req,
      { ...userMessage, user: req.user.id },
      { context: `api/server/middleware/denyRequest.js - ${responseText}` },
    );
  }

  return await sendError(res, {
  return await sendError(req, res, {
    sender: getResponseSender(req.body),
    messageId: crypto.randomUUID(),
    conversationId,
@@ -14,14 +14,18 @@ const requireJwtAuth = require('./requireJwtAuth');
const validateModel = require('./validateModel');
const moderateText = require('./moderateText');
const setHeaders = require('./setHeaders');
const validate = require('./validate');
const limiters = require('./limiters');
const uaParser = require('./uaParser');
const checkBan = require('./checkBan');
const noIndex = require('./noIndex');
const roles = require('./roles');

module.exports = {
  ...abortMiddleware,
  ...validate,
  ...limiters,
  ...roles,
  noIndex,
  checkBan,
  uaParser,
@@ -13,7 +13,7 @@ const requireLdapAuth = (req, res, next) => {
    console.log({
      title: '(requireLdapAuth) Error: No user',
    });
    return res.status(422).send(info);
    return res.status(404).send(info);
  }
  req.user = user;
  next();
api/server/middleware/roles/checkAdmin.js (new file, 14 lines)
@@ -0,0 +1,14 @@
const { SystemRoles } = require('librechat-data-provider');

function checkAdmin(req, res, next) {
  try {
    if (req.user.role !== SystemRoles.ADMIN) {
      return res.status(403).json({ message: 'Forbidden' });
    }
    next();
  } catch (error) {
    res.status(500).json({ message: 'Internal Server Error' });
  }
}

module.exports = checkAdmin;
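A hypothetical mounting example for the new admin-only guard; the route, handler, and requireJwtAuth wiring below are illustrative and not part of this diff:

// Sketch only: reject non-admins before the handler runs.
const checkAdmin = require('~/server/middleware/roles/checkAdmin');

router.delete('/users/:userId', requireJwtAuth, checkAdmin, async (req, res) => {
  // reached only when req.user.role === SystemRoles.ADMIN
  res.status(200).json({ deleted: req.params.userId });
});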
api/server/middleware/roles/generateCheckAccess.js (new file, 52 lines)
@@ -0,0 +1,52 @@
const { SystemRoles } = require('librechat-data-provider');
const { getRoleByName } = require('~/models/Role');

/**
 * Middleware to check if a user has one or more required permissions, optionally based on `req.body` properties.
 *
 * @param {PermissionTypes} permissionType - The type of permission to check.
 * @param {Permissions[]} permissions - The list of specific permissions to check.
 * @param {Record<Permissions, string[]>} [bodyProps] - An optional object where keys are permissions and values are arrays of `req.body` properties to check.
 * @returns {Function} Express middleware function.
 */
const generateCheckAccess = (permissionType, permissions, bodyProps = {}) => {
  return async (req, res, next) => {
    try {
      const { user } = req;
      if (!user) {
        return res.status(401).json({ message: 'Authorization required' });
      }

      if (user.role === SystemRoles.ADMIN) {
        return next();
      }

      const role = await getRoleByName(user.role);
      if (role && role[permissionType]) {
        const hasAnyPermission = permissions.some((permission) => {
          if (role[permissionType][permission]) {
            return true;
          }

          if (bodyProps[permission] && req.body) {
            return bodyProps[permission].some((prop) =>
              Object.prototype.hasOwnProperty.call(req.body, prop),
            );
          }

          return false;
        });

        if (hasAnyPermission) {
          return next();
        }
      }

      return res.status(403).json({ message: 'Forbidden: Insufficient permissions' });
    } catch (error) {
      return res.status(500).json({ message: `Server error: ${error.message}` });
    }
  };
};

module.exports = generateCheckAccess;
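A hypothetical wiring sketch for generateCheckAccess; the permission type and permission names below are assumptions for illustration, not taken from this diff:

// Sketch only: gate prompt routes behind role-based permissions, with an extra
// req.body property check tied to a specific permission.
const { generateCheckAccess } = require('~/server/middleware/roles');
const { PermissionTypes, Permissions } = require('librechat-data-provider');

const checkPromptAccess = generateCheckAccess(PermissionTypes.PROMPTS, [Permissions.USE]);
const checkPromptCreate = generateCheckAccess(
  PermissionTypes.PROMPTS,
  [Permissions.USE, Permissions.CREATE],
  { [Permissions.SHARED_GLOBAL]: ['projectIds', 'removedFromProjects'] },
);

router.get('/prompts', requireJwtAuth, checkPromptAccess, getPrompts);
router.post('/prompts', requireJwtAuth, checkPromptCreate, createPrompt);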
api/server/middleware/roles/index.js (new file, 7 lines)
@@ -0,0 +1,7 @@
const checkAdmin = require('./checkAdmin');
const generateCheckAccess = require('./generateCheckAccess');

module.exports = {
  checkAdmin,
  generateCheckAccess,
};
api/server/middleware/spec/validateImages.spec.js (new file, 112 lines)
@@ -0,0 +1,112 @@
|
||||
const jwt = require('jsonwebtoken');
|
||||
const validateImageRequest = require('~/server/middleware/validateImageRequest');
|
||||
|
||||
describe('validateImageRequest middleware', () => {
|
||||
let req, res, next;
|
||||
|
||||
beforeEach(() => {
|
||||
req = {
|
||||
app: { locals: { secureImageLinks: true } },
|
||||
headers: {},
|
||||
originalUrl: '',
|
||||
};
|
||||
res = {
|
||||
status: jest.fn().mockReturnThis(),
|
||||
send: jest.fn(),
|
||||
};
|
||||
next = jest.fn();
|
||||
process.env.JWT_REFRESH_SECRET = 'test-secret';
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
jest.clearAllMocks();
|
||||
});
|
||||
|
||||
test('should call next() if secureImageLinks is false', () => {
|
||||
req.app.locals.secureImageLinks = false;
|
||||
validateImageRequest(req, res, next);
|
||||
expect(next).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should return 401 if refresh token is not provided', () => {
|
||||
validateImageRequest(req, res, next);
|
||||
expect(res.status).toHaveBeenCalledWith(401);
|
||||
expect(res.send).toHaveBeenCalledWith('Unauthorized');
|
||||
});
|
||||
|
||||
test('should return 403 if refresh token is invalid', () => {
|
||||
req.headers.cookie = 'refreshToken=invalid-token';
|
||||
validateImageRequest(req, res, next);
|
||||
expect(res.status).toHaveBeenCalledWith(403);
|
||||
expect(res.send).toHaveBeenCalledWith('Access Denied');
|
||||
});
|
||||
|
||||
test('should return 403 if refresh token is expired', () => {
|
||||
const expiredToken = jwt.sign(
|
||||
{ id: '123', exp: Math.floor(Date.now() / 1000) - 3600 },
|
||||
process.env.JWT_REFRESH_SECRET,
|
||||
);
|
||||
req.headers.cookie = `refreshToken=${expiredToken}`;
|
||||
validateImageRequest(req, res, next);
|
||||
expect(res.status).toHaveBeenCalledWith(403);
|
||||
expect(res.send).toHaveBeenCalledWith('Access Denied');
|
||||
});
|
||||
|
||||
test('should call next() for valid image path', () => {
|
||||
const validToken = jwt.sign(
|
||||
{ id: '123', exp: Math.floor(Date.now() / 1000) + 3600 },
|
||||
process.env.JWT_REFRESH_SECRET,
|
||||
);
|
||||
req.headers.cookie = `refreshToken=${validToken}`;
|
||||
req.originalUrl = '/images/123/example.jpg';
|
||||
validateImageRequest(req, res, next);
|
||||
expect(next).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
test('should return 403 for invalid image path', () => {
|
||||
const validToken = jwt.sign(
|
||||
{ id: '123', exp: Math.floor(Date.now() / 1000) + 3600 },
|
||||
process.env.JWT_REFRESH_SECRET,
|
||||
);
|
||||
req.headers.cookie = `refreshToken=${validToken}`;
|
||||
req.originalUrl = '/images/456/example.jpg';
|
||||
validateImageRequest(req, res, next);
|
||||
expect(res.status).toHaveBeenCalledWith(403);
|
||||
expect(res.send).toHaveBeenCalledWith('Access Denied');
|
||||
});
|
||||
|
||||
// File traversal tests
|
||||
test('should prevent file traversal attempts', () => {
|
||||
const validToken = jwt.sign(
|
||||
{ id: '123', exp: Math.floor(Date.now() / 1000) + 3600 },
|
||||
process.env.JWT_REFRESH_SECRET,
|
||||
);
|
||||
req.headers.cookie = `refreshToken=${validToken}`;
|
||||
|
||||
const traversalAttempts = [
|
||||
'/images/123/../../../etc/passwd',
|
||||
'/images/123/..%2F..%2F..%2Fetc%2Fpasswd',
|
||||
'/images/123/image.jpg/../../../etc/passwd',
|
||||
'/images/123/%2e%2e%2f%2e%2e%2f%2e%2e%2fetc%2fpasswd',
|
||||
];
|
||||
|
||||
traversalAttempts.forEach((attempt) => {
|
||||
req.originalUrl = attempt;
|
||||
validateImageRequest(req, res, next);
|
||||
expect(res.status).toHaveBeenCalledWith(403);
|
||||
expect(res.send).toHaveBeenCalledWith('Access Denied');
|
||||
jest.clearAllMocks();
|
||||
});
|
||||
});
|
||||
|
||||
test('should handle URL encoded characters in valid paths', () => {
|
||||
const validToken = jwt.sign(
|
||||
{ id: '123', exp: Math.floor(Date.now() / 1000) + 3600 },
|
||||
process.env.JWT_REFRESH_SECRET,
|
||||
);
|
||||
req.headers.cookie = `refreshToken=${validToken}`;
|
||||
req.originalUrl = '/images/123/image%20with%20spaces.jpg';
|
||||
validateImageRequest(req, res, next);
|
||||
expect(next).toHaveBeenCalled();
|
||||
});
|
||||
});
|
||||
api/server/middleware/validate/convoAccess.js (new file, 73 lines)
@@ -0,0 +1,73 @@
|
||||
const { Constants, ViolationTypes, Time } = require('librechat-data-provider');
|
||||
const { searchConversation } = require('~/models/Conversation');
|
||||
const denyRequest = require('~/server/middleware/denyRequest');
|
||||
const { logViolation, getLogStores } = require('~/cache');
|
||||
const { isEnabled } = require('~/server/utils');
|
||||
|
||||
const { USE_REDIS, CONVO_ACCESS_VIOLATION_SCORE: score = 0 } = process.env ?? {};
|
||||
|
||||
/**
|
||||
* Middleware to validate user's authorization for a conversation.
|
||||
*
|
||||
* This middleware checks if a user has the right to access a specific conversation.
|
||||
* If the user doesn't have access, an error is returned. If the conversation doesn't exist,
|
||||
* a not found error is returned. If the access is valid, the middleware allows the request to proceed.
|
||||
* If the `cache` store is not available, the middleware will skip its logic.
|
||||
*
|
||||
* @function
|
||||
* @param {Express.Request} req - Express request object containing user information.
|
||||
* @param {Express.Response} res - Express response object.
|
||||
* @param {function} next - Express next middleware function.
|
||||
* @throws {Error} Throws an error if the user doesn't have access to the conversation.
|
||||
*/
|
||||
const validateConvoAccess = async (req, res, next) => {
|
||||
const namespace = ViolationTypes.CONVO_ACCESS;
|
||||
const cache = getLogStores(namespace);
|
||||
|
||||
const conversationId = req.body.conversationId;
|
||||
|
||||
if (!conversationId || conversationId === Constants.NEW_CONVO) {
|
||||
return next();
|
||||
}
|
||||
|
||||
const userId = req.user?.id ?? req.user?._id ?? '';
|
||||
const type = ViolationTypes.CONVO_ACCESS;
|
||||
const key = `${isEnabled(USE_REDIS) ? namespace : ''}:${userId}:${conversationId}`;
|
||||
|
||||
try {
|
||||
if (cache) {
|
||||
const cachedAccess = await cache.get(key);
|
||||
if (cachedAccess === 'authorized') {
|
||||
return next();
|
||||
}
|
||||
}
|
||||
|
||||
const conversation = await searchConversation(conversationId);
|
||||
|
||||
if (!conversation) {
|
||||
return next();
|
||||
}
|
||||
|
||||
if (conversation.user !== userId) {
|
||||
const errorMessage = {
|
||||
type,
|
||||
error: 'User not authorized for this conversation',
|
||||
};
|
||||
|
||||
if (cache) {
|
||||
await logViolation(req, res, type, errorMessage, score);
|
||||
}
|
||||
return await denyRequest(req, res, errorMessage);
|
||||
}
|
||||
|
||||
if (cache) {
|
||||
await cache.set(key, 'authorized', Time.TEN_MINUTES);
|
||||
}
|
||||
next();
|
||||
} catch (error) {
|
||||
console.error('Error validating conversation access:', error);
|
||||
res.status(500).json({ error: 'Internal server error' });
|
||||
}
|
||||
};
|
||||
|
||||
module.exports = validateConvoAccess;
|
||||
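A hypothetical example of plugging the new validator into a message route; the route path and surrounding middleware are illustrative, not part of this diff:

// Sketch only: block requests that reference a conversation the user does not own.
const { validateConvoAccess } = require('~/server/middleware/validate');

router.post('/api/ask/:endpoint', requireJwtAuth, validateConvoAccess, askController);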
api/server/middleware/validate/index.js (new file, 4 lines)
@@ -0,0 +1,4 @@
const validateConvoAccess = require('./convoAccess');
module.exports = {
  validateConvoAccess,
};
@@ -31,10 +31,14 @@ function validateImageRequest(req, res, next) {
    return res.status(403).send('Access Denied');
  }

  if (req.path.includes(payload.id)) {
  const fullPath = decodeURIComponent(req.originalUrl);
  const pathPattern = new RegExp(`^/images/${payload.id}/[^/]+$`);

  if (pathPattern.test(fullPath)) {
    logger.debug('[validateImageRequest] Image request validated');
    next();
  } else {
    logger.warn('[validateImageRequest] Invalid image path');
    res.status(403).send('Access Denied');
  }
}
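To make the new check concrete, a small illustration of what the pattern accepts and rejects (example values, not from the diff):

// Sketch only: the decoded path must be exactly /images/<user id>/<single filename>.
const payload = { id: '123' };
const pathPattern = new RegExp(`^/images/${payload.id}/[^/]+$`);

pathPattern.test('/images/123/avatar.png');            // true
pathPattern.test('/images/123/image with spaces.jpg'); // true (after decodeURIComponent)
pathPattern.test('/images/123/../456/avatar.png');     // false - extra path segments rejected
pathPattern.test('/images/456/avatar.png');            // false - different user id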
@@ -1,4 +1,4 @@
const { getConvo } = require('../../models');
const { getConvo } = require('~/models');

// Middleware to validate conversationId and user relationship
const validateMessageReq = async (req, res, next) => {
@@ -76,7 +76,9 @@ describe.skip('GET /', () => {
      openidLoginEnabled: true,
      openidLabel: 'Test OpenID',
      openidImageUrl: 'http://test-server.com',
      ldapLoginEnabled: true,
      ldap: {
        enabled: true,
      },
      serverDomain: 'http://test-server.com',
      emailLoginEnabled: 'true',
      registrationEnabled: 'true',
api/server/routes/__tests__/ldap.spec.js (new file, 55 lines)
@@ -0,0 +1,55 @@
|
||||
const request = require('supertest');
|
||||
const express = require('express');
|
||||
const { getLdapConfig } = require('~/server/services/Config/ldap');
|
||||
const { isEnabled } = require('~/server/utils');
|
||||
|
||||
jest.mock('~/server/services/Config/ldap');
|
||||
jest.mock('~/server/utils');
|
||||
|
||||
const app = express();
|
||||
|
||||
// Mock the route handler
|
||||
app.get('/api/config', (req, res) => {
|
||||
const ldapConfig = getLdapConfig();
|
||||
res.json({ ldap: ldapConfig });
|
||||
});
|
||||
|
||||
describe('LDAP Config Tests', () => {
|
||||
afterEach(() => {
|
||||
jest.resetAllMocks();
|
||||
});
|
||||
|
||||
it('should return LDAP config with username property when LDAP_LOGIN_USES_USERNAME is enabled', async () => {
|
||||
getLdapConfig.mockReturnValue({ enabled: true, username: true });
|
||||
isEnabled.mockReturnValue(true);
|
||||
|
||||
const response = await request(app).get('/api/config');
|
||||
|
||||
expect(response.statusCode).toBe(200);
|
||||
expect(response.body.ldap).toEqual({
|
||||
enabled: true,
|
||||
username: true,
|
||||
});
|
||||
});
|
||||
|
||||
it('should return LDAP config without username property when LDAP_LOGIN_USES_USERNAME is not enabled', async () => {
|
||||
getLdapConfig.mockReturnValue({ enabled: true });
|
||||
isEnabled.mockReturnValue(false);
|
||||
|
||||
const response = await request(app).get('/api/config');
|
||||
|
||||
expect(response.statusCode).toBe(200);
|
||||
expect(response.body.ldap).toEqual({
|
||||
enabled: true,
|
||||
});
|
||||
});
|
||||
|
||||
it('should not return LDAP config when LDAP is not enabled', async () => {
|
||||
getLdapConfig.mockReturnValue(undefined);
|
||||
|
||||
const response = await request(app).get('/api/config');
|
||||
|
||||
expect(response.statusCode).toBe(200);
|
||||
expect(response.body.ldap).toBeUndefined();
|
||||
});
|
||||
});
@@ -51,8 +51,8 @@ router.post('/', setHeaders, async (req, res) => {
  });

  if (!overrideParentMessageId) {
-   await saveMessage({ ...userMessage, user: req.user.id });
-   await saveConvo(req.user.id, {
+   await saveMessage(req, { ...userMessage, user: req.user.id });
+   await saveConvo(req, {
      ...userMessage,
      ...endpointOption,
      conversationId,
@@ -93,7 +93,7 @@ const ask = async ({
  const currentTimestamp = Date.now();
  if (currentTimestamp - lastSavedTimestamp > 500) {
    lastSavedTimestamp = currentTimestamp;
-   saveMessage({
+   saveMessage(req, {
      messageId: responseMessageId,
      sender: endpointOption?.jailbreak ? 'Sydney' : 'BingAI',
      conversationId,
@@ -159,7 +159,7 @@ const ask = async ({
    isCreatedByUser: false,
  };

- await saveMessage({ ...responseMessage, user });
+ await saveMessage(req, { ...responseMessage, user });
  responseMessage.messageId = newResponseMessageId;

  // STEP2 update the conversation
@@ -183,7 +183,7 @@ const ask = async ({
    }
  }

- await saveConvo(user, conversationUpdate);
+ await saveConvo(req, conversationUpdate);
  conversationId = newConversationId;

  // STEP3 update the user message
@@ -192,7 +192,7 @@ const ask = async ({
  // If response has parentMessageId, the fake userMessage.messageId should be updated to the real one.
  if (!overrideParentMessageId) {
-   await saveMessage({
+   await saveMessage(req, {
      ...userMessage,
      user,
      messageId: userMessageId,
@@ -213,7 +213,7 @@ const ask = async ({
  if (userParentMessageId == Constants.NO_PARENT) {
    // const title = await titleConvo({ endpoint: endpointOption?.endpoint, text, response: responseMessage });
    const title = await response.details.title;
-   await saveConvo(user, {
+   await saveConvo(req, {
      conversationId: conversationId,
      title,
    });
@@ -229,7 +229,7 @@ const ask = async ({
    isCreatedByUser: false,
    text: `${getPartialMessage() ?? ''}\n\nError message: "${error.message}"`,
  };
- await saveMessage({ ...errorMessage, user });
+ await saveMessage(req, { ...errorMessage, user });
  handleError(res, errorMessage);
  }
};
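The recurring edit in the hunks above is a signature change: saveMessage and saveConvo now receive the Express req object (and, in later hunks, a metadata object with a context string) instead of a bare user id. A minimal stand-in showing the new call shape; the function body is hypothetical and only illustrates why threading req through is convenient:

// Illustrative stand-in (assumption, not the real ~/models implementation):
// the (req, messageData, metadata) shape lets the model layer read the
// authenticated user from req and record which route performed the save.
const saveMessage = async (req, messageData, metadata = {}) => {
  const user = req.user?.id;
  console.log(`[${metadata.context ?? 'no context'}] saving message for ${user}`);
  return { ...messageData, user };
};

// Call shape mirroring the new code above:
const req = { user: { id: 'user-123' } };
saveMessage(req, { conversationId: 'abc', text: 'hello' }, { context: 'example route' });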
@@ -70,8 +70,8 @@ router.post('/', setHeaders, async (req, res) => {
  });

  if (!overrideParentMessageId) {
-   await saveMessage({ ...userMessage, user: req.user.id });
-   await saveConvo(req.user.id, {
+   await saveMessage(req, { ...userMessage, user: req.user.id });
+   await saveConvo(req, {
      ...userMessage,
      ...endpointOption,
      conversationId,
@@ -118,7 +118,7 @@ const ask = async ({
  const currentTimestamp = Date.now();
  if (currentTimestamp - lastSavedTimestamp > 500) {
    lastSavedTimestamp = currentTimestamp;
-   saveMessage({
+   saveMessage(req, {
      messageId: responseMessageId,
      sender: model,
      conversationId,
@@ -197,7 +197,7 @@ const ask = async ({
    isCreatedByUser: false,
  };

- await saveMessage({ ...responseMessage, user });
+ await saveMessage(req, { ...responseMessage, user });
  responseMessage.messageId = newResponseMessageId;

  let conversationUpdate = {
@@ -216,12 +216,12 @@ const ask = async ({
    conversationUpdate.invocationId = response.invocationId;
  }

- await saveConvo(user, conversationUpdate);
+ await saveConvo(req, conversationUpdate);
  userMessage.messageId = newUserMessageId;

  // If response has parentMessageId, the fake userMessage.messageId should be updated to the real one.
  if (!overrideParentMessageId) {
-   await saveMessage({
+   await saveMessage(req, {
      ...userMessage,
      user,
      messageId: userMessageId,
@@ -245,7 +245,7 @@ const ask = async ({
    response: responseMessage,
  });

- await saveConvo(user, {
+ await saveConvo(req, {
    conversationId: conversationId,
    title,
  });
@@ -266,7 +266,7 @@ const ask = async ({
    isCreatedByUser: false,
  };

- saveMessage({ ...responseMessage, user });
+ saveMessage(req, { ...responseMessage, user });

  return {
    title: await getConvoTitle(user, conversationId),
@@ -288,7 +288,7 @@ const ask = async ({
    model,
    isCreatedByUser: false,
  };
- await saveMessage({ ...errorMessage, user });
+ await saveMessage(req, { ...errorMessage, user });
  handleError(res, errorMessage);
  }
}
@@ -1,6 +1,6 @@
const express = require('express');
const AskController = require('~/server/controllers/AskController');
- const { initializeClient } = require('~/server/services/Endpoints/google');
+ const { initializeClient, addTitle } = require('~/server/services/Endpoints/google');
const {
  setHeaders,
  handleAbort,
@@ -20,7 +20,7 @@ router.post(
  buildEndpointOption,
  setHeaders,
  async (req, res, next) => {
-   await AskController(req, res, next, initializeClient);
+   await AskController(req, res, next, initializeClient, addTitle);
  },
);
@@ -1,10 +1,11 @@
const express = require('express');
const throttle = require('lodash/throttle');
- const { getResponseSender, Constants } = require('librechat-data-provider');
+ const { getResponseSender, Constants, CacheKeys, Time } = require('librechat-data-provider');
const { initializeClient } = require('~/server/services/Endpoints/gptPlugins');
- const { saveMessage, getConvoTitle, getConvo } = require('~/models');
const { sendMessage, createOnProgress } = require('~/server/utils');
const { addTitle } = require('~/server/services/Endpoints/openAI');
+ const { saveMessage, updateMessage } = require('~/models');
+ const { getLogStores } = require('~/cache');
const {
  handleAbort,
  createAbortController,
@@ -41,6 +42,7 @@ router.post(
  logger.debug('[/ask/gptPlugins]', { text, conversationId, ...endpointOption });

  let userMessage;
+ let userMessagePromise;
  let promptTokens;
  let userMessageId;
  let responseMessageId;
@@ -58,6 +60,8 @@
  if (key === 'userMessage') {
    userMessage = data[key];
    userMessageId = data[key].messageId;
+ } else if (key === 'userMessagePromise') {
+   userMessagePromise = data[key];
  } else if (key === 'responseMessageId') {
    responseMessageId = data[key];
  } else if (key === 'promptTokens') {
@@ -68,7 +72,15 @@
    }
  };

- const throttledSaveMessage = throttle(saveMessage, 3000, { trailing: false });
+ const messageCache = getLogStores(CacheKeys.MESSAGES);
+ const throttledCacheSet = throttle(
+   (text) => {
+     messageCache.set(responseMessageId, text, Time.FIVE_MINUTES);
+   },
+   3000,
+   { trailing: false },
+ );

  let streaming = null;
  let timer = null;
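The new throttledCacheSet replaces the old throttledSaveMessage: partial streamed text now goes to a message cache at most once every 3 seconds instead of being written to the database on every progress tick. A self-contained sketch of the same lodash throttle pattern, with a plain Map standing in for the store returned by getLogStores(CacheKeys.MESSAGES):

const throttle = require('lodash/throttle');

// A plain Map stands in for the keyv-style message cache used in the diff.
const messageCache = new Map();
const FIVE_MINUTES = 5 * 60 * 1000;
const responseMessageId = 'msg-123';

// Leading call runs immediately; further calls within 3s are dropped ({ trailing: false }),
// so a fast token stream produces at most one cache write per window.
const throttledCacheSet = throttle(
  (text) => {
    messageCache.set(responseMessageId, { text, expiresAt: Date.now() + FIVE_MINUTES });
  },
  3000,
  { trailing: false },
);

for (const partial of ['Hel', 'Hello', 'Hello, world']) {
  throttledCacheSet(partial);
}
console.log(messageCache.get(responseMessageId)); // only the first write lands in this window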
@@ -82,18 +94,7 @@
    clearTimeout(timer);
  }

- throttledSaveMessage({
-   messageId: responseMessageId,
-   sender,
-   conversationId,
-   parentMessageId: overrideParentMessageId || userMessageId,
-   text: partialText,
-   model: endpointOption.modelOptions.model,
-   unfinished: true,
-   error: false,
-   plugins,
-   user,
- });
+ throttledCacheSet(partialText);

  streaming = new Promise((resolve) => {
    timer = setTimeout(() => {
@@ -148,18 +149,10 @@
    }
  };

- const onChainEnd = () => {
-   saveMessage({ ...userMessage, user });
-   sendIntermediateMessage(res, {
-     plugins,
-     parentMessageId: userMessage.messageId,
-     messageId: responseMessageId,
-   });
- };
-
  const getAbortData = () => ({
    sender,
    conversationId,
+   userMessagePromise,
    messageId: responseMessageId,
    parentMessageId: overrideParentMessageId ?? userMessageId,
    text: getPartialText(),
@@ -167,12 +160,27 @@
    userMessage,
    promptTokens,
  });
- const { abortController, onStart } = createAbortController(req, res, getAbortData);
+ const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData);

  try {
    endpointOption.tools = await validateTools(user, endpointOption.tools);
    const { client } = await initializeClient({ req, res, endpointOption });

+   const onChainEnd = () => {
+     if (!client.skipSaveUserMessage) {
+       saveMessage(
+         req,
+         { ...userMessage, user },
+         { context: 'api/server/routes/ask/gptPlugins.js - onChainEnd' },
+       );
+     }
+     sendIntermediateMessage(res, {
+       plugins,
+       parentMessageId: userMessage.messageId,
+       messageId: responseMessageId,
+     });
+   };
+
    let response = await client.sendMessage(text, {
      user,
      conversationId,
@@ -189,7 +197,6 @@
    progressCallback,
    progressOptions: {
      res,
-     text,
      // parentMessageId: overrideParentMessageId || userMessageId,
      plugins,
    },
@@ -202,13 +209,14 @@

    logger.debug('[/ask/gptPlugins]', response);

-   response.plugins = plugins.map((p) => ({ ...p, loading: false }));
-   await saveMessage({ ...response, user });
+   const { conversation = {} } = await client.responsePromise;
+   conversation.title =
+     conversation && !conversation.title ? null : conversation?.title || 'New Chat';

    sendMessage(res, {
-     title: await getConvoTitle(user, conversationId),
+     title: conversation.title,
      final: true,
-     conversation: await getConvo(user, conversationId),
+     conversation,
      requestMessage: userMessage,
      responseMessage: response,
    });
@@ -221,6 +229,15 @@
      client,
    });
  }

+ response.plugins = plugins.map((p) => ({ ...p, loading: false }));
+ if (response.plugins?.length > 0) {
+   await updateMessage(
+     req,
+     { ...response, user },
+     { context: 'api/server/routes/ask/gptPlugins.js - save plugins used' },
+   );
+ }
  } catch (error) {
    const partialText = getPartialText();
    handleAbortError(res, req, error, {
@@ -12,9 +12,10 @@
  uaParser,
  checkBan,
  requireJwtAuth,
- concurrentLimiter,
  messageIpLimiter,
+ concurrentLimiter,
  messageUserLimiter,
+ validateConvoAccess,
} = require('~/server/middleware');

const { LIMIT_CONCURRENT_MESSAGES, LIMIT_MESSAGE_IP, LIMIT_MESSAGE_USER } = process.env ?? {};
@@ -37,6 +38,8 @@ if (isEnabled(LIMIT_MESSAGE_USER)) {
  router.use(messageUserLimiter);
}

+ router.use(validateConvoAccess);
+
router.use([`/${EModelEndpoint.azureOpenAI}`, `/${EModelEndpoint.openAI}`], openAI);
router.use(`/${EModelEndpoint.chatGPTBrowser}`, askChatGPTBrowser);
router.use(`/${EModelEndpoint.gptPlugins}`, gptPlugins);
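Both the ask and edit routers now register validateConvoAccess with router.use after the optional rate limiters and before the per-endpoint routers, so every message request passes the conversation-access check first. A bare-bones Express sketch of that ordering; the middleware body here is a placeholder, not LibreChat's implementation:

const express = require('express');

// Placeholder middleware; the real validateConvoAccess performs an ownership check.
const validateConvoAccess = (req, res, next) => {
  // e.g. verify req.user may write to req.body.conversationId, else respond 403
  next();
};

const router = express.Router();
// ...rate limiters would be registered here when enabled...
router.use(validateConvoAccess); // runs before every endpoint route below
router.use('/openAI', (req, res) => res.json({ ok: true }));

module.exports = router;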
@@ -42,7 +42,7 @@ router.post('/:assistant_id', async (req, res) => {
    return res.status(400).json({ message: 'No functions provided' });
  }

- let metadata = encryptMetadata(_metadata);
+ let metadata = await encryptMetadata(_metadata);

  let { domain } = metadata;
  domain = await domainParser(req, domain, true);
@@ -8,6 +8,7 @@ const {
  // validateEndpoint,
  buildEndpointOption,
} = require('~/server/middleware');
+ const validateConvoAccess = require('~/server/middleware/validate/convoAccess');
const validateAssistant = require('~/server/middleware/assistants/validate');
const chatController = require('~/server/controllers/assistants/chatV1');
@@ -21,6 +22,14 @@ router.post('/abort', handleAbort());
 * @param {express.Response} res - The response object, used to send back a response.
 * @returns {void}
 */
- router.post('/', validateModel, buildEndpointOption, validateAssistant, setHeaders, chatController);
+ router.post(
+   '/',
+   validateModel,
+   buildEndpointOption,
+   validateAssistant,
+   validateConvoAccess,
+   setHeaders,
+   chatController,
+ );

module.exports = router;
@@ -8,6 +8,7 @@ const {
  // validateEndpoint,
  buildEndpointOption,
} = require('~/server/middleware');
+ const validateConvoAccess = require('~/server/middleware/validate/convoAccess');
const validateAssistant = require('~/server/middleware/assistants/validate');
const chatController = require('~/server/controllers/assistants/chatV2');
@@ -21,6 +22,14 @@ router.post('/abort', handleAbort());
 * @param {express.Response} res - The response object, used to send back a response.
 * @returns {void}
 */
- router.post('/', validateModel, buildEndpointOption, validateAssistant, setHeaders, chatController);
+ router.post(
+   '/',
+   validateModel,
+   buildEndpointOption,
+   validateAssistant,
+   validateConvoAccess,
+   setHeaders,
+   chatController,
+ );

module.exports = router;
@@ -1,13 +1,6 @@
const express = require('express');
const router = express.Router();
- const {
-   uaParser,
-   checkBan,
-   requireJwtAuth,
-   // concurrentLimiter,
-   // messageIpLimiter,
-   // messageUserLimiter,
- } = require('~/server/middleware');
+ const { uaParser, checkBan, requireJwtAuth } = require('~/server/middleware');

const v1 = require('./v1');
const chatV1 = require('./chatV1');
@@ -21,8 +21,7 @@ const {

const router = express.Router();

- const ldapAuth =
-   !!process.env.LDAP_URL && !!process.env.LDAP_BIND_DN && !!process.env.LDAP_USER_SEARCH_BASE;
+ const ldapAuth = !!process.env.LDAP_URL && !!process.env.LDAP_USER_SEARCH_BASE;
//Local
router.post('/logout', requireJwtAuth, logoutController);
router.post(
15 api/server/routes/categories.js Normal file
@@ -0,0 +1,15 @@
const express = require('express');
const router = express.Router();
const { requireJwtAuth } = require('~/server/middleware');
const { getCategories } = require('~/models/Categories');

router.get('/', requireJwtAuth, async (req, res) => {
  try {
    const categories = await getCategories();
    res.status(200).send(categories);
  } catch (error) {
    res.status(500).send({ message: 'Failed to retrieve categories', error: error.message });
  }
});

module.exports = router;
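A quick client-side usage sketch for the new categories route, using Node 18+ fetch; the /api/categories mount path, base URL, and bearer-token auth are assumptions for illustration:

// BASE_URL, the /api/categories mount, and the JWT are placeholders.
const BASE_URL = 'http://localhost:3080';
const TOKEN = 'replace-with-a-valid-jwt';

fetch(`${BASE_URL}/api/categories`, {
  headers: { Authorization: `Bearer ${TOKEN}` },
})
  .then((res) => res.json())
  .then((categories) => console.log(categories))
  .catch(console.error);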
@@ -1,6 +1,9 @@
const express = require('express');
- const { defaultSocialLogins } = require('librechat-data-provider');
+ const { CacheKeys, defaultSocialLogins } = require('librechat-data-provider');
+ const { getLdapConfig } = require('~/server/services/Config/ldap');
+ const { getProjectByName } = require('~/models/Project');
const { isEnabled } = require('~/server/utils');
+ const { getLogStores } = require('~/cache');
const { logger } = require('~/config');

const router = express.Router();
@@ -17,13 +20,22 @@ const publicSharedLinksEnabled =
  isEnabled(process.env.ALLOW_SHARED_LINKS_PUBLIC));

router.get('/', async function (req, res) {
+ const cache = getLogStores(CacheKeys.CONFIG_STORE);
+ const cachedStartupConfig = await cache.get(CacheKeys.STARTUP_CONFIG);
+ if (cachedStartupConfig) {
+   res.send(cachedStartupConfig);
+   return;
+ }
+
  const isBirthday = () => {
    const today = new Date();
    return today.getMonth() === 1 && today.getDate() === 11;
  };

- const ldapLoginEnabled =
-   !!process.env.LDAP_URL && !!process.env.LDAP_BIND_DN && !!process.env.LDAP_USER_SEARCH_BASE;
+ const instanceProject = await getProjectByName('instance', '_id');
+
+ const ldap = getLdapConfig();

  try {
    /** @type {TStartupConfig} */
    const payload = {
@@ -41,10 +53,9 @@ router.get('/', async function (req, res) {
        !!process.env.OPENID_SESSION_SECRET,
      openidLabel: process.env.OPENID_BUTTON_LABEL || 'Continue with OpenID',
      openidImageUrl: process.env.OPENID_IMAGE_URL,
-     ldapLoginEnabled,
      serverDomain: process.env.DOMAIN_SERVER || 'http://localhost:3080',
      emailLoginEnabled,
-     registrationEnabled: !ldapLoginEnabled && isEnabled(process.env.ALLOW_REGISTRATION),
+     registrationEnabled: !ldap?.enabled && isEnabled(process.env.ALLOW_REGISTRATION),
      socialLoginEnabled: isEnabled(process.env.ALLOW_SOCIAL_LOGIN),
      emailEnabled:
        (!!process.env.EMAIL_SERVICE || !!process.env.EMAIL_HOST) &&
@@ -63,12 +74,18 @@ router.get('/', async function (req, res) {
      sharedLinksEnabled,
      publicSharedLinksEnabled,
      analyticsGtmId: process.env.ANALYTICS_GTM_ID,
+     instanceProjectId: instanceProject._id.toString(),
    };

+   if (ldap) {
+     payload.ldap = ldap;
+   }
+
    if (typeof process.env.CUSTOM_FOOTER === 'string') {
      payload.customFooter = process.env.CUSTOM_FOOTER;
    }

+   await cache.set(CacheKeys.STARTUP_CONFIG, payload);
    return res.status(200).send(payload);
  } catch (err) {
    logger.error('Error in startup config', err);
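The startup config route now checks a config cache before rebuilding the payload and writes the payload back after a successful build. A minimal sketch of that cache-first pattern, with an in-memory Map standing in for getLogStores(CacheKeys.CONFIG_STORE) and a stripped-down payload:

const express = require('express');

// An in-memory Map stands in for the config store used in the diff.
const cache = new Map();
const app = express();

app.get('/api/config', async (req, res) => {
  const cached = cache.get('startupConfig');
  if (cached) {
    return res.send(cached); // serve the cached payload and skip the rebuild
  }
  const payload = { serverDomain: process.env.DOMAIN_SERVER || 'http://localhost:3080' };
  cache.set('startupConfig', payload);
  return res.status(200).send(payload);
});

module.exports = app;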
@@ -8,6 +8,7 @@ const requireJwtAuth = require('~/server/middleware/requireJwtAuth');
const { forkConversation } = require('~/server/utils/import/fork');
const { importConversations } = require('~/server/utils/import');
const { createImportLimiters } = require('~/server/middleware');
+ const { updateTagsForConversation } = require('~/models/ConversationTag');
const getLogStores = require('~/cache/getLogStores');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');
@@ -30,8 +31,14 @@ router.get('/', async (req, res) => {
    return res.status(400).json({ error: 'Invalid page size' });
  }
  const isArchived = req.query.isArchived === 'true';
+ let tags;
+ if (req.query.tags) {
+   tags = Array.isArray(req.query.tags) ? req.query.tags : [req.query.tags];
+ } else {
+   tags = undefined;
+ }

- res.status(200).send(await getConvosByPage(req.user.id, pageNumber, pageSize, isArchived));
+ res.status(200).send(await getConvosByPage(req.user.id, pageNumber, pageSize, isArchived, tags));
});

router.get('/:conversationId', async (req, res) => {
@@ -104,7 +111,7 @@ router.post('/update', async (req, res) => {
  const update = req.body.arg;

  try {
-   const dbResponse = await saveConvo(req.user.id, update);
+   const dbResponse = await saveConvo(req, update, { context: 'POST /api/convos/update' });
    res.status(201).json(dbResponse);
  } catch (error) {
    logger.error('Error updating conversation', error);
@@ -167,4 +174,18 @@ router.post('/fork', async (req, res) => {
  }
});

+ router.put('/tags/:conversationId', async (req, res) => {
+   try {
+     const conversationTags = await updateTagsForConversation(
+       req.user.id,
+       req.params.conversationId,
+       req.body.tags,
+     );
+     res.status(200).json(conversationTags);
+   } catch (error) {
+     logger.error('Error updating conversation tags', error);
+     res.status(500).send('Error updating conversation tags');
+   }
+ });
+
module.exports = router;
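The tags handling added above deals with Express query parsing: ?tags=a arrives as a string while ?tags=a&tags=b arrives as an array, so the route coerces both shapes to an array and passes undefined when the parameter is absent. The same normalization in isolation:

// Express parses ?tags=a as a string and ?tags=a&tags=b as an array;
// the route above coerces both shapes before calling getConvosByPage.
const normalizeTags = (queryTags) => {
  if (!queryTags) {
    return undefined;
  }
  return Array.isArray(queryTags) ? queryTags : [queryTags];
};

console.log(normalizeTags(undefined)); // undefined
console.log(normalizeTags('work')); // ['work']
console.log(normalizeTags(['work', 'ideas'])); // ['work', 'ideas']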
@@ -1,19 +1,20 @@
const express = require('express');
const throttle = require('lodash/throttle');
- const { getResponseSender } = require('librechat-data-provider');
+ const { getResponseSender, CacheKeys, Time } = require('librechat-data-provider');
const {
- handleAbort,
- createAbortController,
- handleAbortError,
  setHeaders,
+ handleAbort,
+ moderateText,
  validateModel,
+ handleAbortError,
  validateEndpoint,
  buildEndpointOption,
- moderateText,
+ createAbortController,
} = require('~/server/middleware');
const { sendMessage, createOnProgress, formatSteps, formatAction } = require('~/server/utils');
const { initializeClient } = require('~/server/services/Endpoints/gptPlugins');
- const { saveMessage, getConvoTitle, getConvo } = require('~/models');
+ const { saveMessage, updateMessage } = require('~/models');
+ const { getLogStores } = require('~/cache');
const { validateTools } = require('~/app');
const { logger } = require('~/config');
@@ -49,6 +50,7 @@ router.post(
  });

  let userMessage;
+ let userMessagePromise;
  let promptTokens;
  const sender = getResponseSender({
    ...endpointOption,
@@ -68,6 +70,8 @@
  for (let key in data) {
    if (key === 'userMessage') {
      userMessage = data[key];
+   } else if (key === 'userMessagePromise') {
+     userMessagePromise = data[key];
    } else if (key === 'responseMessageId') {
      responseMessageId = data[key];
    } else if (key === 'promptTokens') {
@@ -76,7 +80,15 @@
    }
  };

- const throttledSaveMessage = throttle(saveMessage, 3000, { trailing: false });
+ const messageCache = getLogStores(CacheKeys.MESSAGES);
+ const throttledCacheSet = throttle(
+   (text) => {
+     messageCache.set(responseMessageId, text, Time.FIVE_MINUTES);
+   },
+   3000,
+   { trailing: false },
+ );

  const {
    onProgress: progressCallback,
    sendIntermediateMessage,
@@ -87,42 +99,19 @@
  if (plugin.loading === true) {
    plugin.loading = false;
  }

- throttledSaveMessage({
-   messageId: responseMessageId,
-   sender,
-   conversationId,
-   parentMessageId: overrideParentMessageId || userMessageId,
-   text: partialText,
-   model: endpointOption.modelOptions.model,
-   unfinished: true,
-   isEdited: true,
-   error: false,
-   user,
- });
+ throttledCacheSet(partialText);
  },
  });

- const onAgentAction = (action, start = false) => {
-   const formattedAction = formatAction(action);
-   plugin.inputs.push(formattedAction);
-   plugin.latest = formattedAction.plugin;
-   if (!start) {
-     saveMessage({ ...userMessage, user });
-   }
-   sendIntermediateMessage(res, {
-     plugin,
-     parentMessageId: userMessage.messageId,
-     messageId: responseMessageId,
-   });
-   // logger.debug('PLUGIN ACTION', formattedAction);
- };
-
  const onChainEnd = (data) => {
    let { intermediateSteps: steps } = data;
    plugin.outputs = steps && steps[0].action ? formatSteps(steps) : 'An error occurred.';
    plugin.loading = false;
-   saveMessage({ ...userMessage, user });
+   saveMessage(
+     req,
+     { ...userMessage, user },
+     { context: 'api/server/routes/ask/gptPlugins.js - onChainEnd' },
+   );
    sendIntermediateMessage(res, {
      plugin,
      parentMessageId: userMessage.messageId,
@@ -134,6 +123,7 @@
  const getAbortData = () => ({
    sender,
    conversationId,
+   userMessagePromise,
    messageId: responseMessageId,
    parentMessageId: overrideParentMessageId ?? userMessageId,
    text: getPartialText(),
@@ -141,12 +131,31 @@
    userMessage,
    promptTokens,
  });
- const { abortController, onStart } = createAbortController(req, res, getAbortData);
+ const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData);

  try {
    endpointOption.tools = await validateTools(user, endpointOption.tools);
    const { client } = await initializeClient({ req, res, endpointOption });

+   const onAgentAction = (action, start = false) => {
+     const formattedAction = formatAction(action);
+     plugin.inputs.push(formattedAction);
+     plugin.latest = formattedAction.plugin;
+     if (!start && !client.skipSaveUserMessage) {
+       saveMessage(
+         req,
+         { ...userMessage, user },
+         { context: 'api/server/routes/ask/gptPlugins.js - onAgentAction' },
+       );
+     }
+     sendIntermediateMessage(res, {
+       plugin,
+       parentMessageId: userMessage.messageId,
+       messageId: responseMessageId,
+     });
+     // logger.debug('PLUGIN ACTION', formattedAction);
+   };
+
    let response = await client.sendMessage(text, {
      user,
      generation,
@@ -164,7 +173,6 @@
    progressCallback,
    progressOptions: {
      res,
-     text,
      plugin,
      // parentMessageId: overrideParentMessageId || userMessageId,
    },
@@ -176,17 +184,26 @@
  }

  logger.debug('[/edit/gptPlugins] CLIENT RESPONSE', response);
- response.plugin = { ...plugin, loading: false };
- await saveMessage({ ...response, user });

+ const { conversation = {} } = await client.responsePromise;
+ conversation.title =
+   conversation && !conversation.title ? null : conversation?.title || 'New Chat';

  sendMessage(res, {
-   title: await getConvoTitle(user, conversationId),
+   title: conversation.title,
    final: true,
-   conversation: await getConvo(user, conversationId),
+   conversation,
    requestMessage: userMessage,
    responseMessage: response,
  });
  res.end();

+ response.plugin = { ...plugin, loading: false };
+ await updateMessage(
+   req,
+   { ...response, user },
+   { context: 'api/server/routes/edit/gptPlugins.js' },
+ );
  } catch (error) {
    const partialText = getPartialText();
    handleAbortError(res, req, error, {
@@ -13,6 +13,7 @@
  messageIpLimiter,
  concurrentLimiter,
  messageUserLimiter,
+ validateConvoAccess,
} = require('~/server/middleware');

const { LIMIT_CONCURRENT_MESSAGES, LIMIT_MESSAGE_IP, LIMIT_MESSAGE_USER } = process.env ?? {};
@@ -35,6 +36,8 @@
  router.use(messageUserLimiter);
}

+ router.use(validateConvoAccess);
+
router.use([`/${EModelEndpoint.azureOpenAI}`, `/${EModelEndpoint.openAI}`], openAI);
router.use(`/${EModelEndpoint.gptPlugins}`, gptPlugins);
router.use(`/${EModelEndpoint.anthropic}`, anthropic);
@@ -1,19 +1,11 @@
const express = require('express');
- const {
-   uaParser,
-   checkBan,
-   requireJwtAuth,
-   createFileLimiters,
-   createTTSLimiters,
-   createSTTLimiters,
- } = require('~/server/middleware');
+ const { uaParser, checkBan, requireJwtAuth, createFileLimiters } = require('~/server/middleware');
const { createMulterInstance } = require('./multer');

const files = require('./files');
const images = require('./images');
const avatar = require('./avatar');
- const stt = require('./stt');
- const tts = require('./tts');
+ const speech = require('./speech');

const initialize = async () => {
  const router = express.Router();
@@ -21,11 +13,8 @@ const initialize = async () => {
  router.use(checkBan);
  router.use(uaParser);

- /* Important: stt/tts routes must be added before the upload limiters */
- const { sttIpLimiter, sttUserLimiter } = createSTTLimiters();
- const { ttsIpLimiter, ttsUserLimiter } = createTTSLimiters();
- router.use('/stt', sttIpLimiter, sttUserLimiter, stt);
- router.use('/tts', ttsIpLimiter, ttsUserLimiter, tts);
+ /* Important: speech route must be added before the upload limiters */
+ router.use('/speech', speech);

  const upload = await createMulterInstance();
  const { fileUploadIpLimiter, fileUploadUserLimiter } = createFileLimiters();
10 api/server/routes/files/speech/customConfigSpeech.js Normal file
@@ -0,0 +1,10 @@
const express = require('express');
const router = express.Router();

const { getCustomConfigSpeech } = require('~/server/services/Files/Audio');

router.get('/get', async (req, res) => {
  await getCustomConfigSpeech(req, res);
});

module.exports = router;
17 api/server/routes/files/speech/index.js Normal file
@@ -0,0 +1,17 @@
const express = require('express');
const { createTTSLimiters, createSTTLimiters } = require('~/server/middleware');

const stt = require('./stt');
const tts = require('./tts');
const customConfigSpeech = require('./customConfigSpeech');

const router = express.Router();

const { sttIpLimiter, sttUserLimiter } = createSTTLimiters();
const { ttsIpLimiter, ttsUserLimiter } = createTTSLimiters();
router.use('/stt', sttIpLimiter, sttUserLimiter, stt);
router.use('/tts', ttsIpLimiter, ttsUserLimiter, tts);

router.use('/config', customConfigSpeech);

module.exports = router;
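Moving stt/tts under a dedicated speech router preserves the ordering constraint noted in the files router above: routes mounted before the upload limiters are not subject to them, because Express runs middleware in registration order. A small illustrative sketch; the handler and limiter bodies are placeholders, not the real ones:

const express = require('express');

const router = express.Router();
const uploadLimiter = (req, res, next) => next(); // placeholder for the file upload limiters

// Mounted before the limiter, so speech requests never pass through it.
router.use('/speech', (req, res) => res.json({ route: 'speech' }));

// Everything registered after this point goes through the limiter first.
router.use(uploadLimiter);
router.use('/files', (req, res) => res.json({ route: 'files' }));

module.exports = router;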
@@ -19,6 +19,9 @@ const assistants = require('./assistants');
const files = require('./files');
const staticRoute = require('./static');
const share = require('./share');
+ const categories = require('./categories');
+ const roles = require('./roles');
+ const tags = require('./tags');

module.exports = {
  search,
@@ -42,4 +45,7 @@ module.exports = {
  files,
  staticRoute,
  share,
+ categories,
+ roles,
+ tags,
};
@@ -1,49 +1,79 @@
const express = require('express');
- const router = express.Router();
- const {
-   getMessages,
-   updateMessage,
-   saveConvo,
-   saveMessage,
-   deleteMessages,
- } = require('../../models');
- const { countTokens } = require('../utils');
- const { requireJwtAuth, validateMessageReq } = require('../middleware/');
+ const { saveConvo, saveMessage, getMessages, updateMessage, deleteMessages } = require('~/models');
+ const { requireJwtAuth, validateMessageReq } = require('~/server/middleware');
+ const { countTokens } = require('~/server/utils');
+ const { logger } = require('~/config');

+ const router = express.Router();
router.use(requireJwtAuth);

/* Note: It's necessary to add `validateMessageReq` within route definition for correct params */
router.get('/:conversationId', validateMessageReq, async (req, res) => {
- const { conversationId } = req.params;
- res.status(200).send(await getMessages({ conversationId }, '-_id -__v -user'));
+ try {
+   const { conversationId } = req.params;
+   const messages = await getMessages({ conversationId }, '-_id -__v -user');
+   res.status(200).json(messages);
+ } catch (error) {
+   logger.error('Error fetching messages:', error);
+   res.status(500).json({ error: 'Internal server error' });
+ }
});

// CREATE
router.post('/:conversationId', validateMessageReq, async (req, res) => {
- const message = req.body;
- const savedMessage = await saveMessage({ ...message, user: req.user.id });
- await saveConvo(req.user.id, savedMessage);
- res.status(201).send(savedMessage);
+ try {
+   const message = req.body;
+   const savedMessage = await saveMessage(
+     req,
+     { ...message, user: req.user.id },
+     { context: 'POST /api/messages/:conversationId' },
+   );
+   if (!savedMessage) {
+     return res.status(400).json({ error: 'Message not saved' });
+   }
+   await saveConvo(req, savedMessage, { context: 'POST /api/messages/:conversationId' });
+   res.status(201).json(savedMessage);
+ } catch (error) {
+   logger.error('Error saving message:', error);
+   res.status(500).json({ error: 'Internal server error' });
+ }
});

// READ
router.get('/:conversationId/:messageId', validateMessageReq, async (req, res) => {
- const { conversationId, messageId } = req.params;
- res.status(200).send(await getMessages({ conversationId, messageId }, '-_id -__v -user'));
+ try {
+   const { conversationId, messageId } = req.params;
+   const message = await getMessages({ conversationId, messageId }, '-_id -__v -user');
+   if (!message) {
+     return res.status(404).json({ error: 'Message not found' });
+   }
+   res.status(200).json(message);
+ } catch (error) {
+   logger.error('Error fetching message:', error);
+   res.status(500).json({ error: 'Internal server error' });
+ }
});

// UPDATE
router.put('/:conversationId/:messageId', validateMessageReq, async (req, res) => {
- const { messageId, model } = req.params;
- const { text } = req.body;
- const tokenCount = await countTokens(text, model);
- res.status(201).json(await updateMessage({ messageId, text, tokenCount }));
+ try {
+   const { messageId, model } = req.params;
+   const { text } = req.body;
+   const tokenCount = await countTokens(text, model);
+   const result = await updateMessage(req, { messageId, text, tokenCount });
+   res.status(200).json(result);
+ } catch (error) {
+   logger.error('Error updating message:', error);
+   res.status(500).json({ error: 'Internal server error' });
+ }
});

// DELETE
router.delete('/:conversationId/:messageId', validateMessageReq, async (req, res) => {
- const { messageId } = req.params;
- await deleteMessages({ messageId });
- res.status(204).send();
+ try {
+   const { messageId } = req.params;
+   await deleteMessages({ messageId });
+   res.status(204).send();
+ } catch (error) {
+   logger.error('Error deleting message:', error);
+   res.status(500).json({ error: 'Internal server error' });
+ }
});

module.exports = router;
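The rewritten messages router wraps every handler in the same try/catch with logger.error and a 500 response. A common alternative, shown here only as a sketch of the trade-off and not as what this diff does, is a tiny async wrapper that forwards rejections to Express error-handling middleware:

// Sketch of an alternative pattern: forward async errors to Express error
// middleware instead of repeating try/catch in every route handler.
const asyncHandler = (fn) => (req, res, next) => Promise.resolve(fn(req, res, next)).catch(next);

// Hypothetical usage:
// router.get('/:conversationId', validateMessageReq, asyncHandler(async (req, res) => {
//   const messages = await getMessages({ conversationId: req.params.conversationId });
//   res.status(200).json(messages);
// }));

module.exports = asyncHandler;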
@@ -1,14 +1,235 @@
const express = require('express');
const { PermissionTypes, Permissions, SystemRoles } = require('librechat-data-provider');
const {
  getPrompt,
  getPrompts,
  savePrompt,
  deletePrompt,
  getPromptGroup,
  getPromptGroups,
  updatePromptGroup,
  deletePromptGroup,
  createPromptGroup,
  getAllPromptGroups,
  // updatePromptLabels,
  makePromptProduction,
} = require('~/models/Prompt');
const { requireJwtAuth, generateCheckAccess } = require('~/server/middleware');
const { logger } = require('~/config');

const router = express.Router();
- const { getPrompts } = require('../../models/Prompt');

const checkPromptAccess = generateCheckAccess(PermissionTypes.PROMPTS, [Permissions.USE]);
const checkPromptCreate = generateCheckAccess(PermissionTypes.PROMPTS, [
  Permissions.USE,
  Permissions.CREATE,
]);
const checkGlobalPromptShare = generateCheckAccess(
  PermissionTypes.PROMPTS,
  [Permissions.USE, Permissions.CREATE],
  {
    [Permissions.SHARED_GLOBAL]: ['projectIds', 'removeProjectIds'],
  },
);

router.use(requireJwtAuth);
router.use(checkPromptAccess);

/**
 * Route to get single prompt group by its ID
 * GET /groups/:groupId
 */
router.get('/groups/:groupId', async (req, res) => {
  let groupId = req.params.groupId;
  const author = req.user.id;

  const query = {
    _id: groupId,
    $or: [{ projectIds: { $exists: true, $ne: [], $not: { $size: 0 } } }, { author }],
  };

  if (req.user.role === SystemRoles.ADMIN) {
    delete query.$or;
  }

  try {
    const group = await getPromptGroup(query);

    if (!group) {
      return res.status(404).send({ message: 'Prompt group not found' });
    }

    res.status(200).send(group);
  } catch (error) {
    logger.error('Error getting prompt group', error);
    res.status(500).send({ message: 'Error getting prompt group' });
  }
});

/**
 * Route to fetch all prompt groups
 * GET /groups
 */
router.get('/all', async (req, res) => {
  try {
    const groups = await getAllPromptGroups(req, {
      author: req.user._id,
    });
    res.status(200).send(groups);
  } catch (error) {
    logger.error(error);
    res.status(500).send({ error: 'Error getting prompt groups' });
  }
});

/**
 * Route to fetch paginated prompt groups with filters
 * GET /groups
 */
router.get('/groups', async (req, res) => {
  try {
    const filter = req.query;
    /* Note: The aggregation requires an ObjectId */
    filter.author = req.user._id;
    const groups = await getPromptGroups(req, filter);
    res.status(200).send(groups);
  } catch (error) {
    logger.error(error);
    res.status(500).send({ error: 'Error getting prompt groups' });
  }
});

/**
 * Updates or creates a prompt + promptGroup
 * @param {object} req
 * @param {TCreatePrompt} req.body
 * @param {Express.Response} res
 */
const createPrompt = async (req, res) => {
  try {
    const { prompt, group } = req.body;
    if (!prompt) {
      return res.status(400).send({ error: 'Prompt is required' });
    }

    const saveData = {
      prompt,
      group,
      author: req.user.id,
      authorName: req.user.name,
    };

    /** @type {TCreatePromptResponse} */
    let result;
    if (group && group.name) {
      result = await createPromptGroup(saveData);
    } else {
      result = await savePrompt(saveData);
    }
    res.status(200).send(result);
  } catch (error) {
    logger.error(error);
    res.status(500).send({ error: 'Error saving prompt' });
  }
};

router.post('/', createPrompt);

/**
 * Updates a prompt group
 * @param {object} req
 * @param {object} req.params - The request parameters
 * @param {string} req.params.groupId - The group ID
 * @param {TUpdatePromptGroupPayload} req.body - The request body
 * @param {Express.Response} res
 */
const patchPromptGroup = async (req, res) => {
  try {
    const { groupId } = req.params;
    const author = req.user.id;
    const filter = { _id: groupId, author };
    if (req.user.role === SystemRoles.ADMIN) {
      delete filter.author;
    }
    const promptGroup = await updatePromptGroup(filter, req.body);
    res.status(200).send(promptGroup);
  } catch (error) {
    logger.error(error);
    res.status(500).send({ error: 'Error updating prompt group' });
  }
};

router.patch('/groups/:groupId', checkGlobalPromptShare, patchPromptGroup);

router.patch('/:promptId/tags/production', checkPromptCreate, async (req, res) => {
  try {
    const { promptId } = req.params;
    const result = await makePromptProduction(promptId);
    res.status(200).send(result);
  } catch (error) {
    logger.error(error);
    res.status(500).send({ error: 'Error updating prompt production' });
  }
});

router.get('/:promptId', async (req, res) => {
  const { promptId } = req.params;
  const author = req.user.id;
  const query = { _id: promptId, author };
  if (req.user.role === SystemRoles.ADMIN) {
    delete query.author;
  }
  const prompt = await getPrompt(query);
  res.status(200).send(prompt);
});

router.get('/', async (req, res) => {
- let filter = {};
- // const { search } = req.body.arg;
- // if (!!search) {
- // filter = { conversationId };
- // }
- res.status(200).send(await getPrompts(filter));
  try {
    const author = req.user.id;
    const { groupId } = req.query;
    const query = { groupId, author };
    if (req.user.role === SystemRoles.ADMIN) {
      delete query.author;
    }
    const prompts = await getPrompts(query);
    res.status(200).send(prompts);
  } catch (error) {
    logger.error(error);
    res.status(500).send({ error: 'Error getting prompts' });
  }
});

/**
 * Deletes a prompt
 *
 * @param {Express.Request} req - The request object.
 * @param {TDeletePromptVariables} req.params - The request parameters
 * @param {import('mongoose').ObjectId} req.params.promptId - The prompt ID
 * @param {Express.Response} res - The response object.
 * @return {TDeletePromptResponse} A promise that resolves when the prompt is deleted.
 */
const deletePromptController = async (req, res) => {
  try {
    const { promptId } = req.params;
    const { groupId } = req.query;
    const author = req.user.id;
    const query = { promptId, groupId, author, role: req.user.role };
    if (req.user.role === SystemRoles.ADMIN) {
      delete query.author;
    }
    const result = await deletePrompt(query);
    res.status(200).send(result);
  } catch (error) {
    logger.error(error);
    res.status(500).send({ error: 'Error deleting prompt' });
  }
};

router.delete('/:promptId', checkPromptCreate, deletePromptController);

router.delete('/groups/:groupId', checkPromptCreate, async (req, res) => {
  const { groupId } = req.params;
  res.status(200).send(await deletePromptGroup(groupId));
});

module.exports = router;
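Several handlers above repeat one access-scoping pattern: build a query constrained to the requesting author, then drop that constraint for admins. The pattern in isolation, with plain strings standing in for SystemRoles:

// Non-admin queries are always constrained to the requesting author;
// admins query without that constraint.
const buildScopedQuery = (user, base) => {
  const query = { ...base, author: user.id };
  if (user.role === 'ADMIN') {
    delete query.author;
  }
  return query;
};

console.log(buildScopedQuery({ id: 'u1', role: 'USER' }, { _id: 'p1' })); // { _id: 'p1', author: 'u1' }
console.log(buildScopedQuery({ id: 'u2', role: 'ADMIN' }, { _id: 'p1' })); // { _id: 'p1' }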
Some files were not shown because too many files have changed in this diff.