Compare commits


1 Commit

Author: Danny Avila
SHA1: 78283e1686
Message: 🤖 : Azure Assistants V2
Date: 2024-05-21 17:01:49 -04:00
952 changed files with 19444 additions and 75151 deletions

View File

@@ -64,9 +64,6 @@ PROXY=
# ANYSCALE_API_KEY=
# APIPIE_API_KEY=
# COHERE_API_KEY=
# DEEPSEEK_API_KEY=
# DATABRICKS_API_KEY=
# FIREWORKS_API_KEY=
# GROQ_API_KEY=
# HUGGINGFACE_TOKEN=
@@ -75,20 +72,20 @@ PROXY=
# PERPLEXITY_API_KEY=
# SHUTTLEAI_API_KEY=
# TOGETHERAI_API_KEY=
# UNIFY_API_KEY=
#============#
# Anthropic #
#============#
ANTHROPIC_API_KEY=user_provided
# ANTHROPIC_MODELS=claude-3-5-sonnet-20240620,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_MODELS=claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_REVERSE_PROXY=
#============#
# Azure #
#============#
# Note: these variables are DEPRECATED
# Use the `librechat.yaml` configuration for `azureOpenAI` instead
# You may also continue to use them if you opt out of using the `librechat.yaml` configuration
@@ -111,26 +108,6 @@ ANTHROPIC_API_KEY=user_provided
BINGAI_TOKEN=user_provided
# BINGAI_HOST=https://cn.bing.com
#=================#
# AWS Bedrock #
#=================#
# BEDROCK_AWS_DEFAULT_REGION=us-east-1 # A default region must be provided
# BEDROCK_AWS_ACCESS_KEY_ID=someAccessKey
# BEDROCK_AWS_SECRET_ACCESS_KEY=someSecretAccessKey
# Note: This example list is not meant to be exhaustive. If omitted, all known, supported model IDs will be included for you.
# BEDROCK_AWS_MODELS=anthropic.claude-3-5-sonnet-20240620-v1:0,meta.llama3-1-8b-instruct-v1:0
# See all Bedrock model IDs here: https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html#model-ids-arns
# Notes on specific models:
# The following models are not supported due to not supporting streaming:
# ai21.j2-mid-v1
# The following models are not supported due to not supporting conversation history:
# ai21.j2-ultra-v1, cohere.command-text-v14, cohere.command-light-text-v14
#============#
# Google #
#============#
@@ -138,38 +115,32 @@ BINGAI_TOKEN=user_provided
GOOGLE_KEY=user_provided
# GOOGLE_REVERSE_PROXY=
# Gemini API (AI Studio)
# Gemini API
# GOOGLE_MODELS=gemini-1.5-flash-latest,gemini-1.0-pro,gemini-1.0-pro-001,gemini-1.0-pro-latest,gemini-1.0-pro-vision-latest,gemini-1.5-pro-latest,gemini-pro,gemini-pro-vision
# Vertex AI
# GOOGLE_MODELS=gemini-1.5-flash-preview-0514,gemini-1.5-pro-preview-0514,gemini-1.0-pro-vision-001,gemini-1.0-pro-002,gemini-1.0-pro-001,gemini-pro-vision,gemini-1.0-pro
# GOOGLE_MODELS=gemini-1.5-flash-preview-0514,gemini-1.5-pro-preview-0409,gemini-1.0-pro-vision-001,gemini-pro,gemini-pro-vision,chat-bison,chat-bison-32k,codechat-bison,codechat-bison-32k,text-bison,text-bison-32k,text-unicorn,code-gecko,code-bison,code-bison-32k
# GOOGLE_TITLE_MODEL=gemini-pro
# Google Safety Settings
# NOTE: These settings apply to both Vertex AI and Gemini API (AI Studio)
# Google Gemini Safety Settings
# NOTE (Vertex AI): You do not have access to the BLOCK_NONE setting by default.
# To use this restricted HarmBlockThreshold setting, you will need to either:
#
# For Vertex AI:
# To use the BLOCK_NONE setting, you need either:
# (a) Access through an allowlist via your Google account team, or
# (b) Switch to monthly invoiced billing: https://cloud.google.com/billing/docs/how-to/invoiced-billing
#
# For Gemini API (AI Studio):
# BLOCK_NONE is available by default, no special account requirements.
#
# Available options: BLOCK_NONE, BLOCK_ONLY_HIGH, BLOCK_MEDIUM_AND_ABOVE, BLOCK_LOW_AND_ABOVE
# (a) Get access through an allowlist via your Google account team
# (b) Switch your account type to monthly invoiced billing following this instruction:
# https://cloud.google.com/billing/docs/how-to/invoiced-billing
#
# GOOGLE_SAFETY_SEXUALLY_EXPLICIT=BLOCK_ONLY_HIGH
# GOOGLE_SAFETY_HATE_SPEECH=BLOCK_ONLY_HIGH
# GOOGLE_SAFETY_HARASSMENT=BLOCK_ONLY_HIGH
# GOOGLE_SAFETY_DANGEROUS_CONTENT=BLOCK_ONLY_HIGH
#============#
# OpenAI #
#============#
OPENAI_API_KEY=user_provided
# OPENAI_MODELS=gpt-4o,chatgpt-4o-latest,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
# OPENAI_MODELS=gpt-4o,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
DEBUG_OPENAI=false
@@ -191,7 +162,7 @@ DEBUG_OPENAI=false
ASSISTANTS_API_KEY=user_provided
# ASSISTANTS_BASE_URL=
# ASSISTANTS_MODELS=gpt-4o,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview
# ASSISTANTS_MODELS=gpt-4o,gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview
#==========================#
# Azure Assistants API #
@@ -213,7 +184,7 @@ ASSISTANTS_API_KEY=user_provided
# Plugins #
#============#
# PLUGIN_MODELS=gpt-4o,gpt-4o-mini,gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613
# PLUGIN_MODELS=gpt-4o,gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613
DEBUG_PLUGINS=true
@@ -286,23 +257,6 @@ MEILI_NO_ANALYTICS=true
MEILI_HOST=http://0.0.0.0:7700
MEILI_MASTER_KEY=DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFCt
#==================================================#
# Speech to Text & Text to Speech #
#==================================================#
STT_API_KEY=
TTS_API_KEY=
#==================================================#
# RAG #
#==================================================#
# More info: https://www.librechat.ai/docs/configuration/rag_api
# RAG_OPENAI_BASEURL=
# RAG_OPENAI_API_KEY=
# EMBEDDINGS_PROVIDER=openai
# EMBEDDINGS_MODEL=text-embedding-3-small
#===================================================#
# User System #
#===================================================#
@@ -357,9 +311,6 @@ ALLOW_EMAIL_LOGIN=true
ALLOW_REGISTRATION=true
ALLOW_SOCIAL_LOGIN=false
ALLOW_SOCIAL_REGISTRATION=false
ALLOW_PASSWORD_RESET=false
# ALLOW_ACCOUNT_DELETION=true # note: enabled by default if omitted/commented out
ALLOW_UNVERIFIED_EMAIL_LOGIN=true
SESSION_EXPIRY=1000 * 60 * 15
REFRESH_TOKEN_EXPIRY=(1000 * 60 * 60 * 24) * 7
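For reference, the two expiry values above are millisecond expressions; worked out (illustrative constants only, not code taken from the repository):

    const sessionExpiryMs = 1000 * 60 * 15;               // 900,000 ms = 15 minutes
    const refreshTokenExpiryMs = 1000 * 60 * 60 * 24 * 7; // 604,800,000 ms = 7 days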
@@ -401,19 +352,6 @@ OPENID_REQUIRED_ROLE_PARAMETER_PATH=
OPENID_BUTTON_LABEL=
OPENID_IMAGE_URL=
# LDAP
LDAP_URL=
LDAP_BIND_DN=
LDAP_BIND_CREDENTIALS=
LDAP_USER_SEARCH_BASE=
LDAP_SEARCH_FILTER=mail={{username}}
LDAP_CA_CERT_PATH=
# LDAP_TLS_REJECT_UNAUTHORIZED=
# LDAP_LOGIN_USES_USERNAME=true
# LDAP_ID=
# LDAP_USERNAME=
# LDAP_FULL_NAME=
#========================#
# Email Password Reset #
#========================#
@@ -440,25 +378,6 @@ FIREBASE_STORAGE_BUCKET=
FIREBASE_MESSAGING_SENDER_ID=
FIREBASE_APP_ID=
#========================#
# Shared Links #
#========================#
ALLOW_SHARED_LINKS=true
ALLOW_SHARED_LINKS_PUBLIC=true
#==============================#
# Static File Cache Control #
#==============================#
# Leave commented out to use defaults: 1 day (86400 seconds) for s-maxage and 2 days (172800 seconds) for max-age
# NODE_ENV must be set to production for these to take effect
# STATIC_CACHE_MAX_AGE=172800
# STATIC_CACHE_S_MAX_AGE=86400
# If you have another service in front of your LibreChat doing compression, disable express based compression here
# DISABLE_COMPRESSION=true
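As a rough illustration of how these settings are typically applied (a hypothetical Express sketch, assuming the values feed a Cache-Control header and the standard compression middleware; the directory path and variable handling are assumptions, not LibreChat's actual server code):

    import express from 'express';
    import compression from 'compression';

    const app = express();
    const maxAge = Number(process.env.STATIC_CACHE_MAX_AGE ?? 172800);   // seconds, browser cache (max-age)
    const sMaxAge = Number(process.env.STATIC_CACHE_S_MAX_AGE ?? 86400); // seconds, shared caches (s-maxage)

    // Skip express-based compression when a service in front already compresses responses
    if (process.env.DISABLE_COMPRESSION !== 'true') {
      app.use(compression());
    }

    app.use(
      express.static('client/dist', {
        setHeaders: (res) => {
          res.setHeader('Cache-Control', `public, max-age=${maxAge}, s-maxage=${sMaxAge}`);
        },
      }),
    );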
#===================================================#
# UI #
#===================================================#
@@ -469,9 +388,6 @@ HELP_AND_FAQ_URL=https://librechat.ai
# SHOW_BIRTHDAY_ICON=true
# Google tag manager id
#ANALYTICS_GTM_ID=user provided google tag manager id
#==================================================#
# Others #
#==================================================#

View File

@@ -12,7 +12,6 @@ module.exports = {
'plugin:react-hooks/recommended',
'plugin:jest/recommended',
'prettier',
'plugin:jsx-a11y/recommended',
],
ignorePatterns: [
'client/dist/**/*',
@@ -33,7 +32,7 @@ module.exports = {
jsx: true,
},
},
plugins: ['react', 'react-hooks', '@typescript-eslint', 'import', 'jsx-a11y'],
plugins: ['react', 'react-hooks', '@typescript-eslint', 'import'],
rules: {
'react/react-in-jsx-scope': 'off',
'@typescript-eslint/ban-ts-comment': ['error', { 'ts-ignore': 'allow' }],
@@ -66,7 +65,6 @@ module.exports = {
'no-restricted-syntax': 'off',
'react/prop-types': ['off'],
'react/display-name': ['off'],
'no-nested-ternary': 'error',
'no-unused-vars': ['error', { varsIgnorePattern: '^_' }],
quotes: ['error', 'single'],
},
@@ -120,8 +118,6 @@ module.exports = {
],
rules: {
'@typescript-eslint/no-explicit-any': 'error',
'@typescript-eslint/no-unnecessary-condition': 'warn',
'@typescript-eslint/strict-boolean-expressions': 'warn',
},
},
{

View File

@@ -126,18 +126,6 @@ Apply the following naming conventions to branches, labels, and other Git-relate
- **Current Stance**: At present, this backend transition is of lower priority and might not be pursued.
## 7. Module Import Conventions
- `npm` packages first,
- from shortest line (top) to longest (bottom)
- Followed by typescript types (pertains to data-provider and client workspaces)
- longest line (top) to shortest (bottom)
- types from package come first
- Lastly, local imports
- longest line (top) to shortest (bottom)
- imports with alias `~` treated the same as relative import with respect to line length
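To make the ordering concrete, here is a hedged illustration of an import block in the client workspace that follows the conventions above (the specific modules are examples, not a prescribed list):

    // npm packages first, shortest line (top) to longest (bottom)
    import { useState } from 'react';
    import { useRecoilValue } from 'recoil';
    import { useQueryClient } from '@tanstack/react-query';
    // TypeScript types next, longest (top) to shortest (bottom); types from packages come first
    import type { TConversation, TMessage } from 'librechat-data-provider';
    import type { ChatFormValues } from '~/common';
    // local imports last, longest (top) to shortest (bottom); the `~` alias counts as a relative import
    import { useChatContext, useToastContext } from '~/Providers';
    import { buildDefaultConvo } from '~/utils';
    import store from '~/store';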
---

View File

@@ -1,26 +0,0 @@
name: Lint for accessibility issues
on:
pull_request:
paths:
- 'client/src/**'
workflow_dispatch:
inputs:
run_workflow:
description: 'Set to true to run this workflow'
required: true
default: 'false'
jobs:
axe-linter:
runs-on: ubuntu-latest
if: >
(github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == 'danny-avila/LibreChat') ||
(github.event_name == 'workflow_dispatch' && github.event.inputs.run_workflow == 'true')
steps:
- uses: actions/checkout@v4
- uses: dequelabs/axe-linter-action@v1
with:
api_key: ${{ secrets.AXE_LINTER_API_KEY }}
github_token: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -1,41 +0,0 @@
name: Update Test Server
on:
workflow_run:
workflows: ["Docker Dev Images Build"]
types:
- completed
workflow_dispatch:
jobs:
deploy:
runs-on: ubuntu-latest
if: |
github.repository == 'danny-avila/LibreChat' &&
(github.event_name == 'workflow_dispatch' || github.event.workflow_run.conclusion == 'success')
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Install SSH Key
uses: shimataro/ssh-key-action@v2
with:
key: ${{ secrets.DO_SSH_PRIVATE_KEY }}
known_hosts: ${{ secrets.DO_KNOWN_HOSTS }}
- name: Run update script on DigitalOcean Droplet
env:
DO_HOST: ${{ secrets.DO_HOST }}
DO_USER: ${{ secrets.DO_USER }}
run: |
ssh -o StrictHostKeyChecking=no ${DO_USER}@${DO_HOST} << EOF
sudo -i -u danny bash << EEOF
cd ~/LibreChat && \
git fetch origin main && \
npm run update:deployed && \
git checkout do-deploy && \
git rebase main && \
npm run start:deployed && \
echo "Update completed. Application should be running now."
EEOF
EOF

View File

@@ -53,4 +53,4 @@ jobs:
- name: Run unit tests
run: npm run test:ci --verbose
working-directory: client
working-directory: client

View File

@@ -1,35 +0,0 @@
name: Build Helm Charts on Tag
# The workflow is triggered when a tag is pushed
on:
push:
tags:
- "*"
jobs:
release:
permissions:
contents: write
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Configure Git
run: |
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Install Helm
uses: azure/setup-helm@v4
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
- name: Run chart-releaser
uses: helm/chart-releaser-action@v1.6.0
with:
charts_dir: helmchart
env:
CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"

.gitignore (2 lines changed)
View File

@@ -11,7 +11,6 @@ logs
pids
*.pid
*.seed
.git
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
@@ -46,7 +45,6 @@ api/node_modules/
client/node_modules/
bower_components/
*.d.ts
!vite-env.d.ts
# Floobits
.floo

.vscode/launch.json (16 lines changed)
View File

@@ -1,16 +0,0 @@
{
"version": "0.2.0",
"configurations": [
{
"type": "node",
"request": "launch",
"name": "Launch LibreChat (debug)",
"skipFiles": ["<node_internals>/**"],
"program": "${workspaceFolder}/api/server/index.js",
"env": {
"NODE_ENV": "production"
},
"console": "integratedTerminal"
}
]
}

View File

@@ -1,4 +1,4 @@
# v0.7.5-rc2
# v0.7.2
# Base node image
FROM node:20-alpine AS node

View File

@@ -1,44 +1,43 @@
# Dockerfile.multi
# v0.7.5-rc2
# v0.7.2
# Base for all builds
# Build API, Client and Data Provider
FROM node:20-alpine AS base
WORKDIR /app
RUN apk --no-cache add curl
RUN npm config set fetch-retry-maxtimeout 600000 && \
npm config set fetch-retries 5 && \
npm config set fetch-retry-mintimeout 15000
COPY package*.json ./
COPY packages/data-provider/package*.json ./packages/data-provider/
COPY client/package*.json ./client/
COPY api/package*.json ./api/
RUN npm ci
# Build data-provider
FROM base AS data-provider-build
WORKDIR /app/packages/data-provider
COPY packages/data-provider ./
COPY ./packages/data-provider ./
RUN npm install; npm cache clean --force
RUN npm run build
RUN npm prune --production
# Client build
# React client build
FROM base AS client-build
WORKDIR /app/client
COPY client ./
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
COPY ./client/package*.json ./
# Copy data-provider to client's node_modules
COPY --from=data-provider-build /app/packages/data-provider/ /app/client/node_modules/librechat-data-provider/
RUN npm install; npm cache clean --force
COPY ./client/ ./
ENV NODE_OPTIONS="--max-old-space-size=2048"
RUN npm run build
RUN npm prune --production
# API setup (including client dist)
# Node API setup
FROM base AS api-build
WORKDIR /app
COPY api ./api
COPY config ./config
COPY --from=data-provider-build /app/packages/data-provider/dist ./packages/data-provider/dist
COPY --from=client-build /app/client/dist ./client/dist
WORKDIR /app/api
RUN npm prune --production
COPY api/package*.json ./
COPY api/ ./
# Copy helper scripts
COPY config/ ./
# Copy data-provider to API's node_modules
COPY --from=data-provider-build /app/packages/data-provider/ /app/api/node_modules/librechat-data-provider/
RUN npm install --include prod; npm cache clean --force
COPY --from=client-build /app/client/dist /app/client/dist
EXPOSE 3080
ENV HOST=0.0.0.0
CMD ["node", "server/index.js"]
# Nginx setup
FROM nginx:1.21.1-alpine AS prod-stage
COPY ./client/nginx.conf /etc/nginx/conf.d/default.conf
CMD ["nginx", "-g", "daemon off;"]

View File

@@ -27,7 +27,7 @@
</p>
<p align="center">
<a href="https://railway.app/template/b5k2mn?referralCode=HI9hWz">
<a href="https://railway.app/template/b5k2mn?referralCode=myKrVZ">
<img src="https://railway.app/button.svg" alt="Deploy on Railway" height="30">
</a>
<a href="https://zeabur.com/templates/0X2ZY8">
@@ -42,17 +42,15 @@
- 🖥️ UI matching ChatGPT, including Dark mode, Streaming, and latest updates
- 🤖 AI model selection:
- Anthropic (Claude), AWS Bedrock, OpenAI, Azure OpenAI, BingAI, ChatGPT, Google Vertex AI, Plugins, Assistants API (including Azure Assistants)
- OpenAI, Azure OpenAI, BingAI, ChatGPT, Google Vertex AI, Anthropic (Claude), Plugins, Assistants API (including Azure Assistants)
- ✅ Compatible across both **[Remote & Local AI services](https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints):**
- groq, Ollama, Cohere, Mistral AI, Apple MLX, koboldcpp, OpenRouter, together.ai, Perplexity, ShuttleAI, and more
- 🪄 Generative UI with **[Code Artifacts](https://youtu.be/GfTj7O4gmd0?si=WJbdnemZpJzBrJo3)**
- Create React, HTML code, and Mermaid diagrams right in chat
- 💾 Create, Save, & Share Custom Presets
- 🔀 Switch between AI Endpoints and Presets, mid-chat
- 🔄 Edit, Resubmit, and Continue Messages with Conversation branching
- 🌿 Fork Messages & Conversations for Advanced Context control
- 💬 Multimodal Chat:
- Upload and analyze images with Claude 3, GPT-4 (including `gpt-4o` and `gpt-4o-mini`), and Gemini Vision 📸
- Upload and analyze images with Claude 3, GPT-4 (including `gpt-4o`), and Gemini Vision 📸
- Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, & Google. 🗃️
- Advanced Agents with Files, Code Interpreter, Tools, and API Actions 🔦
- Available through the [OpenAI Assistants API](https://platform.openai.com/docs/assistants/overview) 🌤️
@@ -60,13 +58,9 @@
- 🌎 Multilingual UI:
- English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro,
- Русский, 日本語, Svenska, 한국어, Tiếng Việt, 繁體中文, العربية, Türkçe, Nederlands, עברית
- 🎨 Customizable Dropdown & Interface: Adapts to both power users and newcomers
- 📧 Verify your email to ensure secure access
- 🗣️ Chat hands-free with Speech-to-Text and Text-to-Speech magic
- Automatically send and play Audio
- Supports OpenAI, Azure OpenAI, and Elevenlabs
- 🎨 Customizable Dropdown & Interface: Adapts to both power users and newcomers.
- 📥 Import Conversations from LibreChat, ChatGPT, Chatbot UI
- 📤 Export conversations as screenshots, markdown, text, json
- 📤 Export conversations as screenshots, markdown, text, json.
- 🔍 Search all messages/conversations
- 🔌 Plugins, including web access, image generation with DALL-E-3 and more
- 👥 Multi-User, Secure Authentication with Moderation and Token spend tools
@@ -83,7 +77,7 @@ LibreChat brings together the future of assistant AIs with the revolutionary tec
With LibreChat, you no longer need to opt for ChatGPT Plus and can instead use free or pay-per-call APIs. We welcome contributions, cloning, and forking to enhance the capabilities of this advanced chatbot platform.
[![Watch the video](https://raw.githubusercontent.com/LibreChat-AI/librechat.ai/main/public/images/changelog/v0.7.4.png)](https://www.youtube.com/watch?v=cvosUxogdpI)
[![Watch the video](https://img.youtube.com/vi/YLVUW5UP9N0/maxresdefault.jpg)](https://www.youtube.com/watch?v=YLVUW5UP9N0)
Click on the thumbnail to open the video☝
---

View File

@@ -1,25 +1,20 @@
const Anthropic = require('@anthropic-ai/sdk');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
Constants,
EModelEndpoint,
anthropicSettings,
getResponseSender,
EModelEndpoint,
validateVisionModel,
} = require('librechat-data-provider');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const {
truncateText,
formatMessage,
addCacheControl,
titleFunctionPrompt,
parseParamFromPrompt,
createContextHandlers,
} = require('./prompts');
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
const { getModelMaxTokens, matchModelName } = require('~/utils');
const { sleep } = require('~/server/utils');
const spendTokens = require('~/models/spendTokens');
const { getModelMaxTokens } = require('~/utils');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
@@ -33,9 +28,6 @@ function delayBeforeRetry(attempts, baseDelay = 1000) {
return new Promise((resolve) => setTimeout(resolve, baseDelay * attempts));
}
const tokenEventTypes = new Set(['message_start', 'message_delta']);
const { legacy } = anthropicSettings;
class AnthropicClient extends BaseClient {
constructor(apiKey, options = {}) {
super(apiKey, options);
@@ -46,24 +38,6 @@ class AnthropicClient extends BaseClient {
? options.contextStrategy.toLowerCase()
: 'discard';
this.setOptions(options);
/** @type {string | undefined} */
this.systemMessage;
/** @type {AnthropicMessageStartEvent| undefined} */
this.message_start;
/** @type {AnthropicMessageDeltaEvent| undefined} */
this.message_delta;
/** Whether the model is part of the Claude 3 Family
* @type {boolean} */
this.isClaude3;
/** Whether to use Messages API or Completions API
* @type {boolean} */
this.useMessages;
/** Whether or not the model is limited to the legacy amount of output tokens
* @type {boolean} */
this.isLegacyOutput;
/** Whether or not the model supports Prompt Caching
* @type {boolean} */
this.supportsCacheControl;
}
setOptions(options) {
@@ -83,28 +57,18 @@ class AnthropicClient extends BaseClient {
this.options = options;
}
this.modelOptions = Object.assign(
{
model: anthropicSettings.model.default,
},
this.modelOptions,
this.options.modelOptions,
);
const modelMatch = matchModelName(this.modelOptions.model, EModelEndpoint.anthropic);
this.isClaude3 = modelMatch.startsWith('claude-3');
this.isLegacyOutput = !modelMatch.startsWith('claude-3-5-sonnet');
this.supportsCacheControl =
this.options.promptCache && this.checkPromptCacheSupport(modelMatch);
if (
this.isLegacyOutput &&
this.modelOptions.maxOutputTokens &&
this.modelOptions.maxOutputTokens > legacy.maxOutputTokens.default
) {
this.modelOptions.maxOutputTokens = legacy.maxOutputTokens.default;
}
const modelOptions = this.options.modelOptions || {};
this.modelOptions = {
...modelOptions,
// set some good defaults (check for undefined in some cases because they may be 0)
model: modelOptions.model || 'claude-1',
temperature: typeof modelOptions.temperature === 'undefined' ? 1 : modelOptions.temperature, // 0 - 1, 1 is default
topP: typeof modelOptions.topP === 'undefined' ? 0.7 : modelOptions.topP, // 0 - 1, default: 0.7
topK: typeof modelOptions.topK === 'undefined' ? 40 : modelOptions.topK, // 1-40, default: 40
stop: modelOptions.stop, // no stop method for now
};
this.isClaude3 = this.modelOptions.model.includes('claude-3');
this.useMessages = this.isClaude3 || !!this.options.attachments;
this.defaultVisionModel = this.options.visionModel ?? 'claude-3-sonnet-20240229';
@@ -154,92 +118,25 @@ class AnthropicClient extends BaseClient {
/**
* Get the initialized Anthropic client.
* @param {Partial<Anthropic.ClientOptions>} requestOptions - The options for the client.
* @returns {Anthropic} The Anthropic client instance.
*/
getClient(requestOptions) {
/** @type {Anthropic.ClientOptions} */
getClient() {
/** @type {Anthropic.default.RequestOptions} */
const options = {
fetch: this.fetch,
apiKey: this.apiKey,
};
if (this.options.proxy) {
options.httpAgent = new HttpsProxyAgent(this.options.proxy);
}
if (this.options.reverseProxyUrl) {
options.baseURL = this.options.reverseProxyUrl;
}
if (
this.supportsCacheControl &&
requestOptions?.model &&
requestOptions.model.includes('claude-3-5-sonnet')
) {
options.defaultHeaders = {
'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15,prompt-caching-2024-07-31',
};
} else if (this.supportsCacheControl) {
options.defaultHeaders = {
'anthropic-beta': 'prompt-caching-2024-07-31',
};
}
return new Anthropic(options);
}
/**
* Get stream usage as returned by this client's API response.
* @returns {AnthropicStreamUsage} The stream usage object.
*/
getStreamUsage() {
const inputUsage = this.message_start?.message?.usage ?? {};
const outputUsage = this.message_delta?.usage ?? {};
return Object.assign({}, inputUsage, outputUsage);
}
/**
* Calculates the correct token count for the current message based on the token count map and API usage.
* Edge case: If the calculation results in a negative value, it returns the original estimate.
* If revisiting a conversation with a chat history entirely composed of token estimates,
* the cumulative token count going forward should become more accurate as the conversation progresses.
* @param {Object} params - The parameters for the calculation.
* @param {Record<string, number>} params.tokenCountMap - A map of message IDs to their token counts.
* @param {string} params.currentMessageId - The ID of the current message to calculate.
* @param {AnthropicStreamUsage} params.usage - The usage object returned by the API.
* @returns {number} The correct token count for the current message.
*/
calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage }) {
const originalEstimate = tokenCountMap[currentMessageId] || 0;
if (!usage || typeof usage.input_tokens !== 'number') {
return originalEstimate;
}
tokenCountMap[currentMessageId] = 0;
const totalTokensFromMap = Object.values(tokenCountMap).reduce((sum, count) => {
const numCount = Number(count);
return sum + (isNaN(numCount) ? 0 : numCount);
}, 0);
const totalInputTokens =
(usage.input_tokens ?? 0) +
(usage.cache_creation_input_tokens ?? 0) +
(usage.cache_read_input_tokens ?? 0);
const currentMessageTokens = totalInputTokens - totalTokensFromMap;
return currentMessageTokens > 0 ? currentMessageTokens : originalEstimate;
}
/**
* Get Token Count for LibreChat Message
* @param {TMessage} responseMessage
* @returns {number}
*/
getTokenCountForResponse(responseMessage) {
getTokenCountForResponse(response) {
return this.getTokenCountForMessage({
role: 'assistant',
content: responseMessage.text,
content: response.text,
});
}
@@ -292,38 +189,7 @@ class AnthropicClient extends BaseClient {
return files;
}
/**
* @param {object} params
* @param {number} params.promptTokens
* @param {number} params.completionTokens
* @param {AnthropicStreamUsage} [params.usage]
* @param {string} [params.model]
* @param {string} [params.context='message']
* @returns {Promise<void>}
*/
async recordTokenUsage({ promptTokens, completionTokens, usage, model, context = 'message' }) {
if (usage != null && usage?.input_tokens != null) {
const input = usage.input_tokens ?? 0;
const write = usage.cache_creation_input_tokens ?? 0;
const read = usage.cache_read_input_tokens ?? 0;
await spendStructuredTokens(
{
context,
user: this.user,
conversationId: this.conversationId,
model: model ?? this.modelOptions.model,
endpointTokenConfig: this.options.endpointTokenConfig,
},
{
promptTokens: { input, write, read },
completionTokens,
},
);
return;
}
async recordTokenUsage({ promptTokens, completionTokens, model, context = 'message' }) {
await spendTokens(
{
context,
@@ -492,10 +358,7 @@ class AnthropicClient extends BaseClient {
identityPrefix = `${identityPrefix}\nYou are ${this.options.modelLabel}`;
}
let promptPrefix = (this.options.promptPrefix ?? '').trim();
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
}
let promptPrefix = (this.options.promptPrefix || '').trim();
if (promptPrefix) {
// If the prompt prefix doesn't end with the end token, add it.
if (!promptPrefix.endsWith(`${this.endToken}`)) {
@@ -674,18 +537,6 @@ class AnthropicClient extends BaseClient {
: await client.completions.create(options);
}
/**
* @param {string} modelName
* @returns {boolean}
*/
checkPromptCacheSupport(modelName) {
const modelMatch = matchModelName(modelName, EModelEndpoint.anthropic);
if (modelMatch === 'claude-3-5-sonnet' || modelMatch === 'claude-3-haiku') {
return true;
}
return false;
}
async sendCompletion(payload, { onProgress, abortController }) {
if (!abortController) {
abortController = new AbortController();
@@ -699,6 +550,8 @@ class AnthropicClient extends BaseClient {
}
logger.debug('modelOptions', { modelOptions });
const client = this.getClient();
const metadata = {
user_id: this.user,
};
@@ -726,28 +579,16 @@ class AnthropicClient extends BaseClient {
if (this.useMessages) {
requestOptions.messages = payload;
requestOptions.max_tokens = maxOutputTokens || legacy.maxOutputTokens.default;
requestOptions.max_tokens = maxOutputTokens || 1500;
} else {
requestOptions.prompt = payload;
requestOptions.max_tokens_to_sample = maxOutputTokens || 1500;
}
if (this.systemMessage && this.supportsCacheControl === true) {
requestOptions.system = [
{
type: 'text',
text: this.systemMessage,
cache_control: { type: 'ephemeral' },
},
];
} else if (this.systemMessage) {
if (this.systemMessage) {
requestOptions.system = this.systemMessage;
}
if (this.supportsCacheControl === true && this.useMessages) {
requestOptions.messages = addCacheControl(requestOptions.messages);
}
logger.debug('[AnthropicClient]', { ...requestOptions });
const handleChunk = (currentChunk) => {
@@ -758,14 +599,12 @@ class AnthropicClient extends BaseClient {
};
const maxRetries = 3;
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
async function processResponse() {
let attempts = 0;
while (attempts < maxRetries) {
let response;
try {
const client = this.getClient(requestOptions);
response = await this.createResponse(client, requestOptions);
signal.addEventListener('abort', () => {
@@ -777,18 +616,11 @@ class AnthropicClient extends BaseClient {
for await (const completion of response) {
// Handle each completion as before
const type = completion?.type ?? '';
if (tokenEventTypes.has(type)) {
logger.debug(`[AnthropicClient] ${type}`, completion);
this[type] = completion;
}
if (completion?.delta?.text) {
handleChunk(completion.delta.text);
} else if (completion.completion) {
handleChunk(completion.completion);
}
await sleep(streamRate);
}
// Successful processing, exit loop
@@ -823,10 +655,8 @@ class AnthropicClient extends BaseClient {
getSaveOptions() {
return {
maxContextTokens: this.options.maxContextTokens,
artifacts: this.options.artifacts,
promptPrefix: this.options.promptPrefix,
modelLabel: this.options.modelLabel,
promptCache: this.options.promptCache,
resendFiles: this.options.resendFiles,
iconURL: this.options.iconURL,
greeting: this.options.greeting,
@@ -872,8 +702,6 @@ class AnthropicClient extends BaseClient {
*/
async titleConvo({ text, responseText = '' }) {
let title = 'New Chat';
this.message_delta = undefined;
this.message_start = undefined;
const convo = `<initial_message>
${truncateText(text)}
</initial_message>
@@ -903,11 +731,7 @@ class AnthropicClient extends BaseClient {
};
try {
const response = await this.createResponse(
this.getClient(requestOptions),
requestOptions,
true,
);
const response = await this.createResponse(this.getClient(), requestOptions, true);
let promptTokens = response?.usage?.input_tokens;
let completionTokens = response?.usage?.output_tokens;
if (!promptTokens) {

View File

@@ -1,19 +1,9 @@
const crypto = require('crypto');
const fetch = require('node-fetch');
const {
supportsBalanceCheck,
isAgentsEndpoint,
paramEndpoints,
ErrorTypes,
Constants,
CacheKeys,
Time,
} = require('librechat-data-provider');
const { getMessages, saveMessage, updateMessage, saveConvo } = require('~/models');
const { supportsBalanceCheck, Constants } = require('librechat-data-provider');
const { getConvo, getMessages, saveMessage, updateMessage, saveConvo } = require('~/models');
const { addSpaceIfNeeded, isEnabled } = require('~/server/utils');
const checkBalance = require('~/models/checkBalance');
const { getFiles } = require('~/models/File');
const { getLogStores } = require('~/cache');
const TextStream = require('./TextStream');
const { logger } = require('~/config');
@@ -27,21 +17,6 @@ class BaseClient {
month: 'long',
day: 'numeric',
});
this.fetch = this.fetch.bind(this);
/** @type {boolean} */
this.skipSaveConvo = false;
/** @type {boolean} */
this.skipSaveUserMessage = false;
/** @type {ClientDatabaseSavePromise} */
this.userMessagePromise;
/** @type {ClientDatabaseSavePromise} */
this.responsePromise;
/** @type {string} */
this.user;
/** @type {string} */
this.conversationId;
/** @type {string} */
this.responseMessageId;
}
setOptions() {
@@ -68,33 +43,10 @@ class BaseClient {
throw new Error('Subclasses attempted to call summarizeMessages without implementing it');
}
/**
* @returns {string}
*/
getResponseModel() {
if (isAgentsEndpoint(this.options.endpoint) && this.options.agent && this.options.agent.id) {
return this.options.agent.id;
}
return this.modelOptions.model;
async getTokenCountForResponse(response) {
logger.debug('`[BaseClient] recordTokenUsage` not implemented.', response);
}
/**
* Abstract method to get the token count for a message. Subclasses must implement this method.
* @param {TMessage} responseMessage
* @returns {number}
*/
getTokenCountForResponse(responseMessage) {
logger.debug('`[BaseClient] recordTokenUsage` not implemented.', responseMessage);
}
/**
* Abstract method to record token usage. Subclasses must implement this method.
* If a correction to the token usage is needed, the method should return an object with the corrected token counts.
* @param {number} promptTokens
* @param {number} completionTokens
* @returns {Promise<void>}
*/
async recordTokenUsage({ promptTokens, completionTokens }) {
logger.debug('`[BaseClient] recordTokenUsage` not implemented.', {
promptTokens,
@@ -102,25 +54,6 @@ class BaseClient {
});
}
/**
* Makes an HTTP request and logs the process.
*
* @param {RequestInfo} url - The URL to make the request to. Can be a string or a Request object.
* @param {RequestInit} [init] - Optional init options for the request.
* @returns {Promise<Response>} - A promise that resolves to the response of the fetch request.
*/
async fetch(_url, init) {
let url = _url;
if (this.options.directEndpoint) {
url = this.options.reverseProxyUrl;
}
logger.debug(`Making request to ${url}`);
if (typeof Bun !== 'undefined') {
return await fetch(url, init);
}
return await fetch(url, init);
}
getBuildMessagesOptions() {
throw new Error('Subclasses must implement getBuildMessagesOptions');
}
@@ -130,45 +63,19 @@ class BaseClient {
await stream.processTextStream(onProgress);
}
/**
* @returns {[string|undefined, string|undefined]}
*/
processOverideIds() {
/** @type {Record<string, string | undefined>} */
let { overrideConvoId, overrideUserMessageId } = this.options?.req?.body ?? {};
if (overrideConvoId) {
const [conversationId, index] = overrideConvoId.split(Constants.COMMON_DIVIDER);
overrideConvoId = conversationId;
if (index !== '0') {
this.skipSaveConvo = true;
}
}
if (overrideUserMessageId) {
const [userMessageId, index] = overrideUserMessageId.split(Constants.COMMON_DIVIDER);
overrideUserMessageId = userMessageId;
if (index !== '0') {
this.skipSaveUserMessage = true;
}
}
return [overrideConvoId, overrideUserMessageId];
}
async setMessageOptions(opts = {}) {
if (opts && opts.replaceOptions) {
this.setOptions(opts);
}
const [overrideConvoId, overrideUserMessageId] = this.processOverideIds();
const { isEdited, isContinued } = opts;
const user = opts.user ?? null;
this.user = user;
const saveOptions = this.getSaveOptions();
this.abortController = opts.abortController ?? new AbortController();
const conversationId = overrideConvoId ?? opts.conversationId ?? crypto.randomUUID();
const conversationId = opts.conversationId ?? crypto.randomUUID();
const parentMessageId = opts.parentMessageId ?? Constants.NO_PARENT;
const userMessageId =
overrideUserMessageId ?? opts.overrideParentMessageId ?? crypto.randomUUID();
const userMessageId = opts.overrideParentMessageId ?? crypto.randomUUID();
let responseMessageId = opts.responseMessageId ?? crypto.randomUUID();
let head = isEdited ? responseMessageId : parentMessageId;
this.currentMessages = (await this.loadHistory(conversationId, head)) ?? [];
@@ -180,8 +87,6 @@ class BaseClient {
this.currentMessages[this.currentMessages.length - 1].messageId = head;
}
this.responseMessageId = responseMessageId;
return {
...opts,
user,
@@ -230,12 +135,11 @@ class BaseClient {
userMessage,
conversationId,
responseMessageId,
sender: this.sender,
});
}
if (typeof opts?.onStart === 'function') {
opts.onStart(userMessage, responseMessageId);
opts.onStart(userMessage);
}
return {
@@ -369,12 +273,7 @@ class BaseClient {
};
}
async handleContextStrategy({
instructions,
orderedMessages,
formattedMessages,
buildTokenMap = true,
}) {
async handleContextStrategy({ instructions, orderedMessages, formattedMessages }) {
let _instructions;
let tokenCount;
@@ -416,10 +315,9 @@ class BaseClient {
const latestMessage = orderedWithInstructions[orderedWithInstructions.length - 1];
if (payload.length === 0 && !shouldSummarize && latestMessage) {
const info = `${latestMessage.tokenCount} / ${this.maxContextTokens}`;
const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
logger.warn(`Prompt token count exceeds max token count (${info}).`);
throw new Error(errorMessage);
throw new Error(
`Prompt token count of ${latestMessage.tokenCount} exceeds max token count of ${this.maxContextTokens}.`,
);
}
if (usePrevSummary) {
@@ -444,23 +342,19 @@ class BaseClient {
maxContextTokens: this.maxContextTokens,
});
/** @type {Record<string, number> | undefined} */
let tokenCountMap;
if (buildTokenMap) {
tokenCountMap = orderedWithInstructions.reduce((map, message, index) => {
const { messageId } = message;
if (!messageId) {
return map;
}
if (shouldSummarize && index === summaryIndex && !usePrevSummary) {
map.summaryMessage = { ...summaryMessage, messageId, tokenCount: summaryTokenCount };
}
map[messageId] = orderedWithInstructions[index].tokenCount;
let tokenCountMap = orderedWithInstructions.reduce((map, message, index) => {
const { messageId } = message;
if (!messageId) {
return map;
}, {});
}
}
if (shouldSummarize && index === summaryIndex && !usePrevSummary) {
map.summaryMessage = { ...summaryMessage, messageId, tokenCount: summaryTokenCount };
}
map[messageId] = orderedWithInstructions[index].tokenCount;
return map;
}, {});
const promptTokens = this.maxContextTokens - remainingContextTokens;
@@ -479,14 +373,6 @@ class BaseClient {
const { user, head, isEdited, conversationId, responseMessageId, saveOptions, userMessage } =
await this.handleStartMethods(message, opts);
if (opts.progressCallback) {
opts.onProgress = opts.progressCallback.call(null, {
...(opts.progressOptions ?? {}),
parentMessageId: userMessage.messageId,
messageId: responseMessageId,
});
}
const { generation = '' } = opts;
// It's not necessary to push to currentMessages
@@ -535,13 +421,8 @@ class BaseClient {
this.handleTokenCountMap(tokenCountMap);
}
if (!isEdited && !this.skipSaveUserMessage) {
this.userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user);
if (typeof opts?.getReqData === 'function') {
opts.getReqData({
userMessagePromise: this.userMessagePromise,
});
}
if (!isEdited) {
await this.saveMessageToDatabase(userMessage, saveOptions, user);
}
if (
@@ -562,7 +443,6 @@ class BaseClient {
});
}
/** @type {string|string[]|undefined} */
const completion = await this.sendCompletion(payload, opts);
this.abortController.requestCompleted = true;
@@ -572,126 +452,32 @@ class BaseClient {
parentMessageId: userMessage.messageId,
isCreatedByUser: false,
isEdited,
model: this.getResponseModel(),
model: this.modelOptions.model,
sender: this.sender,
text: addSpaceIfNeeded(generation) + completion,
promptTokens,
iconURL: this.options.iconURL,
endpoint: this.options.endpoint,
...(this.metadata ?? {}),
};
if (typeof completion === 'string') {
responseMessage.text = addSpaceIfNeeded(generation) + completion;
} else if (Array.isArray(completion) && paramEndpoints.has(this.options.endpoint)) {
responseMessage.text = '';
responseMessage.content = completion;
} else if (Array.isArray(completion)) {
responseMessage.text = addSpaceIfNeeded(generation) + completion.join('');
}
if (
tokenCountMap &&
this.recordTokenUsage &&
this.getTokenCountForResponse &&
this.getTokenCount
) {
let completionTokens;
/**
* Metadata about input/output costs for the current message. The client
* should provide a function to get the current stream usage metadata; if not,
* use the legacy token estimations.
* @type {StreamUsage | null} */
const usage = this.getStreamUsage != null ? this.getStreamUsage() : null;
if (usage != null && Number(usage.output_tokens) > 0) {
responseMessage.tokenCount = usage.output_tokens;
completionTokens = responseMessage.tokenCount;
await this.updateUserMessageTokenCount({ usage, tokenCountMap, userMessage, opts });
} else {
responseMessage.tokenCount = this.getTokenCountForResponse(responseMessage);
completionTokens = this.getTokenCount(completion);
}
await this.recordTokenUsage({ promptTokens, completionTokens, usage });
responseMessage.tokenCount = this.getTokenCountForResponse(responseMessage);
const completionTokens = this.getTokenCount(completion);
await this.recordTokenUsage({ promptTokens, completionTokens });
}
if (this.userMessagePromise) {
await this.userMessagePromise;
}
this.responsePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user);
const messageCache = getLogStores(CacheKeys.MESSAGES);
messageCache.set(
responseMessageId,
{
text: responseMessage.text,
complete: true,
},
Time.FIVE_MINUTES,
);
await this.saveMessageToDatabase(responseMessage, saveOptions, user);
delete responseMessage.tokenCount;
return responseMessage;
}
/**
* Stream usage should only be used for user message token count re-calculation if:
* - The stream usage is available, with input tokens greater than 0,
* - the client provides a function to calculate the current token count,
* - files are being resent with every message (default behavior; or if `false`, with no attachments),
* - the `promptPrefix` (custom instructions) is not set.
*
* In these cases, the legacy token estimations would be more accurate.
*
* TODO: included system messages in the `orderedMessages` accounting, potentially as a
* separate message in the UI. ChatGPT does this through "hidden" system messages.
* @param {object} params
* @param {StreamUsage} params.usage
* @param {Record<string, number>} params.tokenCountMap
* @param {TMessage} params.userMessage
* @param {object} params.opts
*/
async updateUserMessageTokenCount({ usage, tokenCountMap, userMessage, opts }) {
/** @type {boolean} */
const shouldUpdateCount =
this.calculateCurrentTokenCount != null &&
Number(usage.input_tokens) > 0 &&
(this.options.resendFiles ||
(!this.options.resendFiles && !this.options.attachments?.length)) &&
!this.options.promptPrefix;
if (!shouldUpdateCount) {
return;
}
const userMessageTokenCount = this.calculateCurrentTokenCount({
currentMessageId: userMessage.messageId,
tokenCountMap,
usage,
});
if (userMessageTokenCount === userMessage.tokenCount) {
return;
}
userMessage.tokenCount = userMessageTokenCount;
/*
Note: `AskController` saves the user message, so we update the count of its `userMessage` reference
*/
if (typeof opts?.getReqData === 'function') {
opts.getReqData({
userMessage,
});
}
/*
Note: we update the user message to be sure it gets the calculated token count;
though `AskController` saves the user message, EditController does not
*/
await this.userMessagePromise;
await this.updateMessageInDatabase({
messageId: userMessage.messageId,
tokenCount: userMessageTokenCount,
});
async getConversation(conversationId, user = null) {
return await getConvo(user, conversationId);
}
async loadHistory(conversationId, parentMessageId = null) {
@@ -748,45 +534,22 @@ class BaseClient {
* @param {string | null} user
*/
async saveMessageToDatabase(message, endpointOptions, user = null) {
if (this.user && user !== this.user) {
throw new Error('User mismatch.');
}
const savedMessage = await saveMessage(
this.options.req,
{
...message,
endpoint: this.options.endpoint,
unfinished: false,
user,
},
{ context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveMessage' },
);
if (this.skipSaveConvo) {
return { message: savedMessage };
}
const conversation = await saveConvo(
this.options.req,
{
conversationId: message.conversationId,
endpoint: this.options.endpoint,
endpointType: this.options.endpointType,
...endpointOptions,
},
{ context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo' },
);
return { message: savedMessage, conversation };
await saveMessage({
...message,
endpoint: this.options.endpoint,
unfinished: false,
user,
});
await saveConvo(user, {
conversationId: message.conversationId,
endpoint: this.options.endpoint,
endpointType: this.options.endpointType,
...endpointOptions,
});
}
/**
* Update a message in the database.
* @param {Partial<TMessage>} message
*/
async updateMessageInDatabase(message) {
await updateMessage(this.options.req, message);
await updateMessage(message);
}
/**
@@ -908,12 +671,8 @@ class BaseClient {
processValue(nestedValue);
}
} else if (typeof value === 'string') {
} else {
numTokens += this.getTokenCount(value);
} else if (typeof value === 'number') {
numTokens += this.getTokenCount(value.toString());
} else if (typeof value === 'boolean') {
numTokens += this.getTokenCount(value.toString());
}
};

View File

@@ -438,17 +438,9 @@ class ChatGPTClient extends BaseClient {
if (message.eventType === 'text-generation' && message.text) {
onTokenProgress(message.text);
reply += message.text;
}
/*
Cohere API Chinese Unicode character replacement hotfix.
Should be un-commented when the following issue is resolved:
https://github.com/cohere-ai/cohere-typescript/issues/151
else if (message.eventType === 'stream-end' && message.response) {
} else if (message.eventType === 'stream-end' && message.response) {
reply = message.response.text;
}
*/
}
return reply;

View File

@@ -13,20 +13,13 @@ const {
endpointSettings,
EModelEndpoint,
VisionModes,
Constants,
AuthKeys,
} = require('librechat-data-provider');
const { encodeAndFormat } = require('~/server/services/Files/images');
const { formatMessage, createContextHandlers } = require('./prompts');
const { getModelMaxTokens } = require('~/utils');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');
const {
formatMessage,
createContextHandlers,
titleInstruction,
truncateText,
} = require('./prompts');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
const loc = 'us-central1';
const publisher = 'google';
@@ -120,7 +113,19 @@ class GoogleClient extends BaseClient {
.filter((ex) => ex)
.filter((obj) => obj.input.content !== '' && obj.output.content !== '');
this.modelOptions = this.options.modelOptions || {};
const modelOptions = this.options.modelOptions || {};
this.modelOptions = {
...modelOptions,
// set some good defaults (check for undefined in some cases because they may be 0)
model: modelOptions.model || settings.model.default,
temperature:
typeof modelOptions.temperature === 'undefined'
? settings.temperature.default
: modelOptions.temperature,
topP: typeof modelOptions.topP === 'undefined' ? settings.topP.default : modelOptions.topP,
topK: typeof modelOptions.topK === 'undefined' ? settings.topK.default : modelOptions.topK,
// stop: modelOptions.stop // no stop method for now
};
this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));
@@ -390,13 +395,8 @@ class GoogleClient extends BaseClient {
parameters: this.modelOptions,
};
let promptPrefix = (this.options.promptPrefix ?? '').trim();
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
}
if (promptPrefix) {
payload.instances[0].context = promptPrefix;
if (this.options.promptPrefix) {
payload.instances[0].context = this.options.promptPrefix;
}
if (this.options.examples.length > 0) {
@@ -450,10 +450,7 @@ class GoogleClient extends BaseClient {
identityPrefix = `${identityPrefix}\nYou are ${this.options.modelLabel}`;
}
let promptPrefix = (this.options.promptPrefix ?? '').trim();
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
}
let promptPrefix = (this.options.promptPrefix || '').trim();
if (promptPrefix) {
// If the prompt prefix doesn't end with the end token, add it.
if (!promptPrefix.endsWith(`${this.endToken}`)) {
@@ -594,16 +591,12 @@ class GoogleClient extends BaseClient {
createLLM(clientOptions) {
const model = clientOptions.modelName ?? clientOptions.model;
if (this.project_id && this.isTextModel) {
logger.debug('Creating Google VertexAI client');
return new GoogleVertexAI(clientOptions);
} else if (this.project_id && this.isChatModel) {
logger.debug('Creating Chat Google VertexAI client');
return new ChatGoogleVertexAI(clientOptions);
} else if (this.project_id) {
logger.debug('Creating VertexAI client');
return new ChatVertexAI(clientOptions);
} else if (model.includes('1.5')) {
logger.debug('Creating GenAI client');
return new GenAI(this.apiKey).getGenerativeModel(
{
...clientOptions,
@@ -613,14 +606,12 @@ class GoogleClient extends BaseClient {
);
}
logger.debug('Creating Chat Google Generative AI client');
return new ChatGoogleGenerativeAI({ ...clientOptions, apiKey: this.apiKey });
}
async getCompletion(_payload, options = {}) {
const { parameters, instances } = _payload;
const { onProgress, abortController } = options;
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
const { parameters, instances } = _payload;
const { messages: _messages, context, examples: _examples } = instances?.[0] ?? {};
let examples;
@@ -673,27 +664,24 @@ class GoogleClient extends BaseClient {
const modelName = clientOptions.modelName ?? clientOptions.model ?? '';
if (modelName?.includes('1.5') && !this.project_id) {
/** @type {GenerativeModel} */
const client = model;
const requestOptions = {
contents: _payload,
};
let promptPrefix = (this.options.promptPrefix ?? '').trim();
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
}
if (this.options?.promptPrefix?.length) {
requestOptions.systemInstruction = {
parts: [
{
text: promptPrefix,
text: this.options.promptPrefix,
},
],
};
}
requestOptions.safetySettings = _payload.safetySettings;
const safetySettings = _payload.safetySettings;
requestOptions.safetySettings = safetySettings;
const delay = modelName.includes('flash') ? 8 : 14;
const result = await client.generateContentStream(requestOptions);
@@ -703,28 +691,21 @@ class GoogleClient extends BaseClient {
delay,
});
reply += chunkText;
await sleep(streamRate);
}
return reply;
}
const safetySettings = _payload.safetySettings;
const stream = await model.stream(messages, {
signal: abortController.signal,
timeout: 7000,
safetySettings: _payload.safetySettings,
safetySettings: safetySettings,
});
let delay = this.options.streamRate || 8;
if (!this.options.streamRate) {
if (this.isGenerativeModel) {
delay = 12;
}
if (modelName.includes('flash')) {
delay = 5;
}
let delay = this.isGenerativeModel ? 12 : 8;
if (modelName.includes('flash')) {
delay = 5;
}
for await (const chunk of stream) {
const chunkText = chunk?.content ?? chunk;
await this.generateTextStream(chunkText, onProgress, {
@@ -736,131 +717,8 @@ class GoogleClient extends BaseClient {
return reply;
}
/**
* Stripped-down logic for generating a title. This uses the non-streaming APIs, since the user does not see titles streaming
*/
async titleChatCompletion(_payload, options = {}) {
const { abortController } = options;
const { parameters, instances } = _payload;
const { messages: _messages, examples: _examples } = instances?.[0] ?? {};
let clientOptions = { ...parameters, maxRetries: 2 };
logger.debug('Initialized title client options');
if (this.project_id) {
clientOptions['authOptions'] = {
credentials: {
...this.serviceKey,
},
projectId: this.project_id,
};
}
if (!parameters) {
clientOptions = { ...clientOptions, ...this.modelOptions };
}
if (this.isGenerativeModel && !this.project_id) {
clientOptions.modelName = clientOptions.model;
delete clientOptions.model;
}
const model = this.createLLM(clientOptions);
let reply = '';
const messages = this.isTextModel ? _payload.trim() : _messages;
const modelName = clientOptions.modelName ?? clientOptions.model ?? '';
if (modelName?.includes('1.5') && !this.project_id) {
logger.debug('Identified titling model as 1.5 version');
/** @type {GenerativeModel} */
const client = model;
const requestOptions = {
contents: _payload,
};
let promptPrefix = (this.options.promptPrefix ?? '').trim();
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
}
if (this.options?.promptPrefix?.length) {
requestOptions.systemInstruction = {
parts: [
{
text: promptPrefix,
},
],
};
}
const safetySettings = _payload.safetySettings;
requestOptions.safetySettings = safetySettings;
const result = await client.generateContent(requestOptions);
reply = result.response?.text();
return reply;
} else {
logger.debug('Beginning titling');
const safetySettings = _payload.safetySettings;
const titleResponse = await model.invoke(messages, {
signal: abortController.signal,
timeout: 7000,
safetySettings: safetySettings,
});
reply = titleResponse.content;
// TODO: RECORD TOKEN USAGE
return reply;
}
}
async titleConvo({ text, responseText = '' }) {
let title = 'New Chat';
const convo = `||>User:
"${truncateText(text)}"
||>Response:
"${JSON.stringify(truncateText(responseText))}"`;
let { prompt: payload } = await this.buildMessages([
{
text: `Please generate ${titleInstruction}
${convo}
||>Title:`,
isCreatedByUser: true,
author: this.userLabel,
},
]);
if (this.isVisionModel) {
logger.warn(
`Current vision model does not support titling without an attachment; falling back to default model ${settings.model.default}`,
);
payload.parameters = { ...payload.parameters, model: settings.model.default };
}
try {
title = await this.titleChatCompletion(payload, {
abortController: new AbortController(),
onProgress: () => {},
});
} catch (e) {
logger.error('[GoogleClient] There was an issue generating the title', e);
}
logger.debug(`Title response: ${title}`);
return title;
}
getSaveOptions() {
return {
artifacts: this.options.artifacts,
promptPrefix: this.options.promptPrefix,
modelLabel: this.options.modelLabel,
iconURL: this.options.iconURL,
@@ -875,36 +733,38 @@ class GoogleClient extends BaseClient {
}
async sendCompletion(payload, opts = {}) {
payload.safetySettings = this.getSafetySettings();
const modelName = payload.parameters?.model;
if (modelName && modelName.toLowerCase().includes('gemini')) {
const safetySettings = [
{
category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
threshold:
process.env.GOOGLE_SAFETY_SEXUALLY_EXPLICIT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
{
category: 'HARM_CATEGORY_HATE_SPEECH',
threshold: process.env.GOOGLE_SAFETY_HATE_SPEECH || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
{
category: 'HARM_CATEGORY_HARASSMENT',
threshold: process.env.GOOGLE_SAFETY_HARASSMENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
{
category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
threshold:
process.env.GOOGLE_SAFETY_DANGEROUS_CONTENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
];
payload.safetySettings = safetySettings;
}
let reply = '';
reply = await this.getCompletion(payload, opts);
return reply.trim();
}
getSafetySettings() {
return [
{
category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
threshold:
process.env.GOOGLE_SAFETY_SEXUALLY_EXPLICIT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
{
category: 'HARM_CATEGORY_HATE_SPEECH',
threshold: process.env.GOOGLE_SAFETY_HATE_SPEECH || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
{
category: 'HARM_CATEGORY_HARASSMENT',
threshold: process.env.GOOGLE_SAFETY_HARASSMENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
{
category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
threshold:
process.env.GOOGLE_SAFETY_DANGEROUS_CONTENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
];
}
/* TO-DO: Handle tokens with Google tokenization NOTE: these are required */
static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {
if (tokenizersCache[encoding]) {

View File

@@ -1,9 +1,7 @@
const { z } = require('zod');
const axios = require('axios');
const { Ollama } = require('ollama');
const { Constants } = require('librechat-data-provider');
const { deriveBaseURL } = require('~/utils');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');
const ollamaPayloadSchema = z.object({
@@ -42,7 +40,6 @@ const getValidBase64 = (imageUrl) => {
class OllamaClient {
constructor(options = {}) {
const host = deriveBaseURL(options.baseURL ?? 'http://localhost:11434');
this.streamRate = options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
/** @type {Ollama} */
this.client = new Ollama({ host });
}
@@ -139,8 +136,6 @@ class OllamaClient {
stream.controller.abort();
break;
}
await sleep(this.streamRate);
}
}
// TODO: regular completion

View File

@@ -6,7 +6,6 @@ const {
ImageDetail,
EModelEndpoint,
resolveHeaders,
openAISettings,
ImageDetailCost,
CohereConstants,
getResponseSender,
@@ -28,9 +27,9 @@ const {
createContextHandlers,
} = require('./prompts');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { spendTokens } = require('~/models/spendTokens');
const { isEnabled, sleep } = require('~/server/utils');
const { handleOpenAIErrors } = require('./tools/util');
const spendTokens = require('~/models/spendTokens');
const { createLLM, RunManager } = require('./llm');
const ChatGPTClient = require('./ChatGPTClient');
const { summaryBuffer } = require('./memory');
@@ -86,13 +85,26 @@ class OpenAIClient extends BaseClient {
this.apiKey = this.options.openaiApiKey;
}
this.modelOptions = Object.assign(
{
model: openAISettings.model.default,
},
this.modelOptions,
this.options.modelOptions,
);
const modelOptions = this.options.modelOptions || {};
if (!this.modelOptions) {
this.modelOptions = {
...modelOptions,
model: modelOptions.model || 'gpt-3.5-turbo',
temperature:
typeof modelOptions.temperature === 'undefined' ? 0.8 : modelOptions.temperature,
top_p: typeof modelOptions.top_p === 'undefined' ? 1 : modelOptions.top_p,
presence_penalty:
typeof modelOptions.presence_penalty === 'undefined' ? 1 : modelOptions.presence_penalty,
stop: modelOptions.stop,
};
} else {
// Update the modelOptions if it already exists
this.modelOptions = {
...this.modelOptions,
...modelOptions,
};
}
this.defaultVisionModel = this.options.visionModel ?? 'gpt-4-vision-preview';
if (typeof this.options.attachments?.then === 'function') {
@@ -401,7 +413,6 @@ class OpenAIClient extends BaseClient {
getSaveOptions() {
return {
artifacts: this.options.artifacts,
maxContextTokens: this.options.maxContextTokens,
chatGptLabel: this.options.chatGptLabel,
promptPrefix: this.options.promptPrefix,
@@ -464,9 +475,6 @@ class OpenAIClient extends BaseClient {
let promptTokens;
promptPrefix = (promptPrefix || this.options.promptPrefix || '').trim();
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
}
if (this.options.attachments) {
const attachments = await this.options.attachments;
@@ -580,7 +588,7 @@ class OpenAIClient extends BaseClient {
let streamResult = null;
this.modelOptions.user = this.user;
const invalidBaseUrl = this.completionsUrl && extractBaseURL(this.completionsUrl) === null;
const useOldMethod = !!(invalidBaseUrl || !this.isChatCompletion);
const useOldMethod = !!(invalidBaseUrl || !this.isChatCompletion || typeof Bun !== 'undefined');
if (typeof opts.onProgress === 'function' && useOldMethod) {
const completionResult = await this.getCompletion(
payload,
@@ -819,7 +827,7 @@ class OpenAIClient extends BaseClient {
const instructionsPayload = [
{
role: this.options.titleMessageRole ?? (this.isOllama ? 'user' : 'system'),
role: 'system',
content: `Please generate ${titleInstruction}
${convo}
@@ -1023,7 +1031,7 @@ ${convo}
async chatCompletion({ payload, onProgress, abortController = null }) {
let error = null;
const errorCallback = (err) => (error = err);
const intermediateReply = [];
let intermediateReply = '';
try {
if (!abortController) {
abortController = new AbortController();
@@ -1098,12 +1106,7 @@ ${convo}
}
if (this.azure || this.options.azure) {
/* Azure Bug, extremely short default `max_tokens` response */
if (!modelOptions.max_tokens && modelOptions.model === 'gpt-4-vision-preview') {
modelOptions.max_tokens = 4000;
}
/* Azure does not accept `model` in the body, so we need to remove it. */
// Azure does not accept `model` in the body, so we need to remove it.
delete modelOptions.model;
opts.baseURL = this.langchainProxy
@@ -1124,7 +1127,6 @@ ${convo}
let chatCompletion;
/** @type {OpenAI} */
const openai = new OpenAI({
fetch: this.fetch,
apiKey: this.apiKey,
...opts,
});
@@ -1174,10 +1176,8 @@ ${convo}
});
}
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
if (this.message_file_map && this.isOllama) {
const ollamaClient = new OllamaClient({ baseURL, streamRate });
const ollamaClient = new OllamaClient({ baseURL });
return await ollamaClient.chatCompletion({
payload: modelOptions,
onProgress,
@@ -1186,15 +1186,7 @@ ${convo}
}
let UnexpectedRoleError = false;
/** @type {Promise<void>} */
let streamPromise;
/** @type {(value: void | PromiseLike<void>) => void} */
let streamResolve;
if (modelOptions.stream) {
streamPromise = new Promise((resolve) => {
streamResolve = resolve;
});
const stream = await openai.beta.chat.completions
.stream({
...modelOptions,
@@ -1206,41 +1198,38 @@ ${convo}
.on('error', (err) => {
handleOpenAIErrors(err, errorCallback, 'stream');
})
.on('finalChatCompletion', async (finalChatCompletion) => {
.on('finalChatCompletion', (finalChatCompletion) => {
const finalMessage = finalChatCompletion?.choices?.[0]?.message;
if (!finalMessage) {
return;
}
await streamPromise;
if (finalMessage?.role !== 'assistant') {
if (finalMessage && finalMessage?.role !== 'assistant') {
finalChatCompletion.choices[0].message.role = 'assistant';
}
if (typeof finalMessage.content !== 'string' || finalMessage.content.trim() === '') {
finalChatCompletion.choices[0].message.content = intermediateReply.join('');
if (finalMessage && !finalMessage?.content?.trim()) {
finalChatCompletion.choices[0].message.content = intermediateReply;
}
})
.on('finalMessage', (message) => {
if (message?.role !== 'assistant') {
stream.messages.push({ role: 'assistant', content: intermediateReply.join('') });
stream.messages.push({ role: 'assistant', content: intermediateReply });
UnexpectedRoleError = true;
}
});
const azureDelay = this.modelOptions.model?.includes('gpt-4') ? 30 : 17;
for await (const chunk of stream) {
const token = chunk.choices[0]?.delta?.content || '';
intermediateReply.push(token);
intermediateReply += token;
onProgress(token);
if (abortController.signal.aborted) {
stream.controller.abort();
break;
}
await sleep(streamRate);
if (this.azure) {
await sleep(azureDelay);
}
}
streamResolve();
if (!UnexpectedRoleError) {
chatCompletion = await stream.finalChatCompletion().catch((err) => {
handleOpenAIErrors(err, errorCallback, 'finalChatCompletion');
@@ -1268,29 +1257,19 @@ ${convo}
throw new Error('Chat completion failed');
}
const { choices } = chatCompletion;
if (!Array.isArray(choices) || choices.length === 0) {
logger.warn('[OpenAIClient] Chat completion response has no choices');
return intermediateReply.join('');
const { message, finish_reason } = chatCompletion.choices[0];
if (chatCompletion) {
this.metadata = { finish_reason };
}
const { message, finish_reason } = choices[0] ?? {};
this.metadata = { finish_reason };
logger.debug('[OpenAIClient] chatCompletion response', chatCompletion);
if (!message) {
logger.warn('[OpenAIClient] Message is undefined in chatCompletion response');
return intermediateReply.join('');
}
if (typeof message.content !== 'string' || message.content.trim() === '') {
const reply = intermediateReply.join('');
if (!message?.content?.trim() && intermediateReply.length) {
logger.debug(
'[OpenAIClient] chatCompletion: using intermediateReply due to empty message.content',
{ intermediateReply: reply },
{ intermediateReply },
);
return reply;
return intermediateReply;
}
return message.content;
@@ -1299,7 +1278,7 @@ ${convo}
err?.message?.includes('abort') ||
(err instanceof OpenAI.APIError && err?.message?.includes('abort'))
) {
return intermediateReply.join('');
return intermediateReply;
}
if (
err?.message?.includes(
@@ -1314,10 +1293,10 @@ ${convo}
(err instanceof OpenAI.OpenAIError && err?.message?.includes('missing finish_reason'))
) {
logger.error('[OpenAIClient] Known OpenAI error:', err);
return intermediateReply.join('');
return intermediateReply;
} else if (err instanceof OpenAI.APIError) {
if (intermediateReply.length > 0) {
return intermediateReply.join('');
if (intermediateReply) {
return intermediateReply;
} else {
throw err;
}
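The hunks above switch the streamed-token accumulator between a plain string and an array joined once at the end; a standalone sketch of the array-based variant, with the stream and chunk shape assumed to mirror the OpenAI SDK usage shown above:
// Illustrative helper, independent of the client class in this diff.
async function collectStream(stream, onProgress, abortController) {
  const intermediateReply = [];
  for await (const chunk of stream) {
    const token = chunk.choices?.[0]?.delta?.content || '';
    intermediateReply.push(token);
    onProgress(token);
    if (abortController.signal.aborted) {
      stream.controller?.abort?.();
      break;
    }
  }
  // Joining once avoids repeated string concatenation and yields a single fallback
  // reply for the cases above where the final message content comes back empty.
  return intermediateReply.join('');
}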

View File

@@ -1,6 +1,5 @@
const OpenAIClient = require('./OpenAIClient');
const { CallbackManager } = require('langchain/callbacks');
const { CacheKeys, Time } = require('librechat-data-provider');
const { BufferMemory, ChatMessageHistory } = require('langchain/memory');
const { initializeCustomAgent, initializeFunctionsAgent } = require('./agents');
const { addImages, buildErrorInput, buildPromptPrefix } = require('./output_parsers');
@@ -12,7 +11,6 @@ const { SelfReflectionTool } = require('./tools');
const { isEnabled } = require('~/server/utils');
const { extractBaseURL } = require('~/utils');
const { loadTools } = require('./tools/util');
const { getLogStores } = require('~/cache');
const { logger } = require('~/config');
class PluginsClient extends OpenAIClient {
@@ -42,7 +40,6 @@ class PluginsClient extends OpenAIClient {
getSaveOptions() {
return {
artifacts: this.options.artifacts,
chatGptLabel: this.options.chatGptLabel,
promptPrefix: this.options.promptPrefix,
tools: this.options.tools,
@@ -146,22 +143,16 @@ class PluginsClient extends OpenAIClient {
// initialize agent
const initializer = this.functionsAgent ? initializeFunctionsAgent : initializeCustomAgent;
let customInstructions = (this.options.promptPrefix ?? '').trim();
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
customInstructions = `${customInstructions ?? ''}\n${this.options.artifactsPrompt}`.trim();
}
this.executor = await initializer({
model,
signal,
pastMessages,
tools: this.tools,
customInstructions,
verbose: this.options.debug,
returnIntermediateSteps: true,
customName: this.options.chatGptLabel,
currentDateString: this.currentDateString,
customInstructions: this.options.promptPrefix,
callbackManager: CallbackManager.fromHandlers({
async handleAgentAction(action, runId) {
handleAction(action, runId, onAgentAction);
@@ -229,13 +220,6 @@ class PluginsClient extends OpenAIClient {
}
}
/**
*
* @param {TMessage} responseMessage
* @param {Partial<TMessage>} saveOptions
* @param {string} user
* @returns
*/
async handleResponseMessage(responseMessage, saveOptions, user) {
const { output, errorMessage, ...result } = this.result;
logger.debug('[PluginsClient][handleResponseMessage] Output:', {
@@ -254,39 +238,18 @@ class PluginsClient extends OpenAIClient {
await this.recordTokenUsage(responseMessage);
}
this.responsePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user);
const messageCache = getLogStores(CacheKeys.MESSAGES);
messageCache.set(
responseMessage.messageId,
{
text: responseMessage.text,
complete: true,
},
Time.FIVE_MINUTES,
);
await this.saveMessageToDatabase(responseMessage, saveOptions, user);
delete responseMessage.tokenCount;
return { ...responseMessage, ...result };
}
async sendMessage(message, opts = {}) {
/** @type {{ filteredTools: string[], includedTools: string[] }} */
const { filteredTools = [], includedTools = [] } = this.options.req.app.locals;
if (includedTools.length > 0) {
const tools = this.options.tools.filter((plugin) => includedTools.includes(plugin));
this.options.tools = tools;
} else {
const tools = this.options.tools.filter((plugin) => !filteredTools.includes(plugin));
this.options.tools = tools;
}
// If a message is edited, no tools can be used.
const completionMode = this.options.tools.length === 0 || opts.isEdited;
if (completionMode) {
this.setOptions(opts);
return super.sendMessage(message, opts);
}
logger.debug('[PluginsClient] sendMessage', { userMessageText: message, opts });
const {
user,
@@ -301,14 +264,6 @@ class PluginsClient extends OpenAIClient {
onToolEnd,
} = await this.handleStartMethods(message, opts);
if (opts.progressCallback) {
opts.onProgress = opts.progressCallback.call(null, {
...(opts.progressOptions ?? {}),
parentMessageId: userMessage.messageId,
messageId: responseMessageId,
});
}
this.currentMessages.push(userMessage);
let {
@@ -337,15 +292,7 @@ class PluginsClient extends OpenAIClient {
if (payload) {
this.currentMessages = payload;
}
if (!this.skipSaveUserMessage) {
this.userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user);
if (typeof opts?.getReqData === 'function') {
opts.getReqData({
userMessagePromise: this.userMessagePromise,
});
}
}
await this.saveMessageToDatabase(userMessage, saveOptions, user);
if (isEnabled(process.env.CHECK_BALANCE)) {
await checkBalance({

View File

@@ -1,3 +1,44 @@
/*
module.exports = `You are ChatGPT, a Large Language model with useful tools.
Talk to the human and provide meaningful answers when questions are asked.
Use the tools when you need them, but use your own knowledge if you are confident of the answer. Keep answers short and concise.
A tool is not usually needed for creative requests, so do your best to answer them without tools.
Avoid repeating identical answers if it appears before. Only fulfill the human's requests, do not create extra steps beyond what the human has asked for.
Your input for 'Action' should be the name of tool used only.
Be honest. If you can't answer something, or a tool is not appropriate, say you don't know or answer to the best of your ability.
Attempt to fulfill the human's requests in as few actions as possible`;
*/
// module.exports = `You are ChatGPT, a highly knowledgeable and versatile large language model.
// Engage with the Human conversationally, providing concise and meaningful answers to questions. Utilize built-in tools when necessary, except for creative requests, where relying on your own knowledge is preferred. Aim for variety and avoid repetitive answers.
// For your 'Action' input, state the name of the tool used only, and honor user requests without adding extra steps. Always be honest; if you cannot provide an appropriate answer or tool, admit that or do your best.
// Strive to meet the user's needs efficiently with minimal actions.`;
// import {
// BasePromptTemplate,
// BaseStringPromptTemplate,
// SerializedBasePromptTemplate,
// renderTemplate,
// } from "langchain/prompts";
// prefix: `You are ChatGPT, a highly knowledgeable and versatile large language model.
// Your objective is to help users by understanding their intent and choosing the best action. Prioritize direct, specific responses. Use concise, varied answers and rely on your knowledge for creative tasks. Utilize tools when needed, and structure results for machine compatibility.
// prefix: `Objective: to comprehend human intentions based on user input and available tools. Goal: identify the best action to directly address the human's query. In your subsequent steps, you will utilize the chosen action. You may select multiple actions and list them in a meaningful order. Prioritize actions that directly relate to the user's query over general ones. Ensure that the generated thought is highly specific and explicit to best match the user's expectations. Construct the result in a manner that an online open-API would most likely expect. Provide concise and meaningful answers to human queries. Utilize tools when necessary. Relying on your own knowledge is preferred for creative requests. Aim for variety and avoid repetitive answers.
// # Available Actions & Tools:
// N/A: no suitable action, use your own knowledge.`,
// suffix: `Remember, all your responses MUST adhere to the described format and only respond if the format is followed. Output exactly with the requested format, avoiding any other text as this will be parsed by a machine. Following 'Action:', provide only one of the actions listed above. If a tool is not necessary, deduce this quickly and finish your response. Honor the human's requests without adding extra steps. Carry out tasks in the sequence written by the human. Always be honest; if you cannot provide an appropriate answer or tool, do your best with your own knowledge. Strive to meet the user's needs efficiently with minimal actions.`;
module.exports = {
'gpt3-v1': {
prefix: `Objective: Understand human intentions using user input and available tools. Goal: Identify the most suitable actions to directly address user queries.

View File

@@ -1,5 +1,5 @@
const { createStartHandler } = require('~/app/clients/callbacks');
const { spendTokens } = require('~/models/spendTokens');
const spendTokens = require('~/models/spendTokens');
const { logger } = require('~/config');
class RunManager {

View File

@@ -8,7 +8,7 @@ const { isEnabled } = require('~/server/utils');
* @param {Object} options - The options for creating the LLM.
* @param {ModelOptions} options.modelOptions - The options specific to the model, including modelName, temperature, presence_penalty, frequency_penalty, and other model-related settings.
* @param {ConfigOptions} options.configOptions - Configuration options for the API requests, including proxy settings and custom headers.
* @param {Callbacks} [options.callbacks] - Callback functions for managing the lifecycle of the LLM, including token buffers, context, and initial message count.
* @param {Callbacks} options.callbacks - Callback functions for managing the lifecycle of the LLM, including token buffers, context, and initial message count.
* @param {boolean} [options.streaming=false] - Determines if the LLM should operate in streaming mode.
* @param {string} options.openAIApiKey - The API key for OpenAI, used for authentication.
* @param {AzureOptions} [options.azure={}] - Optional Azure-specific configurations. If provided, Azure configurations take precedence over OpenAI configurations.

View File

@@ -60,10 +60,10 @@ function addImages(intermediateSteps, responseMessage) {
if (!observation || !observation.includes('![')) {
return;
}
const observedImagePath = observation.match(/!\[[^(]*\]\([^)]*\)/g);
const observedImagePath = observation.match(/!\[.*\]\([^)]*\)/g);
if (observedImagePath && !responseMessage.text.includes(observedImagePath[0])) {
responseMessage.text += '\n' + observedImagePath[0];
logger.debug('[addImages] added image from intermediateSteps:', observedImagePath[0]);
responseMessage.text += '\n' + observation;
logger.debug('[addImages] added image from intermediateSteps:', observation);
}
});
}
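To make the difference between the two patterns in the hunk above concrete (the sample observation string is made up):
const observation = 'Chart saved. ![chart](/images/chart.png) and ![legend](/images/legend.png)';
// Greedy `.*` lets a single match run to the last `]` on the line:
observation.match(/!\[.*\]\([^)]*\)/g);
// -> [ '![chart](/images/chart.png) and ![legend](/images/legend.png)' ]
// `[^(]*` stops the alt text at the first `(`, so each image markdown is matched on its own:
observation.match(/!\[[^(]*\]\([^)]*\)/g);
// -> [ '![chart](/images/chart.png)', '![legend](/images/legend.png)' ]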

View File

@@ -81,62 +81,4 @@ describe('addImages', () => {
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe(`${originalText}\n${imageMarkdown}`);
});
it('should extract only image markdowns when there is text between them', () => {
const markdownWithTextBetweenImages = `
![image1](/images/image1.png)
Some text between images that should not be included.
![image2](/images/image2.png)
More text that should be ignored.
![image3](/images/image3.png)
`;
intermediateSteps.push({ observation: markdownWithTextBetweenImages });
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe('\n![image1](/images/image1.png)');
});
it('should only return the first image when multiple images are present', () => {
const markdownWithMultipleImages = `
![image1](/images/image1.png)
![image2](/images/image2.png)
![image3](/images/image3.png)
`;
intermediateSteps.push({ observation: markdownWithMultipleImages });
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe('\n![image1](/images/image1.png)');
});
it('should not include any text or metadata surrounding the image markdown', () => {
const markdownWithMetadata = `
Title: Test Document
Author: John Doe
![image1](/images/image1.png)
Some content after the image.
Vector values: [0.1, 0.2, 0.3]
`;
intermediateSteps.push({ observation: markdownWithMetadata });
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe('\n![image1](/images/image1.png)');
});
it('should handle complex markdown with multiple images and only return the first one', () => {
const complexMarkdown = `
# Document Title
## Section 1
Here's some text with an embedded image:
![image1](/images/image1.png)
## Section 2
More text here...
![image2](/images/image2.png)
### Subsection
Even more content
![image3](/images/image3.png)
`;
intermediateSteps.push({ observation: complexMarkdown });
addImages(intermediateSteps, responseMessage);
expect(responseMessage.text).toBe('\n![image1](/images/image1.png)');
});
});

View File

@@ -1,43 +0,0 @@
/**
* Anthropic API: Adds cache control to the appropriate user messages in the payload.
* @param {Array<AnthropicMessage>} messages - The array of message objects.
* @returns {Array<AnthropicMessage>} - The updated array of message objects with cache control added.
*/
function addCacheControl(messages) {
if (!Array.isArray(messages) || messages.length < 2) {
return messages;
}
const updatedMessages = [...messages];
let userMessagesModified = 0;
for (let i = updatedMessages.length - 1; i >= 0 && userMessagesModified < 2; i--) {
const message = updatedMessages[i];
if (message.role !== 'user') {
continue;
}
if (typeof message.content === 'string') {
message.content = [
{
type: 'text',
text: message.content,
cache_control: { type: 'ephemeral' },
},
];
userMessagesModified++;
} else if (Array.isArray(message.content)) {
for (let j = message.content.length - 1; j >= 0; j--) {
if (message.content[j].type === 'text') {
message.content[j].cache_control = { type: 'ephemeral' };
userMessagesModified++;
break;
}
}
}
}
return updatedMessages;
}
module.exports = addCacheControl;
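A small before/after sketch of the helper above (the messages are illustrative): only the last two user messages receive a cache_control block, and string content is promoted to a text block first.
const addCacheControl = require('./addCacheControl');
const updated = addCacheControl([
  { role: 'user', content: 'First question' },
  { role: 'assistant', content: 'First answer' },
  { role: 'user', content: [{ type: 'text', text: 'Second question' }] },
]);
// updated[0].content -> [{ type: 'text', text: 'First question', cache_control: { type: 'ephemeral' } }]
// updated[2].content -> [{ type: 'text', text: 'Second question', cache_control: { type: 'ephemeral' } }]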

View File

@@ -1,227 +0,0 @@
const addCacheControl = require('./addCacheControl');
describe('addCacheControl', () => {
test('should add cache control to the last two user messages with array content', () => {
const messages = [
{ role: 'user', content: [{ type: 'text', text: 'Hello' }] },
{ role: 'assistant', content: [{ type: 'text', text: 'Hi there' }] },
{ role: 'user', content: [{ type: 'text', text: 'How are you?' }] },
{ role: 'assistant', content: [{ type: 'text', text: 'I\'m doing well, thanks!' }] },
{ role: 'user', content: [{ type: 'text', text: 'Great!' }] },
];
const result = addCacheControl(messages);
expect(result[0].content[0]).not.toHaveProperty('cache_control');
expect(result[2].content[0].cache_control).toEqual({ type: 'ephemeral' });
expect(result[4].content[0].cache_control).toEqual({ type: 'ephemeral' });
});
test('should add cache control to the last two user messages with string content', () => {
const messages = [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi there' },
{ role: 'user', content: 'How are you?' },
{ role: 'assistant', content: 'I\'m doing well, thanks!' },
{ role: 'user', content: 'Great!' },
];
const result = addCacheControl(messages);
expect(result[0].content).toBe('Hello');
expect(result[2].content[0]).toEqual({
type: 'text',
text: 'How are you?',
cache_control: { type: 'ephemeral' },
});
expect(result[4].content[0]).toEqual({
type: 'text',
text: 'Great!',
cache_control: { type: 'ephemeral' },
});
});
test('should handle mixed string and array content', () => {
const messages = [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi there' },
{ role: 'user', content: [{ type: 'text', text: 'How are you?' }] },
];
const result = addCacheControl(messages);
expect(result[0].content[0]).toEqual({
type: 'text',
text: 'Hello',
cache_control: { type: 'ephemeral' },
});
expect(result[2].content[0].cache_control).toEqual({ type: 'ephemeral' });
});
test('should handle less than two user messages', () => {
const messages = [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi there' },
];
const result = addCacheControl(messages);
expect(result[0].content[0]).toEqual({
type: 'text',
text: 'Hello',
cache_control: { type: 'ephemeral' },
});
expect(result[1].content).toBe('Hi there');
});
test('should return original array if no user messages', () => {
const messages = [
{ role: 'assistant', content: 'Hi there' },
{ role: 'assistant', content: 'How can I help?' },
];
const result = addCacheControl(messages);
expect(result).toEqual(messages);
});
test('should handle empty array', () => {
const messages = [];
const result = addCacheControl(messages);
expect(result).toEqual([]);
});
test('should handle non-array input', () => {
const messages = 'not an array';
const result = addCacheControl(messages);
expect(result).toBe('not an array');
});
test('should not modify assistant messages', () => {
const messages = [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi there' },
{ role: 'user', content: 'How are you?' },
];
const result = addCacheControl(messages);
expect(result[1].content).toBe('Hi there');
});
test('should handle multiple content items in user messages', () => {
const messages = [
{
role: 'user',
content: [
{ type: 'text', text: 'Hello' },
{ type: 'image', url: 'http://example.com/image.jpg' },
{ type: 'text', text: 'This is an image' },
],
},
{ role: 'assistant', content: 'Hi there' },
{ role: 'user', content: 'How are you?' },
];
const result = addCacheControl(messages);
expect(result[0].content[0]).not.toHaveProperty('cache_control');
expect(result[0].content[1]).not.toHaveProperty('cache_control');
expect(result[0].content[2].cache_control).toEqual({ type: 'ephemeral' });
expect(result[2].content[0]).toEqual({
type: 'text',
text: 'How are you?',
cache_control: { type: 'ephemeral' },
});
});
test('should handle an array with mixed content types', () => {
const messages = [
{ role: 'user', content: 'Hello' },
{ role: 'assistant', content: 'Hi there' },
{ role: 'user', content: [{ type: 'text', text: 'How are you?' }] },
{ role: 'assistant', content: 'I\'m doing well, thanks!' },
{ role: 'user', content: 'Great!' },
];
const result = addCacheControl(messages);
expect(result[0].content).toEqual('Hello');
expect(result[2].content[0]).toEqual({
type: 'text',
text: 'How are you?',
cache_control: { type: 'ephemeral' },
});
expect(result[4].content).toEqual([
{
type: 'text',
text: 'Great!',
cache_control: { type: 'ephemeral' },
},
]);
expect(result[1].content).toBe('Hi there');
expect(result[3].content).toBe('I\'m doing well, thanks!');
});
test('should handle edge case with multiple content types', () => {
const messages = [
{
role: 'user',
content: [
{
type: 'image',
source: { type: 'base64', media_type: 'image/png', data: 'some_base64_string' },
},
{
type: 'image',
source: { type: 'base64', media_type: 'image/png', data: 'another_base64_string' },
},
{ type: 'text', text: 'what do all these images have in common' },
],
},
{ role: 'assistant', content: 'I see multiple images.' },
{ role: 'user', content: 'Correct!' },
];
const result = addCacheControl(messages);
expect(result[0].content[0]).not.toHaveProperty('cache_control');
expect(result[0].content[1]).not.toHaveProperty('cache_control');
expect(result[0].content[2].cache_control).toEqual({ type: 'ephemeral' });
expect(result[2].content[0]).toEqual({
type: 'text',
text: 'Correct!',
cache_control: { type: 'ephemeral' },
});
});
test('should handle user message with no text block', () => {
const messages = [
{
role: 'user',
content: [
{
type: 'image',
source: { type: 'base64', media_type: 'image/png', data: 'some_base64_string' },
},
{
type: 'image',
source: { type: 'base64', media_type: 'image/png', data: 'another_base64_string' },
},
],
},
{ role: 'assistant', content: 'I see two images.' },
{ role: 'user', content: 'Correct!' },
];
const result = addCacheControl(messages);
expect(result[0].content[0]).not.toHaveProperty('cache_control');
expect(result[0].content[1]).not.toHaveProperty('cache_control');
expect(result[2].content[0]).toEqual({
type: 'text',
text: 'Correct!',
cache_control: { type: 'ephemeral' },
});
});
});

View File

@@ -1,527 +0,0 @@
const dedent = require('dedent');
const { EModelEndpoint, ArtifactModes } = require('librechat-data-provider');
const { generateShadcnPrompt } = require('~/app/clients/prompts/shadcn-docs/generate');
const { components } = require('~/app/clients/prompts/shadcn-docs/components');
// eslint-disable-next-line no-unused-vars
const artifactsPromptV1 = dedent`The assistant can create and reference artifacts during conversations.
Artifacts are for substantial, self-contained content that users might modify or reuse, displayed in a separate UI window for clarity.
# Good artifacts are...
- Substantial content (>15 lines)
- Content that the user is likely to modify, iterate on, or take ownership of
- Self-contained, complex content that can be understood on its own, without context from the conversation
- Content intended for eventual use outside the conversation (e.g., reports, emails, presentations)
- Content likely to be referenced or reused multiple times
# Don't use artifacts for...
- Simple, informational, or short content, such as brief code snippets, mathematical equations, or small examples
- Primarily explanatory, instructional, or illustrative content, such as examples provided to clarify a concept
- Suggestions, commentary, or feedback on existing artifacts
- Conversational or explanatory content that doesn't represent a standalone piece of work
- Content that is dependent on the current conversational context to be useful
- Content that is unlikely to be modified or iterated upon by the user
- Requests from users that appear to be one-off questions
# Usage notes
- One artifact per message unless specifically requested
- Prefer in-line content (don't use artifacts) when possible. Unnecessary use of artifacts can be jarring for users.
- If a user asks the assistant to "draw an SVG" or "make a website," the assistant does not need to explain that it doesn't have these capabilities. Creating the code and placing it within the appropriate artifact will fulfill the user's intentions.
- If asked to generate an image, the assistant can offer an SVG instead. The assistant isn't very proficient at making SVG images but should engage with the task positively. Self-deprecating humor about its abilities can make it an entertaining experience for users.
- The assistant errs on the side of simplicity and avoids overusing artifacts for content that can be effectively presented within the conversation.
- Always provide complete, specific, and fully functional content without any placeholders, ellipses, or 'remains the same' comments.
<artifact_instructions>
When collaborating with the user on creating content that falls into compatible categories, the assistant should follow these steps:
1. Create the artifact using the following format:
:::artifact{identifier="unique-identifier" type="mime-type" title="Artifact Title"}
\`\`\`
Your artifact content here
\`\`\`
:::
2. Assign an identifier to the \`identifier\` attribute. For updates, reuse the prior identifier. For new artifacts, the identifier should be descriptive and relevant to the content, using kebab-case (e.g., "example-code-snippet"). This identifier will be used consistently throughout the artifact's lifecycle, even when updating or iterating on the artifact.
3. Include a \`title\` attribute to provide a brief title or description of the content.
4. Add a \`type\` attribute to specify the type of content the artifact represents. Assign one of the following values to the \`type\` attribute:
- HTML: "text/html"
- The user interface can render single file HTML pages placed within the artifact tags. HTML, JS, and CSS should be in a single file when using the \`text/html\` type.
- Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`<img src="/api/placeholder/400/320" alt="placeholder" />\`
- The only place external scripts can be imported from is https://cdnjs.cloudflare.com
- Mermaid Diagrams: "application/vnd.mermaid"
- The user interface will render Mermaid diagrams placed within the artifact tags.
- React Components: "application/vnd.react"
- Use this for displaying either: React elements, e.g. \`<strong>Hello World!</strong>\`, React pure functional components, e.g. \`() => <strong>Hello World!</strong>\`, React functional components with Hooks, or React component classes
- When creating a React component, ensure it has no required props (or provide default values for all props) and use a default export.
- Use Tailwind classes for styling. DO NOT USE ARBITRARY VALUES (e.g. \`h-[600px]\`).
- Base React is available to be imported. To use hooks, first import it at the top of the artifact, e.g. \`import { useState } from "react"\`
- The lucide-react@0.263.1 library is available to be imported. e.g. \`import { Camera } from "lucide-react"\` & \`<Camera color="red" size={48} />\`
- The recharts charting library is available to be imported, e.g. \`import { LineChart, XAxis, ... } from "recharts"\` & \`<LineChart ...><XAxis dataKey="name"> ...\`
- The assistant can use prebuilt components from the \`shadcn/ui\` library after it is imported: \`import { Alert, AlertDescription, AlertTitle, AlertDialog, AlertDialogAction } from '/components/ui/alert';\`. If using components from the shadcn/ui library, the assistant mentions this to the user and offers to help them install the components if necessary.
- Components MUST be imported from \`/components/ui/name\` and NOT from \`/components/name\` or \`@/components/ui/name\`.
- NO OTHER LIBRARIES (e.g. zod, hookform) ARE INSTALLED OR ABLE TO BE IMPORTED.
- Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`<img src="/api/placeholder/400/320" alt="placeholder" />\`
- If you are unable to follow the above requirements for any reason, don't use artifacts and use regular code blocks instead, which will not attempt to render the component.
5. Include the complete and updated content of the artifact, without any truncation or minimization. Don't use "// rest of the code remains the same...".
6. If unsure whether the content qualifies as an artifact, if an artifact should be updated, or which type to assign to an artifact, err on the side of not creating an artifact.
7. Always use triple backticks (\`\`\`) to enclose the content within the artifact, regardless of the content type.
</artifact_instructions>
Here are some examples of correct usage of artifacts:
<examples>
<example_docstring>
This example demonstrates how to create a Mermaid artifact for a simple flow chart.
</example_docstring>
<example>
<user_query>Can you create a simple flow chart showing the process of making tea using Mermaid?</user_query>
<assistant_response>
Sure! Here's a simple flow chart depicting the process of making tea using Mermaid syntax:
:::artifact{identifier="tea-making-flowchart" type="application/vnd.mermaid" title="Flow chart: Making Tea"}
\`\`\`mermaid
graph TD
A[Start] --> B{Water boiled?}
B -->|Yes| C[Add tea leaves to cup]
B -->|No| D[Boil water]
D --> B
C --> E[Pour boiling water into cup]
E --> F[Steep tea for desired time]
F --> G[Remove tea leaves]
G --> H[Add milk or sugar, if desired]
H --> I[Enjoy your tea!]
I --> J[End]
\`\`\`
:::
This flow chart uses Mermaid syntax to visualize the steps involved in making a cup of tea. Here's a brief explanation of the process:
1. Start
2. Check if water is boiled
3. If not boiled, boil the water
4. Once water is boiled, add tea leaves to the cup
5. Pour boiling water into the cup
6. Steep the tea for the desired time
7. Remove the tea leaves
8. Optionally add milk or sugar
9. Enjoy your tea!
10. End
This chart provides a clear visual representation of the tea-making process. You can easily modify or expand this chart if you want to add more details or steps to the process. Let me know if you'd like any changes or have any questions!
</assistant_response>
</example>
</examples>`;
const artifactsPrompt = dedent`The assistant can create and reference artifacts during conversations.
Artifacts are for substantial, self-contained content that users might modify or reuse, displayed in a separate UI window for clarity.
# Good artifacts are...
- Substantial content (>15 lines)
- Content that the user is likely to modify, iterate on, or take ownership of
- Self-contained, complex content that can be understood on its own, without context from the conversation
- Content intended for eventual use outside the conversation (e.g., reports, emails, presentations)
- Content likely to be referenced or reused multiple times
# Don't use artifacts for...
- Simple, informational, or short content, such as brief code snippets, mathematical equations, or small examples
- Primarily explanatory, instructional, or illustrative content, such as examples provided to clarify a concept
- Suggestions, commentary, or feedback on existing artifacts
- Conversational or explanatory content that doesn't represent a standalone piece of work
- Content that is dependent on the current conversational context to be useful
- Content that is unlikely to be modified or iterated upon by the user
- Requests from users that appear to be one-off questions
# Usage notes
- One artifact per message unless specifically requested
- Prefer in-line content (don't use artifacts) when possible. Unnecessary use of artifacts can be jarring for users.
- If a user asks the assistant to "draw an SVG" or "make a website," the assistant does not need to explain that it doesn't have these capabilities. Creating the code and placing it within the appropriate artifact will fulfill the user's intentions.
- If asked to generate an image, the assistant can offer an SVG instead. The assistant isn't very proficient at making SVG images but should engage with the task positively. Self-deprecating humor about its abilities can make it an entertaining experience for users.
- The assistant errs on the side of simplicity and avoids overusing artifacts for content that can be effectively presented within the conversation.
- Always provide complete, specific, and fully functional content for artifacts without any snippets, placeholders, ellipses, or 'remains the same' comments.
- If an artifact is not necessary or requested, the assistant should not mention artifacts at all, and respond to the user accordingly.
<artifact_instructions>
When collaborating with the user on creating content that falls into compatible categories, the assistant should follow these steps:
1. Create the artifact using the following format:
:::artifact{identifier="unique-identifier" type="mime-type" title="Artifact Title"}
\`\`\`
Your artifact content here
\`\`\`
:::
2. Assign an identifier to the \`identifier\` attribute. For updates, reuse the prior identifier. For new artifacts, the identifier should be descriptive and relevant to the content, using kebab-case (e.g., "example-code-snippet"). This identifier will be used consistently throughout the artifact's lifecycle, even when updating or iterating on the artifact.
3. Include a \`title\` attribute to provide a brief title or description of the content.
4. Add a \`type\` attribute to specify the type of content the artifact represents. Assign one of the following values to the \`type\` attribute:
- HTML: "text/html"
- The user interface can render single file HTML pages placed within the artifact tags. HTML, JS, and CSS should be in a single file when using the \`text/html\` type.
- Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`<img src="/api/placeholder/400/320" alt="placeholder" />\`
- The only place external scripts can be imported from is https://cdnjs.cloudflare.com
- SVG: "image/svg+xml"
- The user interface will render the Scalable Vector Graphics (SVG) image within the artifact tags.
- The assistant should specify the viewbox of the SVG rather than defining a width/height
- Mermaid Diagrams: "application/vnd.mermaid"
- The user interface will render Mermaid diagrams placed within the artifact tags.
- React Components: "application/vnd.react"
- Use this for displaying either: React elements, e.g. \`<strong>Hello World!</strong>\`, React pure functional components, e.g. \`() => <strong>Hello World!</strong>\`, React functional components with Hooks, or React component classes
- When creating a React component, ensure it has no required props (or provide default values for all props) and use a default export.
- Use Tailwind classes for styling. DO NOT USE ARBITRARY VALUES (e.g. \`h-[600px]\`).
- Base React is available to be imported. To use hooks, first import it at the top of the artifact, e.g. \`import { useState } from "react"\`
- The lucide-react@0.394.0 library is available to be imported. e.g. \`import { Camera } from "lucide-react"\` & \`<Camera color="red" size={48} />\`
- The recharts charting library is available to be imported, e.g. \`import { LineChart, XAxis, ... } from "recharts"\` & \`<LineChart ...><XAxis dataKey="name"> ...\`
- The three.js library is available to be imported, e.g. \`import * as THREE from "three";\`
- The date-fns library is available to be imported, e.g. \`import { compareAsc, format } from "date-fns";\`
- The react-day-picker library is available to be imported, e.g. \`import { DayPicker } from "react-day-picker";\`
- The assistant can use prebuilt components from the \`shadcn/ui\` library after it is imported: \`import { Alert, AlertDescription, AlertTitle, AlertDialog, AlertDialogAction } from '/components/ui/alert';\`. If using components from the shadcn/ui library, the assistant mentions this to the user and offers to help them install the components if necessary.
- Components MUST be imported from \`/components/ui/name\` and NOT from \`/components/name\` or \`@/components/ui/name\`.
- NO OTHER LIBRARIES (e.g. zod, hookform) ARE INSTALLED OR ABLE TO BE IMPORTED.
- Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`<img src="/api/placeholder/400/320" alt="placeholder" />\`
- When iterating on code, ensure that the code is complete and functional without any snippets, placeholders, or ellipses.
- If you are unable to follow the above requirements for any reason, don't use artifacts and use regular code blocks instead, which will not attempt to render the component.
5. Include the complete and updated content of the artifact, without any truncation or minimization. Don't use "// rest of the code remains the same...".
6. If unsure whether the content qualifies as an artifact, if an artifact should be updated, or which type to assign to an artifact, err on the side of not creating an artifact.
7. Always use triple backticks (\`\`\`) to enclose the content within the artifact, regardless of the content type.
</artifact_instructions>
Here are some examples of correct usage of artifacts:
<examples>
<example_docstring>
This example demonstrates how to create a Mermaid artifact for a simple flow chart.
</example_docstring>
<example>
<user_query>Can you create a simple flow chart showing the process of making tea using Mermaid?</user_query>
<assistant_response>
Sure! Here's a simple flow chart depicting the process of making tea using Mermaid syntax:
:::artifact{identifier="tea-making-flowchart" type="application/vnd.mermaid" title="Flow chart: Making Tea"}
\`\`\`mermaid
graph TD
A[Start] --> B{Water boiled?}
B -->|Yes| C[Add tea leaves to cup]
B -->|No| D[Boil water]
D --> B
C --> E[Pour boiling water into cup]
E --> F[Steep tea for desired time]
F --> G[Remove tea leaves]
G --> H[Add milk or sugar, if desired]
H --> I[Enjoy your tea!]
I --> J[End]
\`\`\`
:::
This flow chart uses Mermaid syntax to visualize the steps involved in making a cup of tea. Here's a brief explanation of the process:
1. Start
2. Check if water is boiled
3. If not boiled, boil the water
4. Once water is boiled, add tea leaves to the cup
5. Pour boiling water into the cup
6. Steep the tea for the desired time
7. Remove the tea leaves
8. Optionally add milk or sugar
9. Enjoy your tea!
10. End
This chart provides a clear visual representation of the tea-making process. You can easily modify or expand this chart if you want to add more details or steps to the process. Let me know if you'd like any changes or have any questions!
</assistant_response>
</example>
<example>
<user_query>Create a simple React counter component</user_query>
<assistant_response>
Here's a simple React counter component:
:::artifact{identifier="react-counter" type="application/vnd.react" title="React Counter"}
\`\`\`
import { useState } from 'react';
export default function Counter() {
const [count, setCount] = useState(0);
return (
<div className="p-4">
<p className="mb-2">Count: {count}</p>
<button className="bg-blue-500 text-white px-4 py-2 rounded" onClick={() => setCount(count + 1)}>
Increment
</button>
</div>
);
}
\`\`\`
:::
This component creates a simple counter with an increment button.
</assistant_response>
</example>
<example>
<user_query>Create a basic HTML structure for a blog post</user_query>
<assistant_response>
Here's a basic HTML structure for a blog post:
:::artifact{identifier="blog-post-html" type="text/html" title="Blog Post HTML"}
\`\`\`
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>My Blog Post</title>
<style>
body { font-family: Arial, sans-serif; line-height: 1.6; max-width: 800px; margin: 0 auto; padding: 20px; }
h1 { color: #333; }
p { margin-bottom: 15px; }
</style>
</head>
<body>
<header>
<h1>My First Blog Post</h1>
</header>
<main>
<article>
<p>This is the content of my blog post. It's short and sweet!</p>
</article>
</main>
<footer>
<p>&copy; 2023 My Blog</p>
</footer>
</body>
</html>
\`\`\`
:::
This HTML structure provides a simple layout for a blog post.
</assistant_response>
</example>
</examples>`;
const artifactsOpenAIPrompt = dedent`The assistant can create and reference artifacts during conversations.
Artifacts are for substantial, self-contained content that users might modify or reuse, displayed in a separate UI window for clarity.
# Good artifacts are...
- Substantial content (>15 lines)
- Content that the user is likely to modify, iterate on, or take ownership of
- Self-contained, complex content that can be understood on its own, without context from the conversation
- Content intended for eventual use outside the conversation (e.g., reports, emails, presentations)
- Content likely to be referenced or reused multiple times
# Don't use artifacts for...
- Simple, informational, or short content, such as brief code snippets, mathematical equations, or small examples
- Primarily explanatory, instructional, or illustrative content, such as examples provided to clarify a concept
- Suggestions, commentary, or feedback on existing artifacts
- Conversational or explanatory content that doesn't represent a standalone piece of work
- Content that is dependent on the current conversational context to be useful
- Content that is unlikely to be modified or iterated upon by the user
- Requests from users that appear to be one-off questions
# Usage notes
- One artifact per message unless specifically requested
- Prefer in-line content (don't use artifacts) when possible. Unnecessary use of artifacts can be jarring for users.
- If a user asks the assistant to "draw an SVG" or "make a website," the assistant does not need to explain that it doesn't have these capabilities. Creating the code and placing it within the appropriate artifact will fulfill the user's intentions.
- If asked to generate an image, the assistant can offer an SVG instead. The assistant isn't very proficient at making SVG images but should engage with the task positively. Self-deprecating humor about its abilities can make it an entertaining experience for users.
- The assistant errs on the side of simplicity and avoids overusing artifacts for content that can be effectively presented within the conversation.
- Always provide complete, specific, and fully functional content for artifacts without any snippets, placeholders, ellipses, or 'remains the same' comments.
- If an artifact is not necessary or requested, the assistant should not mention artifacts at all, and respond to the user accordingly.
## Artifact Instructions
When collaborating with the user on creating content that falls into compatible categories, the assistant should follow these steps:
1. Create the artifact using the following remark-directive markdown format:
:::artifact{identifier="unique-identifier" type="mime-type" title="Artifact Title"}
\`\`\`
Your artifact content here
\`\`\`
:::
a. Example of correct format:
:::artifact{identifier="example-artifact" type="text/plain" title="Example Artifact"}
\`\`\`
This is the content of the artifact.
It can span multiple lines.
\`\`\`
:::
b. Common mistakes to avoid:
- Don't split the opening ::: line
- Don't add extra backticks outside the artifact structure
- Don't omit the closing :::
2. Assign an identifier to the \`identifier\` attribute. For updates, reuse the prior identifier. For new artifacts, the identifier should be descriptive and relevant to the content, using kebab-case (e.g., "example-code-snippet"). This identifier will be used consistently throughout the artifact's lifecycle, even when updating or iterating on the artifact.
3. Include a \`title\` attribute to provide a brief title or description of the content.
4. Add a \`type\` attribute to specify the type of content the artifact represents. Assign one of the following values to the \`type\` attribute:
- HTML: "text/html"
- The user interface can render single file HTML pages placed within the artifact tags. HTML, JS, and CSS should be in a single file when using the \`text/html\` type.
- Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`<img src="/api/placeholder/400/320" alt="placeholder" />\`
- The only place external scripts can be imported from is https://cdnjs.cloudflare.com
- SVG: "image/svg+xml"
- The user interface will render the Scalable Vector Graphics (SVG) image within the artifact tags.
- The assistant should specify the viewbox of the SVG rather than defining a width/height
- Mermaid Diagrams: "application/vnd.mermaid"
- The user interface will render Mermaid diagrams placed within the artifact tags.
- React Components: "application/vnd.react"
- Use this for displaying either: React elements, e.g. \`<strong>Hello World!</strong>\`, React pure functional components, e.g. \`() => <strong>Hello World!</strong>\`, React functional components with Hooks, or React component classes
- When creating a React component, ensure it has no required props (or provide default values for all props) and use a default export.
- Use Tailwind classes for styling. DO NOT USE ARBITRARY VALUES (e.g. \`h-[600px]\`).
- Base React is available to be imported. To use hooks, first import it at the top of the artifact, e.g. \`import { useState } from "react"\`
- The lucide-react@0.394.0 library is available to be imported. e.g. \`import { Camera } from "lucide-react"\` & \`<Camera color="red" size={48} />\`
- The recharts charting library is available to be imported, e.g. \`import { LineChart, XAxis, ... } from "recharts"\` & \`<LineChart ...><XAxis dataKey="name"> ...\`
- The three.js library is available to be imported, e.g. \`import * as THREE from "three";\`
- The date-fns library is available to be imported, e.g. \`import { compareAsc, format } from "date-fns";\`
- The react-day-picker library is available to be imported, e.g. \`import { DayPicker } from "react-day-picker";\`
- The assistant can use prebuilt components from the \`shadcn/ui\` library after it is imported: \`import { Alert, AlertDescription, AlertTitle, AlertDialog, AlertDialogAction } from '/components/ui/alert';\`. If using components from the shadcn/ui library, the assistant mentions this to the user and offers to help them install the components if necessary.
- Components MUST be imported from \`/components/ui/name\` and NOT from \`/components/name\` or \`@/components/ui/name\`.
- NO OTHER LIBRARIES (e.g. zod, hookform) ARE INSTALLED OR ABLE TO BE IMPORTED.
- Images from the web are not allowed, but you can use placeholder images by specifying the width and height like so \`<img src="/api/placeholder/400/320" alt="placeholder" />\`
- When iterating on code, ensure that the code is complete and functional without any snippets, placeholders, or ellipses.
- If you are unable to follow the above requirements for any reason, don't use artifacts and use regular code blocks instead, which will not attempt to render the component.
5. Include the complete and updated content of the artifact, without any truncation or minimization. Don't use "// rest of the code remains the same...".
6. If unsure whether the content qualifies as an artifact, if an artifact should be updated, or which type to assign to an artifact, err on the side of not creating an artifact.
7. NEVER use triple backticks to enclose the artifact, ONLY the content within the artifact.
Here are some examples of correct usage of artifacts:
## Examples
### Example 1
This example demonstrates how to create a Mermaid artifact for a simple flow chart.
User: Can you create a simple flow chart showing the process of making tea using Mermaid?
Assistant: Sure! Here's a simple flow chart depicting the process of making tea using Mermaid syntax:
:::artifact{identifier="tea-making-flowchart" type="application/vnd.mermaid" title="Flow chart: Making Tea"}
\`\`\`mermaid
graph TD
A[Start] --> B{Water boiled?}
B -->|Yes| C[Add tea leaves to cup]
B -->|No| D[Boil water]
D --> B
C --> E[Pour boiling water into cup]
E --> F[Steep tea for desired time]
F --> G[Remove tea leaves]
G --> H[Add milk or sugar, if desired]
H --> I[Enjoy your tea!]
I --> J[End]
\`\`\`
:::
This flow chart uses Mermaid syntax to visualize the steps involved in making a cup of tea. Here's a brief explanation of the process:
1. Start
2. Check if water is boiled
3. If not boiled, boil the water
4. Once water is boiled, add tea leaves to the cup
5. Pour boiling water into the cup
6. Steep the tea for the desired time
7. Remove the tea leaves
8. Optionally add milk or sugar
9. Enjoy your tea!
10. End
This chart provides a clear visual representation of the tea-making process. You can easily modify or expand this chart if you want to add more details or steps to the process. Let me know if you'd like any changes or have any questions!
---
### Example 2
User: Create a simple React counter component
Assistant: Here's a simple React counter component:
:::artifact{identifier="react-counter" type="application/vnd.react" title="React Counter"}
\`\`\`
import { useState } from 'react';
export default function Counter() {
const [count, setCount] = useState(0);
return (
<div className="p-4">
<p className="mb-2">Count: {count}</p>
<button className="bg-blue-500 text-white px-4 py-2 rounded" onClick={() => setCount(count + 1)}>
Increment
</button>
</div>
);
}
\`\`\`
:::
This component creates a simple counter with an increment button.
---
### Example 3
User: Create a basic HTML structure for a blog post
Assistant: Here's a basic HTML structure for a blog post:
:::artifact{identifier="blog-post-html" type="text/html" title="Blog Post HTML"}
\`\`\`
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>My Blog Post</title>
<style>
body { font-family: Arial, sans-serif; line-height: 1.6; max-width: 800px; margin: 0 auto; padding: 20px; }
h1 { color: #333; }
p { margin-bottom: 15px; }
</style>
</head>
<body>
<header>
<h1>My First Blog Post</h1>
</header>
<main>
<article>
<p>This is the content of my blog post. It's short and sweet!</p>
</article>
</main>
<footer>
<p>&copy; 2023 My Blog</p>
</footer>
</body>
</html>
\`\`\`
:::
This HTML structure provides a simple layout for a blog post.
---`;
/**
*
* @param {Object} params
* @param {EModelEndpoint | string} params.endpoint - The current endpoint
* @param {ArtifactModes} params.artifacts - The current artifact mode
* @returns {string | null} The artifacts system prompt for the given endpoint, or null when a custom artifacts prompt is in use
*/
const generateArtifactsPrompt = ({ endpoint, artifacts }) => {
if (artifacts === ArtifactModes.CUSTOM) {
return null;
}
let prompt = artifactsPrompt;
if (endpoint !== EModelEndpoint.anthropic) {
prompt = artifactsOpenAIPrompt;
}
if (artifacts === ArtifactModes.SHADCNUI) {
prompt += generateShadcnPrompt({ components, useXML: endpoint === EModelEndpoint.anthropic });
}
return prompt;
};
module.exports = generateArtifactsPrompt;
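A hedged sketch of how the export above might be consumed; the require path is an assumption, while the endpoint and mode values mirror the ones referenced in the function:
const { EModelEndpoint, ArtifactModes } = require('librechat-data-provider');
const generateArtifactsPrompt = require('~/app/clients/prompts/artifacts'); // assumed path
// Anthropic endpoints get the XML-tagged prompt; any other endpoint gets the markdown-flavored one.
const prompt = generateArtifactsPrompt({
  endpoint: EModelEndpoint.anthropic,
  artifacts: ArtifactModes.SHADCNUI, // additionally appends the shadcn/ui component docs
});
// ArtifactModes.CUSTOM short-circuits and returns null, leaving any custom artifacts prompt untouched.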

View File

@@ -8,6 +8,8 @@ In your response, remember to follow these guidelines:
- If you don't know the answer, simply say that you don't know.
- If you are unsure how to answer, ask for clarification.
- Avoid mentioning that you obtained the information from the context.
Answer appropriately in the user's language.
`;
function createContextHandlers(req, userMessageContent) {
@@ -92,40 +94,37 @@ function createContextHandlers(req, userMessageContent) {
const resolvedQueries = await Promise.all(queryPromises);
const context =
resolvedQueries.length === 0
? '\n\tThe semantic search did not return any results.'
: resolvedQueries
.map((queryResult, index) => {
const file = processedFiles[index];
let contextItems = queryResult.data;
const context = resolvedQueries
.map((queryResult, index) => {
const file = processedFiles[index];
let contextItems = queryResult.data;
const generateContext = (currentContext) =>
`
const generateContext = (currentContext) =>
`
<file>
<filename>${file.filename}</filename>
<context>${currentContext}
</context>
</file>`;
if (useFullContext) {
return generateContext(`\n${contextItems}`);
}
if (useFullContext) {
return generateContext(`\n${contextItems}`);
}
contextItems = queryResult.data
.map((item) => {
const pageContent = item[0].page_content;
return `
contextItems = queryResult.data
.map((item) => {
const pageContent = item[0].page_content;
return `
<contextItem>
<![CDATA[${pageContent?.trim()}]]>
</contextItem>`;
})
.join('');
return generateContext(contextItems);
})
.join('');
return generateContext(contextItems);
})
.join('');
if (useFullContext) {
const prompt = `${header}
${context}

View File

@@ -1,5 +1,4 @@
const { ToolMessage } = require('@langchain/core/messages');
const { EModelEndpoint, ContentTypes } = require('librechat-data-provider');
const { EModelEndpoint } = require('librechat-data-provider');
const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema');
/**
@@ -15,11 +14,11 @@ const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema');
*/
const formatVisionMessage = ({ message, image_urls, endpoint }) => {
if (endpoint === EModelEndpoint.anthropic) {
message.content = [...image_urls, { type: ContentTypes.TEXT, text: message.content }];
message.content = [...image_urls, { type: 'text', text: message.content }];
return message;
}
message.content = [{ type: ContentTypes.TEXT, text: message.content }, ...image_urls];
message.content = [{ type: 'text', text: message.content }, ...image_urls];
return message;
};
@@ -52,7 +51,7 @@ const formatMessage = ({ message, userName, assistantName, endpoint, langChain =
_role = roleMapping[lc_id[2]];
}
const role = _role ?? (sender && sender?.toLowerCase() === 'user' ? 'user' : 'assistant');
const content = _content ?? text ?? '';
const content = text ?? _content ?? '';
const formattedMessage = {
role,
content,
@@ -132,79 +131,4 @@ const formatFromLangChain = (message) => {
};
};
/**
* Formats an array of messages for LangChain, handling tool calls and creating ToolMessage instances.
*
* @param {Array<Partial<TMessage>>} payload - The array of messages to format.
* @returns {Array<(HumanMessage|AIMessage|SystemMessage|ToolMessage)>} - The array of formatted LangChain messages, including ToolMessages for tool calls.
*/
const formatAgentMessages = (payload) => {
const messages = [];
for (const message of payload) {
if (message.role !== 'assistant') {
messages.push(formatMessage({ message, langChain: true }));
continue;
}
let currentContent = [];
let lastAIMessage = null;
for (const part of message.content) {
if (part.type === ContentTypes.TEXT && part.tool_call_ids) {
// If there's pending content, add it as an AIMessage
if (currentContent.length > 0) {
messages.push(new AIMessage({ content: currentContent }));
currentContent = [];
}
// Create a new AIMessage with this text and prepare for tool calls
lastAIMessage = new AIMessage({
content: part.text || '',
});
messages.push(lastAIMessage);
} else if (part.type === ContentTypes.TOOL_CALL) {
if (!lastAIMessage) {
throw new Error('Invalid tool call structure: No preceding AIMessage with tool_call_ids');
}
// Note: the `tool_calls` list is initialized by the `AIMessage` constructor; tool outputs should be excluded from it
const { output, args: _args, ...tool_call } = part.tool_call;
// TODO: investigate; args as dictionary may need to be provider-or-tool-specific
let args = _args;
try {
args = JSON.parse(args);
} catch (e) {
// failed to parse, leave as is
}
tool_call.args = args;
lastAIMessage.tool_calls.push(tool_call);
// Add the corresponding ToolMessage
messages.push(
new ToolMessage({
tool_call_id: tool_call.id,
name: tool_call.name,
content: output,
}),
);
} else {
currentContent.push(part);
}
}
if (currentContent.length > 0) {
messages.push(new AIMessage({ content: currentContent }));
}
}
return messages;
};
module.exports = {
formatMessage,
formatFromLangChain,
formatAgentMessages,
formatLangChainMessages,
};
module.exports = { formatMessage, formatLangChainMessages, formatFromLangChain };
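To make the formatAgentMessages contract shown above concrete, here is a hedged sketch of the payload shape it expects and the LangChain messages it would produce. The IDs, tool name, and texts are made up, and the literal 'text'/'tool_call' strings are what ContentTypes is assumed to resolve to.
// Hypothetical payload (sketch only) for formatAgentMessages as defined above.
const payload = [
  { role: 'user', text: 'What is the weather in Paris?' },
  {
    role: 'assistant',
    content: [
      {
        type: 'text', // ContentTypes.TEXT
        text: 'Let me check that for you.',
        tool_call_ids: ['call_1'],
      },
      {
        type: 'tool_call', // ContentTypes.TOOL_CALL
        tool_call: {
          id: 'call_1',
          name: 'get_weather',
          args: '{"city":"Paris"}',
          output: '18°C and sunny',
        },
      },
    ],
  },
];
// formatAgentMessages(payload) would return, in order:
// 1. HumanMessage('What is the weather in Paris?')
// 2. AIMessage with content 'Let me check that for you.' and tool_calls: [{ id: 'call_1', name: 'get_weather', args: { city: 'Paris' } }]
// 3. ToolMessage({ tool_call_id: 'call_1', name: 'get_weather', content: '18°C and sunny' })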

View File

@@ -1,4 +1,3 @@
const addCacheControl = require('./addCacheControl');
const formatMessages = require('./formatMessages');
const summaryPrompts = require('./summaryPrompts');
const handleInputs = require('./handleInputs');
@@ -9,7 +8,6 @@ const createVisionPrompt = require('./createVisionPrompt');
const createContextHandlers = require('./createContextHandlers');
module.exports = {
addCacheControl,
...formatMessages,
...summaryPrompts,
...handleInputs,

View File

@@ -1,495 +0,0 @@
// Essential Components
const essentialComponents = {
avatar: {
componentName: 'Avatar',
importDocs: 'import { Avatar, AvatarFallback, AvatarImage } from "/components/ui/avatar"',
usageDocs: `
<Avatar>
<AvatarImage src="https://github.com/shadcn.png" />
<AvatarFallback>CN</AvatarFallback>
</Avatar>`,
},
button: {
componentName: 'Button',
importDocs: 'import { Button } from "/components/ui/button"',
usageDocs: `
<Button variant="outline">Button</Button>`,
},
card: {
componentName: 'Card',
importDocs: `
import {
Card,
CardContent,
CardDescription,
CardFooter,
CardHeader,
CardTitle,
} from "/components/ui/card"`,
usageDocs: `
<Card>
<CardHeader>
<CardTitle>Card Title</CardTitle>
<CardDescription>Card Description</CardDescription>
</CardHeader>
<CardContent>
<p>Card Content</p>
</CardContent>
<CardFooter>
<p>Card Footer</p>
</CardFooter>
</Card>`,
},
checkbox: {
componentName: 'Checkbox',
importDocs: 'import { Checkbox } from "/components/ui/checkbox"',
usageDocs: '<Checkbox />',
},
input: {
componentName: 'Input',
importDocs: 'import { Input } from "/components/ui/input"',
usageDocs: '<Input />',
},
label: {
componentName: 'Label',
importDocs: 'import { Label } from "/components/ui/label"',
usageDocs: '<Label htmlFor="email">Your email address</Label>',
},
radioGroup: {
componentName: 'RadioGroup',
importDocs: `
import { Label } from "/components/ui/label"
import { RadioGroup, RadioGroupItem } from "/components/ui/radio-group"`,
usageDocs: `
<RadioGroup defaultValue="option-one">
<div className="flex items-center space-x-2">
<RadioGroupItem value="option-one" id="option-one" />
<Label htmlFor="option-one">Option One</Label>
</div>
<div className="flex items-center space-x-2">
<RadioGroupItem value="option-two" id="option-two" />
<Label htmlFor="option-two">Option Two</Label>
</div>
</RadioGroup>`,
},
select: {
componentName: 'Select',
importDocs: `
import {
Select,
SelectContent,
SelectItem,
SelectTrigger,
SelectValue,
} from "/components/ui/select"`,
usageDocs: `
<Select>
<SelectTrigger className="w-[180px]">
<SelectValue placeholder="Theme" />
</SelectTrigger>
<SelectContent>
<SelectItem value="light">Light</SelectItem>
<SelectItem value="dark">Dark</SelectItem>
<SelectItem value="system">System</SelectItem>
</SelectContent>
</Select>`,
},
textarea: {
componentName: 'Textarea',
importDocs: 'import { Textarea } from "/components/ui/textarea"',
usageDocs: '<Textarea />',
},
};
// Extra Components
const extraComponents = {
accordion: {
componentName: 'Accordion',
importDocs: `
import {
Accordion,
AccordionContent,
AccordionItem,
AccordionTrigger,
} from "/components/ui/accordion"`,
usageDocs: `
<Accordion type="single" collapsible>
<AccordionItem value="item-1">
<AccordionTrigger>Is it accessible?</AccordionTrigger>
<AccordionContent>
Yes. It adheres to the WAI-ARIA design pattern.
</AccordionContent>
</AccordionItem>
</Accordion>`,
},
alertDialog: {
componentName: 'AlertDialog',
importDocs: `
import {
AlertDialog,
AlertDialogAction,
AlertDialogCancel,
AlertDialogContent,
AlertDialogDescription,
AlertDialogFooter,
AlertDialogHeader,
AlertDialogTitle,
AlertDialogTrigger,
} from "/components/ui/alert-dialog"`,
usageDocs: `
<AlertDialog>
<AlertDialogTrigger>Open</AlertDialogTrigger>
<AlertDialogContent>
<AlertDialogHeader>
<AlertDialogTitle>Are you absolutely sure?</AlertDialogTitle>
<AlertDialogDescription>
This action cannot be undone.
</AlertDialogDescription>
</AlertDialogHeader>
<AlertDialogFooter>
<AlertDialogCancel>Cancel</AlertDialogCancel>
<AlertDialogAction>Continue</AlertDialogAction>
</AlertDialogFooter>
</AlertDialogContent>
</AlertDialog>`,
},
alert: {
componentName: 'Alert',
importDocs: `
import {
Alert,
AlertDescription,
AlertTitle,
} from "/components/ui/alert"`,
usageDocs: `
<Alert>
<AlertTitle>Heads up!</AlertTitle>
<AlertDescription>
You can add components to your app using the cli.
</AlertDescription>
</Alert>`,
},
aspectRatio: {
componentName: 'AspectRatio',
importDocs: 'import { AspectRatio } from "/components/ui/aspect-ratio"',
usageDocs: `
<AspectRatio ratio={16 / 9}>
<Image src="..." alt="Image" className="rounded-md object-cover" />
</AspectRatio>`,
},
badge: {
componentName: 'Badge',
importDocs: 'import { Badge } from "/components/ui/badge"',
usageDocs: '<Badge>Badge</Badge>',
},
calendar: {
componentName: 'Calendar',
importDocs: 'import { Calendar } from "/components/ui/calendar"',
usageDocs: '<Calendar />',
},
carousel: {
componentName: 'Carousel',
importDocs: `
import {
Carousel,
CarouselContent,
CarouselItem,
CarouselNext,
CarouselPrevious,
} from "/components/ui/carousel"`,
usageDocs: `
<Carousel>
<CarouselContent>
<CarouselItem>...</CarouselItem>
<CarouselItem>...</CarouselItem>
<CarouselItem>...</CarouselItem>
</CarouselContent>
<CarouselPrevious />
<CarouselNext />
</Carousel>`,
},
collapsible: {
componentName: 'Collapsible',
importDocs: `
import {
Collapsible,
CollapsibleContent,
CollapsibleTrigger,
} from "/components/ui/collapsible"`,
usageDocs: `
<Collapsible>
<CollapsibleTrigger>Can I use this in my project?</CollapsibleTrigger>
<CollapsibleContent>
Yes. Free to use for personal and commercial projects. No attribution required.
</CollapsibleContent>
</Collapsible>`,
},
dialog: {
componentName: 'Dialog',
importDocs: `
import {
Dialog,
DialogContent,
DialogDescription,
DialogHeader,
DialogTitle,
DialogTrigger,
} from "/components/ui/dialog"`,
usageDocs: `
<Dialog>
<DialogTrigger>Open</DialogTrigger>
<DialogContent>
<DialogHeader>
<DialogTitle>Are you absolutely sure?</DialogTitle>
<DialogDescription>
This action cannot be undone.
</DialogDescription>
</DialogHeader>
</DialogContent>
</Dialog>`,
},
dropdownMenu: {
componentName: 'DropdownMenu',
importDocs: `
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuLabel,
DropdownMenuSeparator,
DropdownMenuTrigger,
} from "/components/ui/dropdown-menu"`,
usageDocs: `
<DropdownMenu>
<DropdownMenuTrigger>Open</DropdownMenuTrigger>
<DropdownMenuContent>
<DropdownMenuLabel>My Account</DropdownMenuLabel>
<DropdownMenuSeparator />
<DropdownMenuItem>Profile</DropdownMenuItem>
<DropdownMenuItem>Billing</DropdownMenuItem>
<DropdownMenuItem>Team</DropdownMenuItem>
<DropdownMenuItem>Subscription</DropdownMenuItem>
</DropdownMenuContent>
</DropdownMenu>`,
},
menubar: {
componentName: 'Menubar',
importDocs: `
import {
Menubar,
MenubarContent,
MenubarItem,
MenubarMenu,
MenubarSeparator,
MenubarShortcut,
MenubarTrigger,
} from "/components/ui/menubar"`,
usageDocs: `
<Menubar>
<MenubarMenu>
<MenubarTrigger>File</MenubarTrigger>
<MenubarContent>
<MenubarItem>
New Tab <MenubarShortcut>⌘T</MenubarShortcut>
</MenubarItem>
<MenubarItem>New Window</MenubarItem>
<MenubarSeparator />
<MenubarItem>Share</MenubarItem>
<MenubarSeparator />
<MenubarItem>Print</MenubarItem>
</MenubarContent>
</MenubarMenu>
</Menubar>`,
},
navigationMenu: {
componentName: 'NavigationMenu',
importDocs: `
import {
NavigationMenu,
NavigationMenuContent,
NavigationMenuItem,
NavigationMenuLink,
NavigationMenuList,
NavigationMenuTrigger,
navigationMenuTriggerStyle,
} from "/components/ui/navigation-menu"`,
usageDocs: `
<NavigationMenu>
<NavigationMenuList>
<NavigationMenuItem>
<NavigationMenuTrigger>Item One</NavigationMenuTrigger>
<NavigationMenuContent>
<NavigationMenuLink>Link</NavigationMenuLink>
</NavigationMenuContent>
</NavigationMenuItem>
</NavigationMenuList>
</NavigationMenu>`,
},
popover: {
componentName: 'Popover',
importDocs: `
import {
Popover,
PopoverContent,
PopoverTrigger,
} from "/components/ui/popover"`,
usageDocs: `
<Popover>
<PopoverTrigger>Open</PopoverTrigger>
<PopoverContent>Place content for the popover here.</PopoverContent>
</Popover>`,
},
progress: {
componentName: 'Progress',
importDocs: 'import { Progress } from "/components/ui/progress"',
usageDocs: '<Progress value={33} />',
},
separator: {
componentName: 'Separator',
importDocs: 'import { Separator } from "/components/ui/separator"',
usageDocs: '<Separator />',
},
sheet: {
componentName: 'Sheet',
importDocs: `
import {
Sheet,
SheetContent,
SheetDescription,
SheetHeader,
SheetTitle,
SheetTrigger,
} from "/components/ui/sheet"`,
usageDocs: `
<Sheet>
<SheetTrigger>Open</SheetTrigger>
<SheetContent>
<SheetHeader>
<SheetTitle>Are you absolutely sure?</SheetTitle>
<SheetDescription>
This action cannot be undone.
</SheetDescription>
</SheetHeader>
</SheetContent>
</Sheet>`,
},
skeleton: {
componentName: 'Skeleton',
importDocs: 'import { Skeleton } from "/components/ui/skeleton"',
usageDocs: '<Skeleton className="w-[100px] h-[20px] rounded-full" />',
},
slider: {
componentName: 'Slider',
importDocs: 'import { Slider } from "/components/ui/slider"',
usageDocs: '<Slider defaultValue={[33]} max={100} step={1} />',
},
switch: {
componentName: 'Switch',
importDocs: 'import { Switch } from "/components/ui/switch"',
usageDocs: '<Switch />',
},
table: {
componentName: 'Table',
importDocs: `
import {
Table,
TableBody,
TableCaption,
TableCell,
TableHead,
TableHeader,
TableRow,
} from "/components/ui/table"`,
usageDocs: `
<Table>
<TableCaption>A list of your recent invoices.</TableCaption>
<TableHeader>
<TableRow>
<TableHead className="w-[100px]">Invoice</TableHead>
<TableHead>Status</TableHead>
<TableHead>Method</TableHead>
<TableHead className="text-right">Amount</TableHead>
</TableRow>
</TableHeader>
<TableBody>
<TableRow>
<TableCell className="font-medium">INV001</TableCell>
<TableCell>Paid</TableCell>
<TableCell>Credit Card</TableCell>
<TableCell className="text-right">$250.00</TableCell>
</TableRow>
</TableBody>
</Table>`,
},
tabs: {
componentName: 'Tabs',
importDocs: `
import {
Tabs,
TabsContent,
TabsList,
TabsTrigger,
} from "/components/ui/tabs"`,
usageDocs: `
<Tabs defaultValue="account" className="w-[400px]">
<TabsList>
<TabsTrigger value="account">Account</TabsTrigger>
<TabsTrigger value="password">Password</TabsTrigger>
</TabsList>
<TabsContent value="account">Make changes to your account here.</TabsContent>
<TabsContent value="password">Change your password here.</TabsContent>
</Tabs>`,
},
toast: {
componentName: 'Toast',
importDocs: `
import { useToast } from "/components/ui/use-toast"
import { Button } from "/components/ui/button"`,
usageDocs: `
export function ToastDemo() {
const { toast } = useToast()
return (
<Button
onClick={() => {
toast({
title: "Scheduled: Catch up",
description: "Friday, February 10, 2023 at 5:57 PM",
})
}}
>
Show Toast
</Button>
)
}`,
},
toggle: {
componentName: 'Toggle',
importDocs: 'import { Toggle } from "/components/ui/toggle"',
usageDocs: '<Toggle>Toggle</Toggle>',
},
tooltip: {
componentName: 'Tooltip',
importDocs: `
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from "/components/ui/tooltip"`,
usageDocs: `
<TooltipProvider>
<Tooltip>
<TooltipTrigger>Hover</TooltipTrigger>
<TooltipContent>
<p>Add to library</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>`,
},
};
const components = Object.assign({}, essentialComponents, extraComponents);
module.exports = {
components,
};

View File

@@ -1,50 +0,0 @@
const dedent = require('dedent');
/**
* Generate system prompt for AI-assisted React component creation
* @param {Object} options - Configuration options
* @param {Object} options.components - Documentation for shadcn components
* @param {boolean} [options.useXML=false] - Whether to use XML-style formatting for component instructions
* @returns {string} The generated system prompt
*/
function generateShadcnPrompt(options) {
const { components, useXML = false } = options;
let systemPrompt = dedent`
## Additional Artifact Instructions for React Components: "application/vnd.react"
There are some prestyled components (primitives) available for use. Use your best judgment and reach for any of these components when the app calls for them.
Here are the available components, along with how to import and use them:
${Object.values(components)
.map((component) => {
if (useXML) {
return dedent`
<component>
<name>${component.componentName}</name>
<import-instructions>${component.importDocs}</import-instructions>
<usage-instructions>${component.usageDocs}</usage-instructions>
</component>
`;
} else {
return dedent`
# ${component.componentName}
## Import Instructions
${component.importDocs}
## Usage Instructions
${component.usageDocs}
`;
}
})
.join('\n\n')}
`;
return systemPrompt;
}
module.exports = {
generateShadcnPrompt,
};
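A hedged usage sketch combining this generator with the component map from the previous file; both require paths are assumptions.
// Hypothetical usage of generateShadcnPrompt (sketch only; paths assumed).
const { components } = require('./components'); // assumed path for the shadcn component map above
const { generateShadcnPrompt } = require('./generateShadcnPrompt'); // assumed path for this module
const xmlInstructions = generateShadcnPrompt({ components, useXML: true });
// Produces one <component> block per entry, e.g.:
// <component>
//   <name>Button</name>
//   <import-instructions>import { Button } from "/components/ui/button"</import-instructions>
//   <usage-instructions><Button variant="outline">Button</Button></usage-instructions>
// </component>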

View File

@@ -28,7 +28,7 @@ ${convo}`,
};
const titleInstruction =
'a concise, 5-word-or-less title for the conversation, using its same language, with no punctuation. Apply title case conventions appropriate for the language. Never directly mention the language name or the word "title"';
'a concise, 5-word-or-less title for the conversation, using its same language, with no punctuation. Apply title case conventions appropriate for the language. For English, use AP Stylebook Title Case. Never directly mention the language name or the word "title"';
const titleFunctionPrompt = `In this environment you have access to a set of tools you can use to generate the conversation title.
You may call them like this:

View File

@@ -1,6 +1,4 @@
const { anthropicSettings } = require('librechat-data-provider');
const AnthropicClient = require('~/app/clients/AnthropicClient');
const AnthropicClient = require('../AnthropicClient');
const HUMAN_PROMPT = '\n\nHuman:';
const AI_PROMPT = '\n\nAssistant:';
@@ -24,7 +22,7 @@ describe('AnthropicClient', () => {
const options = {
modelOptions: {
model,
temperature: anthropicSettings.temperature.default,
temperature: 0.7,
},
};
client = new AnthropicClient('test-api-key');
@@ -35,42 +33,7 @@ describe('AnthropicClient', () => {
it('should set the options correctly', () => {
expect(client.apiKey).toBe('test-api-key');
expect(client.modelOptions.model).toBe(model);
expect(client.modelOptions.temperature).toBe(anthropicSettings.temperature.default);
});
it('should set legacy maxOutputTokens for non-Claude-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-2',
maxOutputTokens: anthropicSettings.maxOutputTokens.default,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
});
it('should not set maxOutputTokens if not provided', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-3',
},
});
expect(client.modelOptions.maxOutputTokens).toBeUndefined();
});
it('should not set legacy maxOutputTokens for Claude-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-3-opus-20240229',
maxOutputTokens: anthropicSettings.legacy.maxOutputTokens.default,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
expect(client.modelOptions.temperature).toBe(0.7);
});
});
@@ -173,212 +136,4 @@ describe('AnthropicClient', () => {
expect(prompt).toContain('You are Claude-2');
});
});
describe('getClient', () => {
it('should set legacy maxOutputTokens for non-Claude-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-2',
maxOutputTokens: anthropicSettings.legacy.maxOutputTokens.default,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
});
it('should not set legacy maxOutputTokens for Claude-3 models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-3-opus-20240229',
maxOutputTokens: anthropicSettings.legacy.maxOutputTokens.default,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
});
it('should add beta header for claude-3-5-sonnet model', () => {
const client = new AnthropicClient('test-api-key');
const modelOptions = {
model: 'claude-3-5-sonnet-20240307',
};
client.setOptions({ modelOptions, promptCache: true });
const anthropicClient = client.getClient(modelOptions);
expect(anthropicClient._options.defaultHeaders).toBeDefined();
expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
'max-tokens-3-5-sonnet-2024-07-15,prompt-caching-2024-07-31',
);
});
it('should add beta header for claude-3-haiku model', () => {
const client = new AnthropicClient('test-api-key');
const modelOptions = {
model: 'claude-3-haiku-2028',
};
client.setOptions({ modelOptions, promptCache: true });
const anthropicClient = client.getClient(modelOptions);
expect(anthropicClient._options.defaultHeaders).toBeDefined();
expect(anthropicClient._options.defaultHeaders).toHaveProperty('anthropic-beta');
expect(anthropicClient._options.defaultHeaders['anthropic-beta']).toBe(
'prompt-caching-2024-07-31',
);
});
it('should not add beta header for other models', () => {
const client = new AnthropicClient('test-api-key');
client.setOptions({
modelOptions: {
model: 'claude-2',
},
});
const anthropicClient = client.getClient();
expect(anthropicClient.defaultHeaders).not.toHaveProperty('anthropic-beta');
});
});
describe('calculateCurrentTokenCount', () => {
let client;
beforeEach(() => {
client = new AnthropicClient('test-api-key');
});
it('should calculate correct token count when usage is provided', () => {
const tokenCountMap = {
msg1: 10,
msg2: 20,
currentMsg: 30,
};
const currentMessageId = 'currentMsg';
const usage = {
input_tokens: 70,
output_tokens: 50,
};
const result = client.calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage });
expect(result).toBe(40); // 70 - (10 + 20) = 40
});
it('should return original estimate if calculation results in negative value', () => {
const tokenCountMap = {
msg1: 40,
msg2: 50,
currentMsg: 30,
};
const currentMessageId = 'currentMsg';
const usage = {
input_tokens: 80,
output_tokens: 50,
};
const result = client.calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage });
expect(result).toBe(30); // Original estimate
});
it('should handle cache creation and read input tokens', () => {
const tokenCountMap = {
msg1: 10,
msg2: 20,
currentMsg: 30,
};
const currentMessageId = 'currentMsg';
const usage = {
input_tokens: 50,
cache_creation_input_tokens: 10,
cache_read_input_tokens: 20,
output_tokens: 40,
};
const result = client.calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage });
expect(result).toBe(50); // (50 + 10 + 20) - (10 + 20) = 50
});
it('should handle missing usage properties', () => {
const tokenCountMap = {
msg1: 10,
msg2: 20,
currentMsg: 30,
};
const currentMessageId = 'currentMsg';
const usage = {
output_tokens: 40,
};
const result = client.calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage });
expect(result).toBe(30); // Original estimate
});
it('should handle empty tokenCountMap', () => {
const tokenCountMap = {};
const currentMessageId = 'currentMsg';
const usage = {
input_tokens: 50,
output_tokens: 40,
};
const result = client.calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage });
expect(result).toBe(50);
expect(Number.isNaN(result)).toBe(false);
});
it('should handle zero values in usage', () => {
const tokenCountMap = {
msg1: 10,
currentMsg: 20,
};
const currentMessageId = 'currentMsg';
const usage = {
input_tokens: 0,
cache_creation_input_tokens: 0,
cache_read_input_tokens: 0,
output_tokens: 0,
};
const result = client.calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage });
expect(result).toBe(20); // Should return original estimate
expect(Number.isNaN(result)).toBe(false);
});
it('should handle undefined usage', () => {
const tokenCountMap = {
msg1: 10,
currentMsg: 20,
};
const currentMessageId = 'currentMsg';
const usage = undefined;
const result = client.calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage });
expect(result).toBe(20); // Should return original estimate
expect(Number.isNaN(result)).toBe(false);
});
it('should handle non-numeric values in tokenCountMap', () => {
const tokenCountMap = {
msg1: 'ten',
currentMsg: 20,
};
const currentMessageId = 'currentMsg';
const usage = {
input_tokens: 30,
output_tokens: 10,
};
const result = client.calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage });
expect(result).toBe(30); // Should return 30 (input_tokens) - 0 (ignored 'ten') = 30
expect(Number.isNaN(result)).toBe(false);
});
});
});
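The calculateCurrentTokenCount expectations above reduce to a single rule. The function below is a hedged restatement of what the tests assume, not the client's actual implementation.
// Sketch of the token accounting the tests above imply (not AnthropicClient's source).
// currentTokens = (input_tokens + cache_creation_input_tokens + cache_read_input_tokens)
//                 minus the summed counts of every *other* message in tokenCountMap;
// fall back to the original estimate when usage is missing or the result is not positive.
function estimateCurrentTokenCount({ tokenCountMap, currentMessageId, usage }) {
  const originalEstimate = tokenCountMap[currentMessageId] ?? 0;
  if (!usage || typeof usage.input_tokens !== 'number') {
    return originalEstimate;
  }
  const totalInput =
    usage.input_tokens +
    (usage.cache_creation_input_tokens ?? 0) +
    (usage.cache_read_input_tokens ?? 0);
  const otherTokens = Object.entries(tokenCountMap)
    .filter(([id]) => id !== currentMessageId)
    .reduce((sum, [, count]) => sum + (typeof count === 'number' ? count : 0), 0);
  const current = totalInput - otherTokens;
  return current > 0 ? current : originalEstimate;
}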

View File

@@ -1,7 +1,7 @@
const { Constants } = require('librechat-data-provider');
const { initializeFakeClient } = require('./FakeClient');
jest.mock('~/lib/db/connectDb');
jest.mock('../../../lib/db/connectDb');
jest.mock('~/models', () => ({
User: jest.fn(),
Key: jest.fn(),
@@ -565,24 +565,18 @@ describe('BaseClient', () => {
const getReqData = jest.fn();
const opts = { getReqData };
const response = await TestClient.sendMessage('Hello, world!', opts);
expect(getReqData).toHaveBeenCalledWith(
expect.objectContaining({
userMessage: expect.objectContaining({ text: 'Hello, world!' }),
conversationId: response.conversationId,
responseMessageId: response.messageId,
}),
);
expect(getReqData).toHaveBeenCalledWith({
userMessage: expect.objectContaining({ text: 'Hello, world!' }),
conversationId: response.conversationId,
responseMessageId: response.messageId,
});
});
test('onStart is called with the correct arguments', async () => {
const onStart = jest.fn();
const opts = { onStart };
await TestClient.sendMessage('Hello, world!', opts);
expect(onStart).toHaveBeenCalledWith(
expect.objectContaining({ text: 'Hello, world!' }),
expect.any(String),
);
expect(onStart).toHaveBeenCalledWith(expect.objectContaining({ text: 'Hello, world!' }));
});
test('saveMessageToDatabase is called with the correct arguments', async () => {
@@ -633,32 +627,5 @@ describe('BaseClient', () => {
}),
);
});
test('userMessagePromise is awaited before saving response message', async () => {
// Mock the saveMessageToDatabase method
TestClient.saveMessageToDatabase = jest.fn().mockImplementation(() => {
return new Promise((resolve) => setTimeout(resolve, 100)); // Simulate a delay
});
// Send a message
const messagePromise = TestClient.sendMessage('Hello, world!');
// Wait a short time to ensure the user message save has started
await new Promise((resolve) => setTimeout(resolve, 50));
// Check that saveMessageToDatabase has been called once (for the user message)
expect(TestClient.saveMessageToDatabase).toHaveBeenCalledTimes(1);
// Wait for the message to be fully processed
await messagePromise;
// Check that saveMessageToDatabase has been called twice (once for user message, once for response)
expect(TestClient.saveMessageToDatabase).toHaveBeenCalledTimes(2);
// Check the order of calls
const calls = TestClient.saveMessageToDatabase.mock.calls;
expect(calls[0][0].isCreatedByUser).toBe(true); // First call should be for user message
expect(calls[1][0].isCreatedByUser).toBe(false); // Second call should be for response message
});
});
});

View File

@@ -38,12 +38,7 @@ const run = async () => {
"On the other hand, we denounce with righteous indignation and dislike men who are so beguiled and demoralized by the charms of pleasure of the moment, so blinded by desire, that they cannot foresee the pain and trouble that are bound to ensue; and equal blame belongs to those who fail in their duty through weakness of will, which is the same as saying through shrinking from toil and pain. These cases are perfectly simple and easy to distinguish. In a free hour, when our power of choice is untrammelled and when nothing prevents our being able to do what we like best, every pleasure is to be welcomed and every pain avoided. But in certain circumstances and owing to the claims of duty or the obligations of business it will frequently occur that pleasures have to be repudiated and annoyances accepted. The wise man therefore always holds in these matters to this principle of selection: he rejects pleasures to secure other greater pleasures, or else he endures pains to avoid worse pains."
`;
const model = 'gpt-3.5-turbo';
let maxContextTokens = 4095;
if (model === 'gpt-4') {
maxContextTokens = 8191;
} else if (model === 'gpt-4-32k') {
maxContextTokens = 32767;
}
const maxContextTokens = model === 'gpt-4' ? 8191 : model === 'gpt-4-32k' ? 32767 : 4095; // 1 less than maximum
const clientOptions = {
reverseProxyUrl: process.env.OPENAI_REVERSE_PROXY || null,
maxContextTokens,

View File

@@ -194,7 +194,6 @@ describe('PluginsClient', () => {
expect(client.getFunctionModelName('')).toBe('gpt-3.5-turbo');
});
});
describe('Azure OpenAI tests specific to Plugins', () => {
// TODO: add more tests for Azure OpenAI integration with Plugins
// let client;
@@ -221,94 +220,4 @@ describe('PluginsClient', () => {
spy.mockRestore();
});
});
describe('sendMessage with filtered tools', () => {
let TestAgent;
const apiKey = 'fake-api-key';
const mockTools = [{ name: 'tool1' }, { name: 'tool2' }, { name: 'tool3' }, { name: 'tool4' }];
beforeEach(() => {
TestAgent = new PluginsClient(apiKey, {
tools: mockTools,
modelOptions: {
model: 'gpt-3.5-turbo',
temperature: 0,
max_tokens: 2,
},
agentOptions: {
model: 'gpt-3.5-turbo',
},
});
TestAgent.options.req = {
app: {
locals: {},
},
};
TestAgent.sendMessage = jest.fn().mockImplementation(async () => {
const { filteredTools = [], includedTools = [] } = TestAgent.options.req.app.locals;
if (includedTools.length > 0) {
const tools = TestAgent.options.tools.filter((plugin) =>
includedTools.includes(plugin.name),
);
TestAgent.options.tools = tools;
} else {
const tools = TestAgent.options.tools.filter(
(plugin) => !filteredTools.includes(plugin.name),
);
TestAgent.options.tools = tools;
}
return {
text: 'Mocked response',
tools: TestAgent.options.tools,
};
});
});
test('should filter out tools when filteredTools is provided', async () => {
TestAgent.options.req.app.locals.filteredTools = ['tool1', 'tool3'];
const response = await TestAgent.sendMessage('Test message');
expect(response.tools).toHaveLength(2);
expect(response.tools).toEqual(
expect.arrayContaining([
expect.objectContaining({ name: 'tool2' }),
expect.objectContaining({ name: 'tool4' }),
]),
);
});
test('should only include specified tools when includedTools is provided', async () => {
TestAgent.options.req.app.locals.includedTools = ['tool2', 'tool4'];
const response = await TestAgent.sendMessage('Test message');
expect(response.tools).toHaveLength(2);
expect(response.tools).toEqual(
expect.arrayContaining([
expect.objectContaining({ name: 'tool2' }),
expect.objectContaining({ name: 'tool4' }),
]),
);
});
test('should prioritize includedTools over filteredTools', async () => {
TestAgent.options.req.app.locals.filteredTools = ['tool1', 'tool3'];
TestAgent.options.req.app.locals.includedTools = ['tool1', 'tool2'];
const response = await TestAgent.sendMessage('Test message');
expect(response.tools).toHaveLength(2);
expect(response.tools).toEqual(
expect.arrayContaining([
expect.objectContaining({ name: 'tool1' }),
expect.objectContaining({ name: 'tool2' }),
]),
);
});
test('should not modify tools when no filters are provided', async () => {
const response = await TestAgent.sendMessage('Test message');
expect(response.tools).toHaveLength(4);
expect(response.tools).toEqual(expect.arrayContaining(mockTools));
});
});
});
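The mocked sendMessage above encodes one selection rule: includedTools wins when present, otherwise filteredTools are removed. A hedged restatement of that rule in isolation (not the client's actual code):
// Sketch of the tool-selection rule exercised by the tests above.
function selectTools(tools, { filteredTools = [], includedTools = [] } = {}) {
  if (includedTools.length > 0) {
    return tools.filter((plugin) => includedTools.includes(plugin.name));
  }
  return tools.filter((plugin) => !filteredTools.includes(plugin.name));
}
// selectTools(mockTools, { filteredTools: ['tool1', 'tool3'], includedTools: ['tool1', 'tool2'] })
// => [{ name: 'tool1' }, { name: 'tool2' }]  (includedTools takes priority)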

View File

@@ -12,15 +12,9 @@ class GoogleSearchResults extends Tool {
this.envVarApiKey = 'GOOGLE_SEARCH_API_KEY';
this.envVarSearchEngineId = 'GOOGLE_CSE_ID';
this.override = fields.override ?? false;
this.apiKey = fields[this.envVarApiKey] ?? getEnvironmentVariable(this.envVarApiKey);
this.apiKey = fields.apiKey ?? getEnvironmentVariable(this.envVarApiKey);
this.searchEngineId =
fields[this.envVarSearchEngineId] ?? getEnvironmentVariable(this.envVarSearchEngineId);
if (!this.override && (!this.apiKey || !this.searchEngineId)) {
throw new Error(
`Missing ${this.envVarApiKey} or ${this.envVarSearchEngineId} environment variable.`,
);
}
fields.searchEngineId ?? getEnvironmentVariable(this.envVarSearchEngineId);
this.kwargs = fields?.kwargs ?? {};
this.name = 'google';

View File

@@ -1,78 +0,0 @@
const { z } = require('zod');
const { tool } = require('@langchain/core/tools');
const { getEnvironmentVariable } = require('@langchain/core/utils/env');
function createTavilySearchTool(fields = {}) {
const envVar = 'TAVILY_API_KEY';
const override = fields.override ?? false;
const apiKey = fields.apiKey ?? getApiKey(envVar, override);
const kwargs = fields?.kwargs ?? {};
function getApiKey(envVar, override) {
const key = getEnvironmentVariable(envVar);
if (!key && !override) {
throw new Error(`Missing ${envVar} environment variable.`);
}
return key;
}
return tool(
async (input) => {
const { query, ...rest } = input;
const requestBody = {
api_key: apiKey,
query,
...rest,
...kwargs,
};
const response = await fetch('https://api.tavily.com/search', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(requestBody),
});
const json = await response.json();
if (!response.ok) {
throw new Error(`Request failed with status ${response.status}: ${json.error}`);
}
return JSON.stringify(json);
},
{
name: 'tavily_search_results_json',
description:
'A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events.',
schema: z.object({
query: z.string().min(1).describe('The search query string.'),
max_results: z
.number()
.min(1)
.max(10)
.optional()
.describe('The maximum number of search results to return. Defaults to 5.'),
search_depth: z
.enum(['basic', 'advanced'])
.optional()
.describe(
'The depth of the search, affecting result quality and response time (`basic` or `advanced`). Defaults to `basic` for quick results; `advanced` returns more in-depth, higher-quality results at the cost of a longer response time and counts as 2 requests.',
),
include_images: z
.boolean()
.optional()
.describe(
'Whether to include a list of query-related images in the response. Default is False.',
),
include_answer: z
.boolean()
.optional()
.describe('Whether to include answers in the search results. Default is False.'),
}),
},
);
}
module.exports = createTavilySearchTool;
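A hedged usage sketch of the functional Tavily tool above; the module path and query are placeholders, and a valid TAVILY_API_KEY is assumed to be set.
// Hypothetical usage of createTavilySearchTool (sketch only; requires a real TAVILY_API_KEY).
const createTavilySearchTool = require('./tavilySearch'); // assumed path
const tavily = createTavilySearchTool({ kwargs: { include_answer: true } });
(async () => {
  const result = await tavily.invoke({
    query: 'latest Node.js LTS release',
    max_results: 3,
    search_depth: 'basic',
  });
  console.log(JSON.parse(result)); // the Tavily JSON response, re-parsed from the tool's string output
})();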

View File

@@ -12,7 +12,7 @@ class TavilySearchResults extends Tool {
this.envVar = 'TAVILY_API_KEY';
/* Used to initialize the Tool without necessary variables. */
this.override = fields.override ?? false;
this.apiKey = fields[this.envVar] ?? this.getApiKey();
this.apiKey = fields.apiKey ?? this.getApiKey();
this.kwargs = fields?.kwargs ?? {};
this.name = 'tavily_search_results_json';
@@ -82,9 +82,7 @@ class TavilySearchResults extends Tool {
const json = await response.json();
if (!response.ok) {
throw new Error(
`Request failed with status ${response.status}: ${json?.detail?.error || json?.error}`,
);
throw new Error(`Request failed with status ${response.status}: ${json.error}`);
}
return JSON.stringify(json);

View File

@@ -1,50 +0,0 @@
const GoogleSearch = require('../GoogleSearch');
jest.mock('node-fetch');
jest.mock('@langchain/core/utils/env');
describe('GoogleSearch', () => {
let originalEnv;
const mockApiKey = 'mock_api';
const mockSearchEngineId = 'mock_search_engine_id';
beforeAll(() => {
originalEnv = { ...process.env };
});
beforeEach(() => {
jest.resetModules();
process.env = {
...originalEnv,
GOOGLE_SEARCH_API_KEY: mockApiKey,
GOOGLE_CSE_ID: mockSearchEngineId,
};
});
afterEach(() => {
jest.clearAllMocks();
process.env = originalEnv;
});
it('should use mockApiKey and mockSearchEngineId when environment variables are not set', () => {
const instance = new GoogleSearch({
GOOGLE_SEARCH_API_KEY: mockApiKey,
GOOGLE_CSE_ID: mockSearchEngineId,
});
expect(instance.apiKey).toBe(mockApiKey);
expect(instance.searchEngineId).toBe(mockSearchEngineId);
});
it('should throw an error if GOOGLE_SEARCH_API_KEY or GOOGLE_CSE_ID is missing', () => {
delete process.env.GOOGLE_SEARCH_API_KEY;
expect(() => new GoogleSearch()).toThrow(
'Missing GOOGLE_SEARCH_API_KEY or GOOGLE_CSE_ID environment variable.',
);
process.env.GOOGLE_SEARCH_API_KEY = mockApiKey;
delete process.env.GOOGLE_CSE_ID;
expect(() => new GoogleSearch()).toThrow(
'Missing GOOGLE_SEARCH_API_KEY or GOOGLE_CSE_ID environment variable.',
);
});
});

View File

@@ -1,38 +0,0 @@
const TavilySearchResults = require('../TavilySearchResults');
jest.mock('node-fetch');
jest.mock('@langchain/core/utils/env');
describe('TavilySearchResults', () => {
let originalEnv;
const mockApiKey = 'mock_api_key';
beforeAll(() => {
originalEnv = { ...process.env };
});
beforeEach(() => {
jest.resetModules();
process.env = {
...originalEnv,
TAVILY_API_KEY: mockApiKey,
};
});
afterEach(() => {
jest.clearAllMocks();
process.env = originalEnv;
});
it('should throw an error if TAVILY_API_KEY is missing', () => {
delete process.env.TAVILY_API_KEY;
expect(() => new TavilySearchResults()).toThrow('Missing TAVILY_API_KEY environment variable.');
});
it('should use mockApiKey when TAVILY_API_KEY is not set in the environment', () => {
const instance = new TavilySearchResults({
TAVILY_API_KEY: mockApiKey,
});
expect(instance.apiKey).toBe(mockApiKey);
});
});

View File

@@ -35,7 +35,7 @@ const clearPendingReq = async ({ userId, cache: _cache }) => {
return;
}
const key = `${isEnabled(USE_REDIS) ? namespace : ''}:${userId ?? ''}`;
const key = `${USE_REDIS ? namespace : ''}:${userId ?? ''}`;
const currentReq = +((await cache.get(key)) ?? 0);
if (currentReq && currentReq >= 1) {

View File

@@ -1,11 +1,12 @@
const Keyv = require('keyv');
const { CacheKeys, ViolationTypes, Time } = require('librechat-data-provider');
const { CacheKeys, ViolationTypes } = require('librechat-data-provider');
const { logFile, violationFile } = require('./keyvFiles');
const { math, isEnabled } = require('~/server/utils');
const keyvRedis = require('./keyvRedis');
const keyvMongo = require('./keyvMongo');
const { BAN_DURATION, USE_REDIS } = process.env ?? {};
const THIRTY_MINUTES = 1800000;
const duration = math(BAN_DURATION, 7200000);
@@ -23,25 +24,13 @@ const config = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.CONFIG_STORE });
const roles = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.ROLES });
const tokenConfig = isEnabled(USE_REDIS) // ttl: 30 minutes
? new Keyv({ store: keyvRedis, ttl: THIRTY_MINUTES })
: new Keyv({ namespace: CacheKeys.TOKEN_CONFIG, ttl: THIRTY_MINUTES });
const audioRuns = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis, ttl: Time.TEN_MINUTES })
: new Keyv({ namespace: CacheKeys.AUDIO_RUNS, ttl: Time.TEN_MINUTES });
const messages = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis, ttl: Time.FIVE_MINUTES })
: new Keyv({ namespace: CacheKeys.MESSAGES, ttl: Time.FIVE_MINUTES });
const tokenConfig = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis, ttl: Time.THIRTY_MINUTES })
: new Keyv({ namespace: CacheKeys.TOKEN_CONFIG, ttl: Time.THIRTY_MINUTES });
const genTitle = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis, ttl: Time.TWO_MINUTES })
: new Keyv({ namespace: CacheKeys.GEN_TITLE, ttl: Time.TWO_MINUTES });
const genTitle = isEnabled(USE_REDIS) // ttl: 2 minutes
? new Keyv({ store: keyvRedis, ttl: 120000 })
: new Keyv({ namespace: CacheKeys.GEN_TITLE, ttl: 120000 });
const modelQueries = isEnabled(process.env.USE_REDIS)
? new Keyv({ store: keyvRedis })
@@ -49,10 +38,9 @@ const modelQueries = isEnabled(process.env.USE_REDIS)
const abortKeys = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.ABORT_KEYS, ttl: Time.TEN_MINUTES });
: new Keyv({ namespace: CacheKeys.ABORT_KEYS, ttl: 600000 });
const namespaces = {
[CacheKeys.ROLES]: roles,
[CacheKeys.CONFIG_STORE]: config,
pending_req,
[ViolationTypes.BAN]: new Keyv({ store: keyvMongo, namespace: CacheKeys.BANS, ttl: duration }),
@@ -67,14 +55,7 @@ const namespaces = {
message_limit: createViolationInstance('message_limit'),
token_balance: createViolationInstance(ViolationTypes.TOKEN_BALANCE),
registrations: createViolationInstance('registrations'),
[ViolationTypes.TTS_LIMIT]: createViolationInstance(ViolationTypes.TTS_LIMIT),
[ViolationTypes.STT_LIMIT]: createViolationInstance(ViolationTypes.STT_LIMIT),
[ViolationTypes.CONVO_ACCESS]: createViolationInstance(ViolationTypes.CONVO_ACCESS),
[ViolationTypes.FILE_UPLOAD_LIMIT]: createViolationInstance(ViolationTypes.FILE_UPLOAD_LIMIT),
[ViolationTypes.VERIFY_EMAIL_LIMIT]: createViolationInstance(ViolationTypes.VERIFY_EMAIL_LIMIT),
[ViolationTypes.RESET_PASSWORD_LIMIT]: createViolationInstance(
ViolationTypes.RESET_PASSWORD_LIMIT,
),
[ViolationTypes.ILLEGAL_MODEL_REQUEST]: createViolationInstance(
ViolationTypes.ILLEGAL_MODEL_REQUEST,
),
@@ -83,8 +64,6 @@ const namespaces = {
[CacheKeys.TOKEN_CONFIG]: tokenConfig,
[CacheKeys.GEN_TITLE]: genTitle,
[CacheKeys.MODEL_QUERIES]: modelQueries,
[CacheKeys.AUDIO_RUNS]: audioRuns,
[CacheKeys.MESSAGES]: messages,
};
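Every cache declaration above follows the same pattern: a shared Redis store when USE_REDIS is enabled, otherwise an in-memory namespaced Keyv, optionally with a TTL. A hedged sketch of that pattern in isolation (not a helper that exists in the project):
// Sketch of the Keyv construction pattern used above.
const Keyv = require('keyv');
function createCache({ useRedis, redisStore, namespace, ttl }) {
  return useRedis
    ? new Keyv({ store: redisStore, ttl })
    : new Keyv({ namespace, ttl });
}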
/**

View File

@@ -1,6 +1,6 @@
const { isEnabled } = require('~/server/utils');
const getLogStores = require('./getLogStores');
const banViolation = require('./banViolation');
const { isEnabled } = require('../server/utils');
/**
* Logs the violation.

View File

@@ -27,25 +27,26 @@ function getMatchingSensitivePatterns(valueStr) {
}
/**
* Redacts sensitive information from a console message and trims it to a specified length if provided.
* Redacts sensitive information from a console message.
*
* @param {string} str - The console message to be redacted.
* @param {number} [trimLength] - The optional length at which to trim the redacted message.
* @returns {string} - The redacted and optionally trimmed console message.
* @returns {string} - The redacted console message.
*/
function redactMessage(str, trimLength) {
function redactMessage(str) {
if (!str) {
return '';
}
const patterns = getMatchingSensitivePatterns(str);
if (patterns.length === 0) {
return str;
}
patterns.forEach((pattern) => {
str = str.replace(pattern, '$1[REDACTED]');
});
if (trimLength !== undefined && str.length > trimLength) {
return `${str.substring(0, trimLength)}...`;
}
return str;
}
@@ -109,14 +110,6 @@ const condenseArray = (item) => {
* @returns {string} - The formatted log message.
*/
const debugTraverse = winston.format.printf(({ level, message, timestamp, ...metadata }) => {
if (!message) {
return `${timestamp} ${level}`;
}
if (!message?.trim || typeof message !== 'string') {
return `${timestamp} ${level}: ${JSON.stringify(message)}`;
}
let msg = `${timestamp} ${level}: ${truncateLongStrings(message?.trim(), 150)}`;
try {
if (level !== 'debug') {

View File

@@ -5,16 +5,17 @@ const Action = mongoose.model('action', actionSchema);
/**
* Update an action with new data without overwriting existing properties,
* or create a new action if it doesn't exist.
* or create a new action if it doesn't exist, within a transaction session if provided.
*
* @param {Object} searchParams - The search parameters to find the action to update.
* @param {string} searchParams.action_id - The ID of the action to update.
* @param {string} searchParams.user - The user ID of the action's author.
* @param {Object} updateData - An object containing the properties to update.
* @returns {Promise<Action>} The updated or newly created action document as a plain object.
* @param {mongoose.ClientSession} [session] - The transaction session to use.
* @returns {Promise<Object>} The updated or newly created action document as a plain object.
*/
const updateAction = async (searchParams, updateData) => {
const options = { new: true, upsert: true };
const updateAction = async (searchParams, updateData, session = null) => {
const options = { new: true, upsert: true, session };
return await Action.findOneAndUpdate(searchParams, updateData, options).lean();
};
@@ -23,7 +24,7 @@ const updateAction = async (searchParams, updateData) => {
*
* @param {Object} searchParams - The search parameters to find matching actions.
* @param {boolean} includeSensitive - Flag to include sensitive data in the metadata.
* @returns {Promise<Array<Action>>} A promise that resolves to an array of action documents as plain objects.
* @returns {Promise<Array<Object>>} A promise that resolves to an array of action documents as plain objects.
*/
const getActions = async (searchParams, includeSensitive = false) => {
const actions = await Action.find(searchParams).lean();
@@ -48,27 +49,31 @@ const getActions = async (searchParams, includeSensitive = false) => {
};
/**
* Deletes an action by params.
* Deletes an action by params, within a transaction session if provided.
*
* @param {Object} searchParams - The search parameters to find the action to delete.
* @param {string} searchParams.action_id - The ID of the action to delete.
* @param {string} searchParams.user - The user ID of the action's author.
* @returns {Promise<Action>} A promise that resolves to the deleted action document as a plain object, or null if no document was found.
* @param {mongoose.ClientSession} [session] - The transaction session to use (optional).
* @returns {Promise<Object>} A promise that resolves to the deleted action document as a plain object, or null if no document was found.
*/
const deleteAction = async (searchParams) => {
return await Action.findOneAndDelete(searchParams).lean();
const deleteAction = async (searchParams, session = null) => {
const options = session ? { session } : {};
return await Action.findOneAndDelete(searchParams, options).lean();
};
/**
* Deletes actions by params.
* Deletes actions by params, within a transaction session if provided.
*
* @param {Object} searchParams - The search parameters to find the actions to delete.
* @param {string} searchParams.action_id - The ID of the action(s) to delete.
* @param {string} searchParams.user - The user ID of the action's author.
* @param {mongoose.ClientSession} [session] - The transaction session to use (optional).
* @returns {Promise<Number>} A promise that resolves to the number of deleted action documents.
*/
const deleteActions = async (searchParams) => {
const result = await Action.deleteMany(searchParams);
const deleteActions = async (searchParams, session = null) => {
const options = session ? { session } : {};
const result = await Action.deleteMany(searchParams, options);
return result.deletedCount;
};
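A hedged sketch of how the optional session parameter above could be used to keep related action writes atomic; the require path, export list, and IDs are assumptions.
// Hypothetical transactional usage of the session-aware helpers above (sketch only).
const mongoose = require('mongoose');
const { updateAction, deleteActions } = require('./Action'); // assumed path and exports
async function replaceAction(user, oldActionId, newActionId, updateData) {
  const session = await mongoose.startSession();
  try {
    let action;
    await session.withTransaction(async () => {
      await deleteActions({ action_id: oldActionId, user }, session);
      action = await updateAction({ action_id: newActionId, user }, updateData, session);
    });
    return action;
  } finally {
    await session.endSession();
  }
}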

View File

@@ -1,142 +0,0 @@
const mongoose = require('mongoose');
const { GLOBAL_PROJECT_NAME } = require('librechat-data-provider').Constants;
const {
getProjectByName,
addAgentIdsToProject,
removeAgentIdsFromProject,
removeAgentFromAllProjects,
} = require('./Project');
const agentSchema = require('./schema/agent');
const Agent = mongoose.model('agent', agentSchema);
/**
* Create an agent with the provided data.
* @param {Object} agentData - The agent data to create.
* @returns {Promise<Agent>} The created agent document as a plain object.
* @throws {Error} If the agent creation fails.
*/
const createAgent = async (agentData) => {
return await Agent.create(agentData);
};
/**
* Get an agent document based on the provided ID.
*
* @param {Object} searchParameter - The search parameters to find the agent to update.
* @param {string} searchParameter.id - The ID of the agent to update.
* @param {string} searchParameter.author - The user ID of the agent's author.
* @returns {Promise<Agent|null>} The agent document as a plain object, or null if not found.
*/
const getAgent = async (searchParameter) => await Agent.findOne(searchParameter).lean();
/**
* Update an agent with new data without overwriting existing
* properties, or create a new agent if it doesn't exist.
*
* @param {Object} searchParameter - The search parameters to find the agent to update.
* @param {string} searchParameter.id - The ID of the agent to update.
* @param {string} [searchParameter.author] - The user ID of the agent's author.
* @param {Object} updateData - An object containing the properties to update.
* @returns {Promise<Agent>} The updated or newly created agent document as a plain object.
*/
const updateAgent = async (searchParameter, updateData) => {
const options = { new: true, upsert: true };
return await Agent.findOneAndUpdate(searchParameter, updateData, options).lean();
};
/**
* Deletes an agent based on the provided ID.
*
* @param {Object} searchParameter - The search parameters to find the agent to delete.
* @param {string} searchParameter.id - The ID of the agent to delete.
* @param {string} [searchParameter.author] - The user ID of the agent's author.
* @returns {Promise<void>} Resolves when the agent has been successfully deleted.
*/
const deleteAgent = async (searchParameter) => {
const agent = await Agent.findOneAndDelete(searchParameter);
if (agent) {
await removeAgentFromAllProjects(agent.id);
}
return agent;
};
/**
* Get all agents.
* @param {Object} searchParameter - The search parameters to find matching agents.
* @param {string} searchParameter.author - The user ID of the agent's author.
* @returns {Promise<Object>} A promise that resolves to an object containing the agents data and pagination info.
*/
const getListAgents = async (searchParameter) => {
const { author, ...otherParams } = searchParameter;
let query = Object.assign({ author }, otherParams);
const globalProject = await getProjectByName(GLOBAL_PROJECT_NAME, ['agentIds']);
if (globalProject && (globalProject.agentIds?.length ?? 0) > 0) {
const globalQuery = { id: { $in: globalProject.agentIds }, ...otherParams };
delete globalQuery.author;
query = { $or: [globalQuery, query] };
}
const agents = await Agent.find(query, {
id: 1,
name: 1,
avatar: 1,
projectIds: 1,
}).lean();
const hasMore = agents.length > 0;
const firstId = agents.length > 0 ? agents[0].id : null;
const lastId = agents.length > 0 ? agents[agents.length - 1].id : null;
return {
data: agents,
has_more: hasMore,
first_id: firstId,
last_id: lastId,
};
};
/**
* Updates the projects associated with an agent, adding and removing project IDs as specified.
* This function also updates the corresponding projects to include or exclude the agent ID.
*
* @param {string} agentId - The ID of the agent to update.
* @param {string[]} [projectIds] - Array of project IDs to add to the agent.
* @param {string[]} [removeProjectIds] - Array of project IDs to remove from the agent.
* @returns {Promise<MongoAgent>} The updated agent document.
* @throws {Error} If there's an error updating the agent or projects.
*/
const updateAgentProjects = async (agentId, projectIds, removeProjectIds) => {
const updateOps = {};
if (removeProjectIds && removeProjectIds.length > 0) {
for (const projectId of removeProjectIds) {
await removeAgentIdsFromProject(projectId, [agentId]);
}
updateOps.$pull = { projectIds: { $in: removeProjectIds } };
}
if (projectIds && projectIds.length > 0) {
for (const projectId of projectIds) {
await addAgentIdsToProject(projectId, [agentId]);
}
updateOps.$addToSet = { projectIds: { $each: projectIds } };
}
if (Object.keys(updateOps).length === 0) {
return await getAgent({ id: agentId });
}
return await updateAgent({ id: agentId }, updateOps);
};
module.exports = {
createAgent,
getAgent,
updateAgent,
deleteAgent,
getListAgents,
updateAgentProjects,
};
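A hedged usage sketch of updateAgentProjects; the require path and the agent/project IDs are placeholders.
// Hypothetical usage (sketch only): share an agent with one project and unshare it from another.
const { updateAgentProjects } = require('./Agent'); // assumed path
async function shareAgentExample() {
  const updated = await updateAgentProjects('agent_abc123', ['project_new'], ['project_old']);
  return updated; // agent document after $addToSet / $pull on projectIds
}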

View File

@@ -5,16 +5,17 @@ const Assistant = mongoose.model('assistant', assistantSchema);
/**
* Update an assistant with new data without overwriting existing properties,
* or create a new assistant if it doesn't exist.
* or create a new assistant if it doesn't exist, within a transaction session if provided.
*
* @param {Object} searchParams - The search parameters to find the assistant to update.
* @param {string} searchParams.assistant_id - The ID of the assistant to update.
* @param {string} searchParams.user - The user ID of the assistant's author.
* @param {Object} updateData - An object containing the properties to update.
* @returns {Promise<AssistantDocument>} The updated or newly created assistant document as a plain object.
* @param {mongoose.ClientSession} [session] - The transaction session to use (optional).
* @returns {Promise<Object>} The updated or newly created assistant document as a plain object.
*/
const updateAssistantDoc = async (searchParams, updateData) => {
const options = { new: true, upsert: true };
const updateAssistant = async (searchParams, updateData, session = null) => {
const options = { new: true, upsert: true, session };
return await Assistant.findOneAndUpdate(searchParams, updateData, options).lean();
};
@@ -24,7 +25,7 @@ const updateAssistantDoc = async (searchParams, updateData) => {
* @param {Object} searchParams - The search parameters to find the assistant to update.
* @param {string} searchParams.assistant_id - The ID of the assistant to update.
* @param {string} searchParams.user - The user ID of the assistant's author.
* @returns {Promise<AssistantDocument|null>} The assistant document as a plain object, or null if not found.
* @returns {Promise<Object|null>} The assistant document as a plain object, or null if not found.
*/
const getAssistant = async (searchParams) => await Assistant.findOne(searchParams).lean();
@@ -32,17 +33,10 @@ const getAssistant = async (searchParams) => await Assistant.findOne(searchParam
* Retrieves all assistants that match the given search parameters.
*
* @param {Object} searchParams - The search parameters to find matching assistants.
* @param {Object} [select] - Optional. Specifies which document fields to include or exclude.
* @returns {Promise<Array<AssistantDocument>>} A promise that resolves to an array of assistant documents as plain objects.
* @returns {Promise<Array<Object>>} A promise that resolves to an array of assistant documents as plain objects.
*/
const getAssistants = async (searchParams, select = null) => {
let query = Assistant.find(searchParams);
if (select) {
query = query.select(select);
}
return await query.lean();
const getAssistants = async (searchParams) => {
return await Assistant.find(searchParams).lean();
};
/**
@@ -58,7 +52,7 @@ const deleteAssistant = async (searchParams) => {
};
module.exports = {
updateAssistantDoc,
updateAssistant,
deleteAssistant,
getAssistants,
getAssistant,

View File

@@ -1,61 +0,0 @@
const { logger } = require('~/config');
// const { Categories } = require('./schema/categories');
const options = [
{
label: '',
value: '',
},
{
label: 'idea',
value: 'idea',
},
{
label: 'travel',
value: 'travel',
},
{
label: 'teach_or_explain',
value: 'teach_or_explain',
},
{
label: 'write',
value: 'write',
},
{
label: 'shop',
value: 'shop',
},
{
label: 'code',
value: 'code',
},
{
label: 'misc',
value: 'misc',
},
{
label: 'roleplay',
value: 'roleplay',
},
{
label: 'finance',
value: 'finance',
},
];
module.exports = {
/**
* Retrieves the categories asynchronously.
* @returns {Promise<TGetCategoriesResponse>} An array of category objects.
* @throws {Error} If there is an error retrieving the categories.
*/
getCategories: async () => {
try {
// const categories = await Categories.find();
return options;
} catch (error) {
logger.error('Error getting categories', error);
return [];
}
},
};

View File

@@ -2,20 +2,6 @@ const Conversation = require('./schema/convoSchema');
const { getMessages, deleteMessages } = require('./Message');
const logger = require('~/config/winston');
/**
* Searches for a conversation by conversationId and returns a lean document with only conversationId and user.
* @param {string} conversationId - The conversation's ID.
* @returns {Promise<{conversationId: string, user: string} | null>} The conversation object with selected fields or null if not found.
*/
const searchConversation = async (conversationId) => {
try {
return await Conversation.findOne({ conversationId }, 'conversationId user').lean();
} catch (error) {
logger.error('[searchConversation] Error searching conversation', error);
throw new Error('Error searching conversation');
}
};
/**
* Retrieves a single conversation for a given user and conversation ID.
* @param {string} user - The user's ID.
@@ -33,40 +19,20 @@ const getConvo = async (user, conversationId) => {
module.exports = {
Conversation,
searchConversation,
/**
* Saves a conversation to the database.
* @param {Object} req - The request object.
* @param {string} conversationId - The conversation's ID.
* @param {Object} metadata - Additional metadata to log for operation.
* @returns {Promise<TConversation>} The conversation object.
*/
saveConvo: async (req, { conversationId, newConversationId, ...convo }, metadata) => {
saveConvo: async (user, { conversationId, newConversationId, ...convo }) => {
try {
if (metadata && metadata?.context) {
logger.debug(`[saveConvo] ${metadata.context}`);
}
const messages = await getMessages({ conversationId }, '_id');
const update = { ...convo, messages, user: req.user.id };
const messages = await getMessages({ conversationId });
const update = { ...convo, messages, user };
if (newConversationId) {
update.conversationId = newConversationId;
}
const conversation = await Conversation.findOneAndUpdate(
{ conversationId, user: req.user.id },
update,
{
new: true,
upsert: true,
},
);
return conversation.toObject();
return await Conversation.findOneAndUpdate({ conversationId: conversationId, user }, update, {
new: true,
upsert: true,
});
} catch (error) {
logger.error('[saveConvo] Error saving conversation', error);
if (metadata && metadata?.context) {
logger.info(`[saveConvo] ${metadata.context}`);
}
return { message: 'Error saving conversation' };
}
},
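A minimal caller sketch for the req-based saveConvo signature above; the require path, wrapper name, and title field are assumptions.
// Hypothetical caller; the require path is an assumption.
const { saveConvo } = require('~/models/Conversation');

async function renameConversation(req, conversationId, title) {
  // req.user.id scopes the upsert to the authenticated user; metadata.context is only logged.
  return await saveConvo(req, { conversationId, title }, { context: 'renameConversation' });
}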
@@ -88,16 +54,13 @@ module.exports = {
throw new Error('Failed to save conversations in bulk.');
}
},
getConvosByPage: async (user, pageNumber = 1, pageSize = 25, isArchived = false, tags) => {
getConvosByPage: async (user, pageNumber = 1, pageSize = 25, isArchived = false) => {
const query = { user };
if (isArchived) {
query.isArchived = true;
} else {
query.$or = [{ isArchived: false }, { isArchived: { $exists: false } }];
}
if (Array.isArray(tags) && tags.length > 0) {
query.tags = { $in: tags };
}
try {
const totalConvos = (await Conversation.countDocuments(query)) || 1;
const totalPages = Math.ceil(totalConvos / pageSize);

View File

@@ -1,249 +0,0 @@
const ConversationTag = require('./schema/conversationTagSchema');
const Conversation = require('./schema/convoSchema');
const logger = require('~/config/winston');
/**
* Retrieves all conversation tags for a user.
* @param {string} user - The user ID.
* @returns {Promise<Array>} An array of conversation tags.
*/
const getConversationTags = async (user) => {
try {
return await ConversationTag.find({ user }).sort({ position: 1 }).lean();
} catch (error) {
logger.error('[getConversationTags] Error getting conversation tags', error);
throw new Error('Error getting conversation tags');
}
};
/**
* Creates a new conversation tag.
* @param {string} user - The user ID.
* @param {Object} data - The tag data.
* @param {string} data.tag - The tag name.
* @param {string} [data.description] - The tag description.
* @param {boolean} [data.addToConversation] - Whether to add the tag to a conversation.
* @param {string} [data.conversationId] - The conversation ID to add the tag to.
* @returns {Promise<Object>} The created tag.
*/
const createConversationTag = async (user, data) => {
try {
const { tag, description, addToConversation, conversationId } = data;
const existingTag = await ConversationTag.findOne({ user, tag }).lean();
if (existingTag) {
return existingTag;
}
const maxPosition = await ConversationTag.findOne({ user }).sort('-position').lean();
const position = (maxPosition?.position || 0) + 1;
const newTag = await ConversationTag.findOneAndUpdate(
{ tag, user },
{
tag,
user,
count: addToConversation ? 1 : 0,
position,
description,
$setOnInsert: { createdAt: new Date() },
},
{
new: true,
upsert: true,
lean: true,
},
);
if (addToConversation && conversationId) {
await Conversation.findOneAndUpdate(
{ user, conversationId },
{ $addToSet: { tags: tag } },
{ new: true },
);
}
return newTag;
} catch (error) {
logger.error('[createConversationTag] Error creating conversation tag', error);
throw new Error('Error creating conversation tag');
}
};
/**
* Updates an existing conversation tag.
* @param {string} user - The user ID.
* @param {string} oldTag - The current tag name.
* @param {Object} data - The updated tag data.
* @param {string} [data.tag] - The new tag name.
* @param {string} [data.description] - The updated description.
* @param {number} [data.position] - The new position.
* @returns {Promise<Object>} The updated tag.
*/
const updateConversationTag = async (user, oldTag, data) => {
try {
const { tag: newTag, description, position } = data;
const existingTag = await ConversationTag.findOne({ user, tag: oldTag }).lean();
if (!existingTag) {
return null;
}
if (newTag && newTag !== oldTag) {
const tagAlreadyExists = await ConversationTag.findOne({ user, tag: newTag }).lean();
if (tagAlreadyExists) {
throw new Error('Tag already exists');
}
await Conversation.updateMany({ user, tags: oldTag }, { $set: { 'tags.$': newTag } });
}
const updateData = {};
if (newTag) {
updateData.tag = newTag;
}
if (description !== undefined) {
updateData.description = description;
}
if (position !== undefined) {
await adjustPositions(user, existingTag.position, position);
updateData.position = position;
}
return await ConversationTag.findOneAndUpdate({ user, tag: oldTag }, updateData, {
new: true,
lean: true,
});
} catch (error) {
logger.error('[updateConversationTag] Error updating conversation tag', error);
throw new Error('Error updating conversation tag');
}
};
/**
* Adjusts positions of tags when a tag's position is changed.
* @param {string} user - The user ID.
* @param {number} oldPosition - The old position of the tag.
* @param {number} newPosition - The new position of the tag.
* @returns {Promise<void>}
*/
const adjustPositions = async (user, oldPosition, newPosition) => {
if (oldPosition === newPosition) {
return;
}
const update = oldPosition < newPosition ? { $inc: { position: -1 } } : { $inc: { position: 1 } };
const position =
oldPosition < newPosition
? {
$gt: Math.min(oldPosition, newPosition),
$lte: Math.max(oldPosition, newPosition),
}
: {
$gte: Math.min(oldPosition, newPosition),
$lt: Math.max(oldPosition, newPosition),
};
await ConversationTag.updateMany(
{
user,
position,
},
update,
);
};
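To make the range math above concrete: moving a tag down from position 2 to 5 decrements the tags currently at 3–5, while moving up from 5 to 2 increments the tags at 2–4; the moved tag's own position is written separately by updateConversationTag. A small in-memory sketch (not part of the module) that mirrors the same ranges:
// Illustrative only: mirrors the $inc ranges above for a plain array of { tag, position }.
function simulateAdjustPositions(tags, oldPosition, newPosition) {
  if (oldPosition === newPosition) {
    return tags;
  }
  const movingDown = oldPosition < newPosition;
  return tags.map((t) => {
    const inRange = movingDown
      ? t.position > oldPosition && t.position <= newPosition
      : t.position >= newPosition && t.position < oldPosition;
    return inRange ? { ...t, position: t.position + (movingDown ? -1 : 1) } : t;
  });
}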
/**
* Deletes a conversation tag.
* @param {string} user - The user ID.
* @param {string} tag - The tag to delete.
* @returns {Promise<Object>} The deleted tag.
*/
const deleteConversationTag = async (user, tag) => {
try {
const deletedTag = await ConversationTag.findOneAndDelete({ user, tag }).lean();
if (!deletedTag) {
return null;
}
await Conversation.updateMany({ user, tags: tag }, { $pull: { tags: tag } });
await ConversationTag.updateMany(
{ user, position: { $gt: deletedTag.position } },
{ $inc: { position: -1 } },
);
return deletedTag;
} catch (error) {
logger.error('[deleteConversationTag] Error deleting conversation tag', error);
throw new Error('Error deleting conversation tag');
}
};
/**
* Updates tags for a specific conversation.
* @param {string} user - The user ID.
* @param {string} conversationId - The conversation ID.
* @param {string[]} tags - The new set of tags for the conversation.
* @returns {Promise<string[]>} The updated list of tags for the conversation.
*/
const updateTagsForConversation = async (user, conversationId, tags) => {
try {
const conversation = await Conversation.findOne({ user, conversationId }).lean();
if (!conversation) {
throw new Error('Conversation not found');
}
const oldTags = new Set(conversation.tags);
const newTags = new Set(tags);
const addedTags = [...newTags].filter((tag) => !oldTags.has(tag));
const removedTags = [...oldTags].filter((tag) => !newTags.has(tag));
const bulkOps = [];
for (const tag of addedTags) {
bulkOps.push({
updateOne: {
filter: { user, tag },
update: { $inc: { count: 1 } },
upsert: true,
},
});
}
for (const tag of removedTags) {
bulkOps.push({
updateOne: {
filter: { user, tag },
update: { $inc: { count: -1 } },
},
});
}
if (bulkOps.length > 0) {
await ConversationTag.bulkWrite(bulkOps);
}
const updatedConversation = (
await Conversation.findOneAndUpdate(
{ user, conversationId },
{ $set: { tags: [...newTags] } },
{ new: true },
)
).toObject();
return updatedConversation.tags;
} catch (error) {
logger.error('[updateTagsForConversation] Error updating tags', error);
throw new Error('Error updating tags for conversation');
}
};
module.exports = {
getConversationTags,
createConversationTag,
updateConversationTag,
deleteConversationTag,
updateTagsForConversation,
};
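A brief usage sketch of the exports above; the require path and tag values are placeholders.
// Hypothetical caller; the require path is an assumption.
const { createConversationTag, updateTagsForConversation } = require('~/models/ConversationTag');

async function tagConversation(userId, conversationId) {
  await createConversationTag(userId, { tag: 'work', addToConversation: true, conversationId });
  // Replaces the conversation's tag set; per-tag counts are reconciled via bulkWrite.
  return await updateTagsForConversation(userId, conversationId, ['work', 'ideas']);
}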

View File

@@ -97,12 +97,8 @@ const deleteFileByFilter = async (filter) => {
* @param {Array<string>} file_ids - The unique identifiers of the files to delete.
* @returns {Promise<Object>} A promise that resolves to the result of the deletion operation.
*/
const deleteFiles = async (file_ids, user) => {
let deleteQuery = { file_id: { $in: file_ids } };
if (user) {
deleteQuery = { user: user };
}
return await File.deleteMany(deleteQuery);
const deleteFiles = async (file_ids) => {
return await File.deleteMany({ file_id: { $in: file_ids } });
};
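A short sketch contrasting the two signatures above; the require path and wrapper are hypothetical. Note that in the first variant, passing a user replaces the filter entirely, so all of that user's files are deleted rather than only the listed file_ids.
// Hypothetical caller; the require path is an assumption.
const { deleteFiles } = require('~/models/File');

async function purgeFiles(fileIds, userId) {
  if (userId) {
    // User-scoped variant: deletes every file owned by the user (file_ids are ignored).
    return await deleteFiles(fileIds, userId);
  }
  // ID-scoped variant: deletes only the listed file_ids.
  return await deleteFiles(fileIds);
}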
module.exports = {

View File

@@ -1,294 +1,191 @@
const { z } = require('zod');
const Message = require('./schema/messageSchema');
const { logger } = require('~/config');
const logger = require('~/config/winston');
const idSchema = z.string().uuid();
/**
* Saves a message in the database.
*
* @async
* @function saveMessage
* @param {Express.Request} req - The request object containing user information.
* @param {Object} params - The message data object.
* @param {string} params.endpoint - The endpoint where the message originated.
* @param {string} params.iconURL - The URL of the sender's icon.
* @param {string} params.messageId - The unique identifier for the message.
* @param {string} params.newMessageId - The new unique identifier for the message (if applicable).
* @param {string} params.conversationId - The identifier of the conversation.
* @param {string} [params.parentMessageId] - The identifier of the parent message, if any.
* @param {string} params.sender - The identifier of the sender.
* @param {string} params.text - The text content of the message.
* @param {boolean} params.isCreatedByUser - Indicates if the message was created by the user.
* @param {string} [params.error] - Any error associated with the message.
* @param {boolean} [params.unfinished] - Indicates if the message is unfinished.
* @param {Object[]} [params.files] - An array of files associated with the message.
* @param {boolean} [params.isEdited] - Indicates if the message was edited.
* @param {string} [params.finish_reason] - Reason for finishing the message.
* @param {number} [params.tokenCount] - The number of tokens in the message.
* @param {string} [params.plugin] - Plugin associated with the message.
* @param {string[]} [params.plugins] - An array of plugins associated with the message.
* @param {string} [params.model] - The model used to generate the message.
* @param {Object} [metadata] - Additional metadata for this operation
* @param {string} [metadata.context] - The context of the operation
* @returns {Promise<TMessage>} The updated or newly inserted message document.
* @throws {Error} If there is an error in saving the message.
*/
async function saveMessage(req, params, metadata) {
if (!req?.user?.id) {
throw new Error('User not authenticated');
}
const validConvoId = idSchema.safeParse(params.conversationId);
if (!validConvoId.success) {
logger.warn(`Invalid conversation ID: ${params.conversationId}`);
logger.info(`---\`saveMessage\` context: ${metadata?.context}`);
logger.info(`---Invalid conversation ID Params: ${JSON.stringify(params, null, 2)}`);
return;
}
try {
const update = {
...params,
user: req.user.id,
messageId: params.newMessageId || params.messageId,
};
const message = await Message.findOneAndUpdate(
{ messageId: params.messageId, user: req.user.id },
update,
{ upsert: true, new: true },
);
return message.toObject();
} catch (err) {
logger.error('Error saving message:', err);
logger.info(`---\`saveMessage\` context: ${metadata?.context}`);
throw err;
}
}
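A minimal caller sketch for saveMessage as defined above; the wrapper name and field values are placeholders.
// Hypothetical caller; values are placeholders.
const { v4: uuidv4 } = require('uuid');
const { saveMessage } = require('~/models/Message');

async function saveUserText(req, conversationId, text) {
  // conversationId must be a valid UUID; otherwise saveMessage logs a warning and returns undefined.
  return await saveMessage(
    req,
    { conversationId, messageId: uuidv4(), sender: 'User', text, isCreatedByUser: true },
    { context: 'saveUserText sketch' },
  );
}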
/**
* Saves multiple messages in the database in bulk.
*
* @async
* @function bulkSaveMessages
* @param {Object[]} messages - An array of message objects to save.
* @returns {Promise<Object>} The result of the bulk write operation.
* @throws {Error} If there is an error in saving messages in bulk.
*/
async function bulkSaveMessages(messages) {
try {
const bulkOps = messages.map((message) => ({
updateOne: {
filter: { messageId: message.messageId },
update: message,
upsert: true,
},
}));
const result = await Message.bulkWrite(bulkOps);
return result;
} catch (err) {
logger.error('Error saving messages in bulk:', err);
throw err;
}
}
/**
* Records a message in the database.
*
* @async
* @function recordMessage
* @param {Object} params - The message data object.
* @param {string} params.user - The identifier of the user.
* @param {string} params.endpoint - The endpoint where the message originated.
* @param {string} params.messageId - The unique identifier for the message.
* @param {string} params.conversationId - The identifier of the conversation.
* @param {string} [params.parentMessageId] - The identifier of the parent message, if any.
* @param {Partial<TMessage>} rest - Any additional properties from the TMessage typedef not explicitly listed.
* @returns {Promise<Object>} The updated or newly inserted message document.
* @throws {Error} If there is an error in saving the message.
*/
async function recordMessage({
user,
endpoint,
messageId,
conversationId,
parentMessageId,
...rest
}) {
try {
// No parsing of convoId as may use threadId
const message = {
user,
endpoint,
messageId,
conversationId,
parentMessageId,
...rest,
};
return await Message.findOneAndUpdate({ user, messageId }, message, {
upsert: true,
new: true,
});
} catch (err) {
logger.error('Error recording message:', err);
throw err;
}
}
/**
* Updates the text of a message.
*
* @async
* @function updateMessageText
 * @param {Object} req - The request object containing user information.
 * @param {Object} params - The update data object.
* @param {string} params.messageId - The unique identifier for the message.
* @param {string} params.text - The new text content of the message.
* @returns {Promise<void>}
* @throws {Error} If there is an error in updating the message text.
*/
async function updateMessageText(req, { messageId, text }) {
try {
await Message.updateOne({ messageId, user: req.user.id }, { text });
} catch (err) {
logger.error('Error updating message text:', err);
throw err;
}
}
/**
* Updates a message.
*
* @async
* @function updateMessage
* @param {Object} req - The request object.
* @param {Object} message - The message object containing update data.
* @param {string} message.messageId - The unique identifier for the message.
* @param {string} [message.text] - The new text content of the message.
* @param {Object[]} [message.files] - The files associated with the message.
* @param {boolean} [message.isCreatedByUser] - Indicates if the message was created by the user.
* @param {string} [message.sender] - The identifier of the sender.
* @param {number} [message.tokenCount] - The number of tokens in the message.
* @param {Object} [metadata] - The operation metadata
 * @param {string} [metadata.context] - The context of the operation
* @returns {Promise<TMessage>} The updated message document.
* @throws {Error} If there is an error in updating the message or if the message is not found.
*/
async function updateMessage(req, message, metadata) {
try {
const { messageId, ...update } = message;
update.isEdited = true;
const updatedMessage = await Message.findOneAndUpdate(
{ messageId, user: req.user.id },
update,
{
new: true,
},
);
if (!updatedMessage) {
throw new Error('Message not found or user not authorized.');
}
return {
messageId: updatedMessage.messageId,
conversationId: updatedMessage.conversationId,
parentMessageId: updatedMessage.parentMessageId,
sender: updatedMessage.sender,
text: updatedMessage.text,
isCreatedByUser: updatedMessage.isCreatedByUser,
tokenCount: updatedMessage.tokenCount,
isEdited: true,
};
} catch (err) {
logger.error('Error updating message:', err);
if (metadata && metadata?.context) {
logger.info(`---\`updateMessage\` context: ${metadata.context}`);
}
throw err;
}
}
/**
* Deletes messages in a conversation since a specific message.
*
* @async
* @function deleteMessagesSince
 * @param {Object} req - The request object containing user information.
 * @param {Object} params - The parameters object.
* @param {string} params.messageId - The unique identifier for the message.
* @param {string} params.conversationId - The identifier of the conversation.
* @returns {Promise<Number>} The number of deleted messages.
* @throws {Error} If there is an error in deleting messages.
*/
async function deleteMessagesSince(req, { messageId, conversationId }) {
try {
const message = await Message.findOne({ messageId, user: req.user.id }).lean();
if (message) {
const query = Message.find({ conversationId, user: req.user.id });
return await query.deleteMany({
createdAt: { $gt: message.createdAt },
});
}
return undefined;
} catch (err) {
logger.error('Error deleting messages:', err);
throw err;
}
}
/**
* Retrieves messages from the database.
* @async
* @function getMessages
* @param {Record<string, unknown>} filter - The filter criteria.
* @param {string | undefined} [select] - The fields to select.
* @returns {Promise<TMessage[]>} The messages that match the filter criteria.
* @throws {Error} If there is an error in retrieving messages.
*/
async function getMessages(filter, select) {
try {
if (select) {
return await Message.find(filter).select(select).sort({ createdAt: 1 }).lean();
}
return await Message.find(filter).sort({ createdAt: 1 }).lean();
} catch (err) {
logger.error('Error getting messages:', err);
throw err;
}
}
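A small sketch of the optional projection shown above (the same pattern saveConvo uses when it fetches message _ids); the caller is hypothetical.
// Hypothetical caller; the wrapper name is an assumption.
const { getMessages } = require('~/models/Message');

async function getMessageIds(conversationId) {
  // Lean documents containing only _id, sorted oldest first.
  return await getMessages({ conversationId }, '_id');
}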
/**
* Deletes messages from the database.
*
* @async
* @function deleteMessages
* @param {Object} filter - The filter criteria to find messages to delete.
 * @returns {Promise<Object>} The deletion result, including the count of deleted messages.
* @throws {Error} If there is an error in deleting messages.
*/
async function deleteMessages(filter) {
try {
return await Message.deleteMany(filter);
} catch (err) {
logger.error('Error deleting messages:', err);
throw err;
}
}
module.exports = {
Message,
saveMessage,
bulkSaveMessages,
recordMessage,
updateMessageText,
updateMessage,
deleteMessagesSince,
getMessages,
deleteMessages,
async saveMessage({
user,
endpoint,
iconURL,
messageId,
newMessageId,
conversationId,
parentMessageId,
sender,
text,
isCreatedByUser,
error,
unfinished,
files,
isEdited,
finish_reason,
tokenCount,
plugin,
plugins,
model,
}) {
try {
const validConvoId = idSchema.safeParse(conversationId);
if (!validConvoId.success) {
return;
}
const update = {
user,
iconURL,
endpoint,
messageId: newMessageId || messageId,
conversationId,
parentMessageId,
sender,
text,
isCreatedByUser,
isEdited,
finish_reason,
error,
unfinished,
tokenCount,
plugin,
plugins,
model,
};
if (files) {
update.files = files;
}
// may also need to update the conversation here
await Message.findOneAndUpdate({ messageId }, update, { upsert: true, new: true });
return {
messageId,
conversationId,
parentMessageId,
sender,
text,
isCreatedByUser,
tokenCount,
};
} catch (err) {
logger.error('Error saving message:', err);
throw new Error('Failed to save message.');
}
},
async bulkSaveMessages(messages) {
try {
const bulkOps = messages.map((message) => ({
updateOne: {
filter: { messageId: message.messageId },
update: message,
upsert: true,
},
}));
const result = await Message.bulkWrite(bulkOps);
return result;
} catch (err) {
logger.error('Error saving messages in bulk:', err);
throw new Error('Failed to save messages in bulk.');
}
},
/**
* Records a message in the database.
*
* @async
* @function recordMessage
* @param {Object} params - The message data object.
* @param {string} params.user - The identifier of the user.
* @param {string} params.endpoint - The endpoint where the message originated.
* @param {string} params.messageId - The unique identifier for the message.
* @param {string} params.conversationId - The identifier of the conversation.
* @param {string} [params.parentMessageId] - The identifier of the parent message, if any.
* @param {Partial<TMessage>} rest - Any additional properties from the TMessage typedef not explicitly listed.
* @returns {Promise<Object>} The updated or newly inserted message document.
* @throws {Error} If there is an error in saving the message.
*/
async recordMessage({ user, endpoint, messageId, conversationId, parentMessageId, ...rest }) {
try {
// No parsing of convoId as may use threadId
const message = {
user,
endpoint,
messageId,
conversationId,
parentMessageId,
...rest,
};
return await Message.findOneAndUpdate({ user, messageId }, message, {
upsert: true,
new: true,
});
} catch (err) {
logger.error('Error saving message:', err);
throw new Error('Failed to save message.');
}
},
async updateMessage(message) {
try {
const { messageId, ...update } = message;
update.isEdited = true;
const updatedMessage = await Message.findOneAndUpdate({ messageId }, update, {
new: true,
});
if (!updatedMessage) {
throw new Error('Message not found.');
}
return {
messageId: updatedMessage.messageId,
conversationId: updatedMessage.conversationId,
parentMessageId: updatedMessage.parentMessageId,
sender: updatedMessage.sender,
text: updatedMessage.text,
isCreatedByUser: updatedMessage.isCreatedByUser,
tokenCount: updatedMessage.tokenCount,
isEdited: true,
};
} catch (err) {
logger.error('Error updating message:', err);
throw new Error('Failed to update message.');
}
},
async deleteMessagesSince({ messageId, conversationId }) {
try {
const message = await Message.findOne({ messageId }).lean();
if (message) {
return await Message.find({ conversationId }).deleteMany({
createdAt: { $gt: message.createdAt },
});
}
} catch (err) {
logger.error('Error deleting messages:', err);
throw new Error('Failed to delete messages.');
}
},
async getMessages(filter) {
try {
return await Message.find(filter).sort({ createdAt: 1 }).lean();
} catch (err) {
logger.error('Error getting messages:', err);
throw new Error('Failed to get messages.');
}
},
async deleteMessages(filter) {
try {
return await Message.deleteMany(filter);
} catch (err) {
logger.error('Error deleting messages:', err);
throw new Error('Failed to delete messages.');
}
},
};

View File

@@ -1,239 +0,0 @@
const mongoose = require('mongoose');
const { v4: uuidv4 } = require('uuid');
jest.mock('mongoose');
const mockFindQuery = {
select: jest.fn().mockReturnThis(),
sort: jest.fn().mockReturnThis(),
lean: jest.fn().mockReturnThis(),
deleteMany: jest.fn().mockResolvedValue({ deletedCount: 1 }),
};
const mockSchema = {
findOneAndUpdate: jest.fn(),
updateOne: jest.fn(),
findOne: jest.fn(() => ({
lean: jest.fn(),
})),
find: jest.fn(() => mockFindQuery),
deleteMany: jest.fn(),
};
mongoose.model.mockReturnValue(mockSchema);
jest.mock('~/models/schema/messageSchema', () => mockSchema);
jest.mock('~/config/winston', () => ({
error: jest.fn(),
}));
const {
saveMessage,
getMessages,
updateMessage,
deleteMessages,
updateMessageText,
deleteMessagesSince,
} = require('~/models/Message');
describe('Message Operations', () => {
let mockReq;
let mockMessage;
beforeEach(() => {
jest.clearAllMocks();
mockReq = {
user: { id: 'user123' },
};
mockMessage = {
messageId: 'msg123',
conversationId: uuidv4(),
text: 'Hello, world!',
user: 'user123',
};
mockSchema.findOneAndUpdate.mockResolvedValue({
toObject: () => mockMessage,
});
});
describe('saveMessage', () => {
it('should save a message for an authenticated user', async () => {
const result = await saveMessage(mockReq, mockMessage);
expect(result).toEqual(mockMessage);
expect(mockSchema.findOneAndUpdate).toHaveBeenCalledWith(
{ messageId: 'msg123', user: 'user123' },
expect.objectContaining({ user: 'user123' }),
expect.any(Object),
);
});
it('should throw an error for unauthenticated user', async () => {
mockReq.user = null;
await expect(saveMessage(mockReq, mockMessage)).rejects.toThrow('User not authenticated');
});
it('should throw an error for invalid conversation ID', async () => {
mockMessage.conversationId = 'invalid-id';
await expect(saveMessage(mockReq, mockMessage)).resolves.toBeUndefined();
});
});
describe('updateMessageText', () => {
it('should update message text for the authenticated user', async () => {
await updateMessageText(mockReq, { messageId: 'msg123', text: 'Updated text' });
expect(mockSchema.updateOne).toHaveBeenCalledWith(
{ messageId: 'msg123', user: 'user123' },
{ text: 'Updated text' },
);
});
});
describe('updateMessage', () => {
it('should update a message for the authenticated user', async () => {
mockSchema.findOneAndUpdate.mockResolvedValue(mockMessage);
const result = await updateMessage(mockReq, { messageId: 'msg123', text: 'Updated text' });
expect(result).toEqual(
expect.objectContaining({
messageId: 'msg123',
text: 'Hello, world!',
isEdited: true,
}),
);
});
it('should throw an error if message is not found', async () => {
mockSchema.findOneAndUpdate.mockResolvedValue(null);
await expect(
updateMessage(mockReq, { messageId: 'nonexistent', text: 'Test' }),
).rejects.toThrow('Message not found or user not authorized.');
});
});
describe('deleteMessagesSince', () => {
it('should delete messages only for the authenticated user', async () => {
mockSchema.findOne().lean.mockResolvedValueOnce({ createdAt: new Date() });
mockFindQuery.deleteMany.mockResolvedValueOnce({ deletedCount: 1 });
const result = await deleteMessagesSince(mockReq, {
messageId: 'msg123',
conversationId: 'convo123',
});
expect(mockSchema.findOne).toHaveBeenCalledWith({ messageId: 'msg123', user: 'user123' });
expect(mockSchema.find).not.toHaveBeenCalled();
expect(result).toBeUndefined();
});
it('should return undefined if no message is found', async () => {
mockSchema.findOne().lean.mockResolvedValueOnce(null);
const result = await deleteMessagesSince(mockReq, {
messageId: 'nonexistent',
conversationId: 'convo123',
});
expect(result).toBeUndefined();
});
});
describe('getMessages', () => {
it('should retrieve messages with the correct filter', async () => {
const filter = { conversationId: 'convo123' };
await getMessages(filter);
expect(mockSchema.find).toHaveBeenCalledWith(filter);
expect(mockFindQuery.sort).toHaveBeenCalledWith({ createdAt: 1 });
expect(mockFindQuery.lean).toHaveBeenCalled();
});
});
describe('deleteMessages', () => {
it('should delete messages with the correct filter', async () => {
await deleteMessages({ user: 'user123' });
expect(mockSchema.deleteMany).toHaveBeenCalledWith({ user: 'user123' });
});
});
describe('Conversation Hijacking Prevention', () => {
it('should not allow editing a message in another user\'s conversation', async () => {
const attackerReq = { user: { id: 'attacker123' } };
const victimConversationId = 'victim-convo-123';
const victimMessageId = 'victim-msg-123';
mockSchema.findOneAndUpdate.mockResolvedValue(null);
await expect(
updateMessage(attackerReq, {
messageId: victimMessageId,
conversationId: victimConversationId,
text: 'Hacked message',
}),
).rejects.toThrow('Message not found or user not authorized.');
expect(mockSchema.findOneAndUpdate).toHaveBeenCalledWith(
{ messageId: victimMessageId, user: 'attacker123' },
expect.anything(),
expect.anything(),
);
});
it('should not allow deleting messages from another user\'s conversation', async () => {
const attackerReq = { user: { id: 'attacker123' } };
const victimConversationId = 'victim-convo-123';
const victimMessageId = 'victim-msg-123';
mockSchema.findOne().lean.mockResolvedValueOnce(null); // Simulating message not found for this user
const result = await deleteMessagesSince(attackerReq, {
messageId: victimMessageId,
conversationId: victimConversationId,
});
expect(result).toBeUndefined();
expect(mockSchema.findOne).toHaveBeenCalledWith({
messageId: victimMessageId,
user: 'attacker123',
});
});
it('should not allow inserting a new message into another user\'s conversation', async () => {
const attackerReq = { user: { id: 'attacker123' } };
const victimConversationId = uuidv4(); // Use a valid UUID
await expect(
saveMessage(attackerReq, {
conversationId: victimConversationId,
text: 'Inserted malicious message',
messageId: 'new-msg-123',
}),
).resolves.not.toThrow(); // It should not throw an error
// Check that the message was saved with the attacker's user ID
expect(mockSchema.findOneAndUpdate).toHaveBeenCalledWith(
{ messageId: 'new-msg-123', user: 'attacker123' },
expect.objectContaining({
user: 'attacker123',
conversationId: victimConversationId,
}),
expect.anything(),
);
});
it('should allow retrieving messages from any conversation', async () => {
const victimConversationId = 'victim-convo-123';
await getMessages({ conversationId: victimConversationId });
expect(mockSchema.find).toHaveBeenCalledWith({
conversationId: victimConversationId,
});
mockSchema.find.mockReturnValueOnce({
select: jest.fn().mockReturnThis(),
sort: jest.fn().mockReturnThis(),
lean: jest.fn().mockResolvedValue([{ text: 'Test message' }]),
});
const result = await getMessages({ conversationId: victimConversationId });
expect(result).toEqual([{ text: 'Test message' }]);
});
});
});

View File

@@ -1,136 +0,0 @@
const { model } = require('mongoose');
const { GLOBAL_PROJECT_NAME } = require('librechat-data-provider').Constants;
const projectSchema = require('~/models/schema/projectSchema');
const Project = model('Project', projectSchema);
/**
* Retrieve a project by ID and convert the found project document to a plain object.
*
* @param {string} projectId - The ID of the project to find and return as a plain object.
* @param {string|string[]} [fieldsToSelect] - The fields to include or exclude in the returned document.
* @returns {Promise<MongoProject>} A plain object representing the project document, or `null` if no project is found.
*/
const getProjectById = async function (projectId, fieldsToSelect = null) {
const query = Project.findById(projectId);
if (fieldsToSelect) {
query.select(fieldsToSelect);
}
return await query.lean();
};
/**
* Retrieve a project by name and convert the found project document to a plain object.
* If the project with the given name doesn't exist and the name is "instance", create it and return the lean version.
*
* @param {string} projectName - The name of the project to find or create.
* @param {string|string[]} [fieldsToSelect] - The fields to include or exclude in the returned document.
* @returns {Promise<MongoProject>} A plain object representing the project document.
*/
const getProjectByName = async function (projectName, fieldsToSelect = null) {
const query = { name: projectName };
const update = { $setOnInsert: { name: projectName } };
const options = {
new: true,
upsert: projectName === GLOBAL_PROJECT_NAME,
lean: true,
select: fieldsToSelect,
};
return await Project.findOneAndUpdate(query, update, options);
};
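A usage sketch of the upsert behavior above: only the global "instance" project is created on demand. The require path and wrapper are hypothetical.
// Hypothetical caller; the require path is an assumption.
const { Constants } = require('librechat-data-provider');
const { getProjectByName } = require('~/models/Project');

async function getGlobalPromptGroupIds() {
  // Upserts the global project if it does not exist and selects only promptGroupIds.
  const project = await getProjectByName(Constants.GLOBAL_PROJECT_NAME, 'promptGroupIds');
  return project?.promptGroupIds ?? [];
}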
/**
* Add an array of prompt group IDs to a project's promptGroupIds array, ensuring uniqueness.
*
* @param {string} projectId - The ID of the project to update.
* @param {string[]} promptGroupIds - The array of prompt group IDs to add to the project.
* @returns {Promise<MongoProject>} The updated project document.
*/
const addGroupIdsToProject = async function (projectId, promptGroupIds) {
return await Project.findByIdAndUpdate(
projectId,
{ $addToSet: { promptGroupIds: { $each: promptGroupIds } } },
{ new: true },
);
};
/**
* Remove an array of prompt group IDs from a project's promptGroupIds array.
*
* @param {string} projectId - The ID of the project to update.
* @param {string[]} promptGroupIds - The array of prompt group IDs to remove from the project.
* @returns {Promise<MongoProject>} The updated project document.
*/
const removeGroupIdsFromProject = async function (projectId, promptGroupIds) {
return await Project.findByIdAndUpdate(
projectId,
{ $pull: { promptGroupIds: { $in: promptGroupIds } } },
{ new: true },
);
};
/**
* Remove a prompt group ID from all projects.
*
* @param {string} promptGroupId - The ID of the prompt group to remove from projects.
* @returns {Promise<void>}
*/
const removeGroupFromAllProjects = async (promptGroupId) => {
await Project.updateMany({}, { $pull: { promptGroupIds: promptGroupId } });
};
/**
* Add an array of agent IDs to a project's agentIds array, ensuring uniqueness.
*
* @param {string} projectId - The ID of the project to update.
* @param {string[]} agentIds - The array of agent IDs to add to the project.
* @returns {Promise<MongoProject>} The updated project document.
*/
const addAgentIdsToProject = async function (projectId, agentIds) {
return await Project.findByIdAndUpdate(
projectId,
{ $addToSet: { agentIds: { $each: agentIds } } },
{ new: true },
);
};
/**
* Remove an array of agent IDs from a project's agentIds array.
*
* @param {string} projectId - The ID of the project to update.
* @param {string[]} agentIds - The array of agent IDs to remove from the project.
* @returns {Promise<MongoProject>} The updated project document.
*/
const removeAgentIdsFromProject = async function (projectId, agentIds) {
return await Project.findByIdAndUpdate(
projectId,
{ $pull: { agentIds: { $in: agentIds } } },
{ new: true },
);
};
/**
* Remove an agent ID from all projects.
*
* @param {string} agentId - The ID of the agent to remove from projects.
* @returns {Promise<void>}
*/
const removeAgentFromAllProjects = async (agentId) => {
await Project.updateMany({}, { $pull: { agentIds: agentId } });
};
module.exports = {
getProjectById,
getProjectByName,
/* prompts */
addGroupIdsToProject,
removeGroupIdsFromProject,
removeGroupFromAllProjects,
/* agents */
addAgentIdsToProject,
removeAgentIdsFromProject,
removeAgentFromAllProjects,
};

View File

@@ -1,528 +1,52 @@
const { ObjectId } = require('mongodb');
const { SystemRoles, SystemCategories, Constants } = require('librechat-data-provider');
const {
getProjectByName,
addGroupIdsToProject,
removeGroupIdsFromProject,
removeGroupFromAllProjects,
} = require('./Project');
const { Prompt, PromptGroup } = require('./schema/promptSchema');
const mongoose = require('mongoose');
const { logger } = require('~/config');
/**
* Create a pipeline for the aggregation to get prompt groups
* @param {Object} query
* @param {number} skip
* @param {number} limit
* @returns {[Object]} - The pipeline for the aggregation
*/
const createGroupPipeline = (query, skip, limit) => {
return [
{ $match: query },
{ $sort: { createdAt: -1 } },
{ $skip: skip },
{ $limit: limit },
{
$lookup: {
from: 'prompts',
localField: 'productionId',
foreignField: '_id',
as: 'productionPrompt',
},
const promptSchema = mongoose.Schema(
{
title: {
type: String,
required: true,
},
{ $unwind: { path: '$productionPrompt', preserveNullAndEmptyArrays: true } },
{
$project: {
name: 1,
numberOfGenerations: 1,
oneliner: 1,
category: 1,
projectIds: 1,
productionId: 1,
author: 1,
authorName: 1,
createdAt: 1,
updatedAt: 1,
'productionPrompt.prompt': 1,
// 'productionPrompt._id': 1,
// 'productionPrompt.type': 1,
},
prompt: {
type: String,
required: true,
},
category: {
type: String,
},
];
};
/**
* Create a pipeline for the aggregation to get all prompt groups
* @param {Object} query
* @param {Partial<MongoPromptGroup>} $project
* @returns {[Object]} - The pipeline for the aggregation
*/
const createAllGroupsPipeline = (
query,
$project = {
name: 1,
oneliner: 1,
category: 1,
author: 1,
authorName: 1,
createdAt: 1,
updatedAt: 1,
command: 1,
'productionPrompt.prompt': 1,
},
) => {
return [
{ $match: query },
{ $sort: { createdAt: -1 } },
{
$lookup: {
from: 'prompts',
localField: 'productionId',
foreignField: '_id',
as: 'productionPrompt',
},
},
{ $unwind: { path: '$productionPrompt', preserveNullAndEmptyArrays: true } },
{
$project,
},
];
};
{ timestamps: true },
);
/**
* Get all prompt groups with filters
* @param {Object} req
* @param {TPromptGroupsWithFilterRequest} filter
* @returns {Promise<PromptGroupListResponse>}
*/
const getAllPromptGroups = async (req, filter) => {
try {
const { name, ...query } = filter;
if (!query.author) {
throw new Error('Author is required');
}
let searchShared = true;
let searchSharedOnly = false;
if (name) {
query.name = new RegExp(name, 'i');
}
if (!query.category) {
delete query.category;
} else if (query.category === SystemCategories.MY_PROMPTS) {
searchShared = false;
delete query.category;
} else if (query.category === SystemCategories.NO_CATEGORY) {
query.category = '';
} else if (query.category === SystemCategories.SHARED_PROMPTS) {
searchSharedOnly = true;
delete query.category;
}
let combinedQuery = query;
if (searchShared) {
const project = await getProjectByName(Constants.GLOBAL_PROJECT_NAME, 'promptGroupIds');
if (project && project.promptGroupIds.length > 0) {
const projectQuery = { _id: { $in: project.promptGroupIds }, ...query };
delete projectQuery.author;
combinedQuery = searchSharedOnly ? projectQuery : { $or: [projectQuery, query] };
}
}
const promptGroupsPipeline = createAllGroupsPipeline(combinedQuery);
return await PromptGroup.aggregate(promptGroupsPipeline).exec();
} catch (error) {
console.error('Error getting all prompt groups', error);
return { message: 'Error getting all prompt groups' };
}
};
/**
* Get prompt groups with filters
* @param {Object} req
* @param {TPromptGroupsWithFilterRequest} filter
* @returns {Promise<PromptGroupListResponse>}
*/
const getPromptGroups = async (req, filter) => {
try {
const { pageNumber = 1, pageSize = 10, name, ...query } = filter;
const validatedPageNumber = Math.max(parseInt(pageNumber, 10), 1);
const validatedPageSize = Math.max(parseInt(pageSize, 10), 1);
if (!query.author) {
throw new Error('Author is required');
}
let searchShared = true;
let searchSharedOnly = false;
if (name) {
query.name = new RegExp(name, 'i');
}
if (!query.category) {
delete query.category;
} else if (query.category === SystemCategories.MY_PROMPTS) {
searchShared = false;
delete query.category;
} else if (query.category === SystemCategories.NO_CATEGORY) {
query.category = '';
} else if (query.category === SystemCategories.SHARED_PROMPTS) {
searchSharedOnly = true;
delete query.category;
}
let combinedQuery = query;
if (searchShared) {
// const projects = req.user.projects || []; // TODO: handle multiple projects
const project = await getProjectByName(Constants.GLOBAL_PROJECT_NAME, 'promptGroupIds');
if (project && project.promptGroupIds.length > 0) {
const projectQuery = { _id: { $in: project.promptGroupIds }, ...query };
delete projectQuery.author;
combinedQuery = searchSharedOnly ? projectQuery : { $or: [projectQuery, query] };
}
}
const skip = (validatedPageNumber - 1) * validatedPageSize;
const limit = validatedPageSize;
const promptGroupsPipeline = createGroupPipeline(combinedQuery, skip, limit);
const totalPromptGroupsPipeline = [{ $match: combinedQuery }, { $count: 'total' }];
const [promptGroupsResults, totalPromptGroupsResults] = await Promise.all([
PromptGroup.aggregate(promptGroupsPipeline).exec(),
PromptGroup.aggregate(totalPromptGroupsPipeline).exec(),
]);
const promptGroups = promptGroupsResults;
const totalPromptGroups =
totalPromptGroupsResults.length > 0 ? totalPromptGroupsResults[0].total : 0;
return {
promptGroups,
pageNumber: validatedPageNumber.toString(),
pageSize: validatedPageSize.toString(),
pages: Math.ceil(totalPromptGroups / validatedPageSize).toString(),
};
} catch (error) {
console.error('Error getting prompt groups', error);
return { message: 'Error getting prompt groups' };
}
};
const Prompt = mongoose.models.Prompt || mongoose.model('Prompt', promptSchema);
module.exports = {
getPromptGroups,
getAllPromptGroups,
/**
* Create a prompt and its respective group
* @param {TCreatePromptRecord} saveData
* @returns {Promise<TCreatePromptResponse>}
*/
createPromptGroup: async (saveData) => {
savePrompt: async ({ title, prompt }) => {
try {
const { prompt, group, author, authorName } = saveData;
let newPromptGroup = await PromptGroup.findOneAndUpdate(
{ ...group, author, authorName, productionId: null },
{ $setOnInsert: { ...group, author, authorName, productionId: null } },
{ new: true, upsert: true },
)
.lean()
.select('-__v')
.exec();
const newPrompt = await Prompt.findOneAndUpdate(
{ ...prompt, author, groupId: newPromptGroup._id },
{ $setOnInsert: { ...prompt, author, groupId: newPromptGroup._id } },
{ new: true, upsert: true },
)
.lean()
.select('-__v')
.exec();
newPromptGroup = await PromptGroup.findByIdAndUpdate(
newPromptGroup._id,
{ productionId: newPrompt._id },
{ new: true },
)
.lean()
.select('-__v')
.exec();
return {
prompt: newPrompt,
group: {
...newPromptGroup,
productionPrompt: { prompt: newPrompt.prompt },
},
};
} catch (error) {
logger.error('Error saving prompt group', error);
throw new Error('Error saving prompt group');
}
},
/**
* Save a prompt
* @param {TCreatePromptRecord} saveData
* @returns {Promise<TCreatePromptResponse>}
*/
savePrompt: async (saveData) => {
try {
const { prompt, author } = saveData;
const newPromptData = {
...prompt,
author,
};
/** @type {TPrompt} */
let newPrompt;
try {
newPrompt = await Prompt.create(newPromptData);
} catch (error) {
if (error?.message?.includes('groupId_1_version_1')) {
await Prompt.db.collection('prompts').dropIndex('groupId_1_version_1');
} else {
throw error;
}
newPrompt = await Prompt.create(newPromptData);
}
return { prompt: newPrompt };
await Prompt.create({
title,
prompt,
});
return { title, prompt };
} catch (error) {
logger.error('Error saving prompt', error);
return { message: 'Error saving prompt' };
return { prompt: 'Error saving prompt' };
}
},
getPrompts: async (filter) => {
try {
return await Prompt.find(filter).sort({ createdAt: -1 }).lean();
return await Prompt.find(filter).lean();
} catch (error) {
logger.error('Error getting prompts', error);
return { message: 'Error getting prompts' };
return { prompt: 'Error getting prompts' };
}
},
getPrompt: async (filter) => {
deletePrompts: async (filter) => {
try {
if (filter.groupId) {
filter.groupId = new ObjectId(filter.groupId);
}
return await Prompt.findOne(filter).lean();
return await Prompt.deleteMany(filter);
} catch (error) {
logger.error('Error getting prompt', error);
return { message: 'Error getting prompt' };
}
},
/**
* Get prompt groups with filters
* @param {TGetRandomPromptsRequest} filter
* @returns {Promise<TGetRandomPromptsResponse>}
*/
getRandomPromptGroups: async (filter) => {
try {
const result = await PromptGroup.aggregate([
{
$match: {
category: { $ne: '' },
},
},
{
$group: {
_id: '$category',
promptGroup: { $first: '$$ROOT' },
},
},
{
$replaceRoot: { newRoot: '$promptGroup' },
},
{
$sample: { size: +filter.limit + +filter.skip },
},
{
$skip: +filter.skip,
},
{
$limit: +filter.limit,
},
]);
return { prompts: result };
} catch (error) {
logger.error('Error getting prompt groups', error);
return { message: 'Error getting prompt groups' };
}
},
getPromptGroupsWithPrompts: async (filter) => {
try {
return await PromptGroup.findOne(filter)
.populate({
path: 'prompts',
select: '-_id -__v -user',
})
.select('-_id -__v -user')
.lean();
} catch (error) {
logger.error('Error getting prompt groups', error);
return { message: 'Error getting prompt groups' };
}
},
getPromptGroup: async (filter) => {
try {
return await PromptGroup.findOne(filter).lean();
} catch (error) {
logger.error('Error getting prompt group', error);
return { message: 'Error getting prompt group' };
}
},
/**
* Deletes a prompt and its corresponding prompt group if it is the last prompt in the group.
*
* @param {Object} options - The options for deleting the prompt.
* @param {ObjectId|string} options.promptId - The ID of the prompt to delete.
* @param {ObjectId|string} options.groupId - The ID of the prompt's group.
* @param {ObjectId|string} options.author - The ID of the prompt's author.
* @param {string} options.role - The role of the prompt's author.
* @return {Promise<TDeletePromptResponse>} An object containing the result of the deletion.
* If the prompt was deleted successfully, the object will have a property 'prompt' with the value 'Prompt deleted successfully'.
* If the prompt group was deleted successfully, the object will have a property 'promptGroup' with the message 'Prompt group deleted successfully' and id of the deleted group.
* If there was an error deleting the prompt, the object will have a property 'message' with the value 'Error deleting prompt'.
*/
deletePrompt: async ({ promptId, groupId, author, role }) => {
const query = { _id: promptId, groupId, author };
if (role === SystemRoles.ADMIN) {
delete query.author;
}
const { deletedCount } = await Prompt.deleteOne(query);
if (deletedCount === 0) {
throw new Error('Failed to delete the prompt');
}
const remainingPrompts = await Prompt.find({ groupId })
.select('_id')
.sort({ createdAt: 1 })
.lean();
if (remainingPrompts.length === 0) {
await PromptGroup.deleteOne({ _id: groupId });
await removeGroupFromAllProjects(groupId);
return {
prompt: 'Prompt deleted successfully',
promptGroup: {
message: 'Prompt group deleted successfully',
id: groupId,
},
};
} else {
const promptGroup = await PromptGroup.findById(groupId).lean();
if (promptGroup.productionId.toString() === promptId.toString()) {
await PromptGroup.updateOne(
{ _id: groupId },
{ productionId: remainingPrompts[remainingPrompts.length - 1]._id },
);
}
return { prompt: 'Prompt deleted successfully' };
}
},
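A caller sketch for deletePrompt as documented above; the require path and wrapper name are hypothetical.
// Hypothetical caller; the require path is an assumption.
const { SystemRoles } = require('librechat-data-provider');
const { deletePrompt } = require('~/models/Prompt');

async function removeUserPrompt({ promptId, groupId, userId, role }) {
  // Non-admin deletions are scoped to the author; admins may delete any prompt in the group.
  return await deletePrompt({ promptId, groupId, author: userId, role: role ?? SystemRoles.USER });
}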
/**
* Update prompt group
* @param {Partial<MongoPromptGroup>} filter - Filter to find prompt group
* @param {Partial<MongoPromptGroup>} data - Data to update
* @returns {Promise<TUpdatePromptGroupResponse>}
*/
updatePromptGroup: async (filter, data) => {
try {
const updateOps = {};
if (data.removeProjectIds) {
for (const projectId of data.removeProjectIds) {
await removeGroupIdsFromProject(projectId, [filter._id]);
}
updateOps.$pull = { projectIds: { $in: data.removeProjectIds } };
delete data.removeProjectIds;
}
if (data.projectIds) {
for (const projectId of data.projectIds) {
await addGroupIdsToProject(projectId, [filter._id]);
}
updateOps.$addToSet = { projectIds: { $each: data.projectIds } };
delete data.projectIds;
}
const updateData = { ...data, ...updateOps };
const updatedDoc = await PromptGroup.findOneAndUpdate(filter, updateData, {
new: true,
upsert: false,
});
if (!updatedDoc) {
throw new Error('Prompt group not found');
}
return updatedDoc;
} catch (error) {
logger.error('Error updating prompt group', error);
return { message: 'Error updating prompt group' };
}
},
/**
* Function to make a prompt production based on its ID.
* @param {String} promptId - The ID of the prompt to make production.
* @returns {Object} The result of the production operation.
*/
makePromptProduction: async (promptId) => {
try {
const prompt = await Prompt.findById(promptId).lean();
if (!prompt) {
throw new Error('Prompt not found');
}
await PromptGroup.findByIdAndUpdate(
prompt.groupId,
{ productionId: prompt._id },
{ new: true },
)
.lean()
.exec();
return {
message: 'Prompt production made successfully',
};
} catch (error) {
logger.error('Error making prompt production', error);
return { message: 'Error making prompt production' };
}
},
updatePromptLabels: async (_id, labels) => {
try {
const response = await Prompt.updateOne({ _id }, { $set: { labels } });
if (response.matchedCount === 0) {
return { message: 'Prompt not found' };
}
return { message: 'Prompt labels updated successfully' };
} catch (error) {
logger.error('Error updating prompt labels', error);
return { message: 'Error updating prompt labels' };
}
},
deletePromptGroup: async (_id) => {
try {
const response = await PromptGroup.deleteOne({ _id });
if (response.deletedCount === 0) {
return { promptGroup: 'Prompt group not found' };
}
await Prompt.deleteMany({ groupId: new ObjectId(_id) });
await removeGroupFromAllProjects(_id);
return { promptGroup: 'Prompt group deleted successfully' };
} catch (error) {
logger.error('Error deleting prompt group', error);
return { message: 'Error deleting prompt group' };
logger.error('Error deleting prompts', error);
return { prompt: 'Error deleting prompts' };
}
},
};

View File

@@ -1,171 +0,0 @@
const {
CacheKeys,
SystemRoles,
roleDefaults,
PermissionTypes,
removeNullishValues,
agentPermissionsSchema,
promptPermissionsSchema,
bookmarkPermissionsSchema,
multiConvoPermissionsSchema,
} = require('librechat-data-provider');
const getLogStores = require('~/cache/getLogStores');
const Role = require('~/models/schema/roleSchema');
const { logger } = require('~/config');
/**
* Retrieve a role by name and convert the found role document to a plain object.
* If the role with the given name doesn't exist and the name is a system defined role, create it and return the lean version.
*
* @param {string} roleName - The name of the role to find or create.
* @param {string|string[]} [fieldsToSelect] - The fields to include or exclude in the returned document.
* @returns {Promise<Object>} A plain object representing the role document.
*/
const getRoleByName = async function (roleName, fieldsToSelect = null) {
try {
const cache = getLogStores(CacheKeys.ROLES);
const cachedRole = await cache.get(roleName);
if (cachedRole) {
return cachedRole;
}
let query = Role.findOne({ name: roleName });
if (fieldsToSelect) {
query = query.select(fieldsToSelect);
}
let role = await query.lean().exec();
if (!role && SystemRoles[roleName]) {
role = roleDefaults[roleName];
role = await new Role(role).save();
await cache.set(roleName, role);
return role.toObject();
}
await cache.set(roleName, role);
return role;
} catch (error) {
throw new Error(`Failed to retrieve or create role: ${error.message}`);
}
};
/**
* Update role values by name.
*
* @param {string} roleName - The name of the role to update.
* @param {Partial<TRole>} updates - The fields to update.
* @returns {Promise<TRole>} Updated role document.
*/
const updateRoleByName = async function (roleName, updates) {
try {
const cache = getLogStores(CacheKeys.ROLES);
const role = await Role.findOneAndUpdate(
{ name: roleName },
{ $set: updates },
{ new: true, lean: true },
)
.select('-__v')
.lean()
.exec();
await cache.set(roleName, role);
return role;
} catch (error) {
throw new Error(`Failed to update role: ${error.message}`);
}
};
const permissionSchemas = {
[PermissionTypes.AGENTS]: agentPermissionsSchema,
[PermissionTypes.PROMPTS]: promptPermissionsSchema,
[PermissionTypes.BOOKMARKS]: bookmarkPermissionsSchema,
[PermissionTypes.MULTI_CONVO]: multiConvoPermissionsSchema,
};
/**
* Updates access permissions for a specific role and multiple permission types.
* @param {SystemRoles} roleName - The role to update.
* @param {Object.<PermissionTypes, Object.<Permissions, boolean>>} permissionsUpdate - Permissions to update and their values.
*/
async function updateAccessPermissions(roleName, permissionsUpdate) {
const updates = {};
for (const [permissionType, permissions] of Object.entries(permissionsUpdate)) {
if (permissionSchemas[permissionType]) {
updates[permissionType] = removeNullishValues(permissions);
}
}
if (Object.keys(updates).length === 0) {
return;
}
try {
const role = await getRoleByName(roleName);
if (!role) {
return;
}
const updatedPermissions = {};
let hasChanges = false;
for (const [permissionType, permissions] of Object.entries(updates)) {
const currentPermissions = role[permissionType] || {};
updatedPermissions[permissionType] = { ...currentPermissions };
for (const [permission, value] of Object.entries(permissions)) {
if (currentPermissions[permission] !== value) {
updatedPermissions[permissionType][permission] = value;
hasChanges = true;
logger.info(
`Updating '${roleName}' role ${permissionType} '${permission}' permission from ${currentPermissions[permission]} to: ${value}`,
);
}
}
}
if (hasChanges) {
await updateRoleByName(roleName, updatedPermissions);
logger.info(`Updated '${roleName}' role permissions`);
} else {
logger.info(`No changes needed for '${roleName}' role permissions`);
}
} catch (error) {
logger.error(`Failed to update ${roleName} role permissions:`, error);
}
}
/**
* Initialize default roles in the system.
* Creates the default roles (ADMIN, USER) if they don't exist in the database.
* Updates existing roles with new permission types if they're missing.
*
* @returns {Promise<void>}
*/
const initializeRoles = async function () {
const defaultRoles = [SystemRoles.ADMIN, SystemRoles.USER];
for (const roleName of defaultRoles) {
let role = await Role.findOne({ name: roleName });
if (!role) {
// Create new role if it doesn't exist
role = new Role(roleDefaults[roleName]);
} else {
// Add missing permission types
let isUpdated = false;
for (const permType of Object.values(PermissionTypes)) {
if (!role[permType]) {
role[permType] = roleDefaults[roleName][permType];
isUpdated = true;
}
}
if (isUpdated) {
await role.save();
}
}
await role.save();
}
};
module.exports = {
getRoleByName,
initializeRoles,
updateRoleByName,
updateAccessPermissions,
};
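A startup-style sketch of updateAccessPermissions; the permission keys mirror the enums exercised in the tests below, and the caller is hypothetical.
// Hypothetical startup snippet.
const { SystemRoles, PermissionTypes, Permissions } = require('librechat-data-provider');
const { updateAccessPermissions } = require('~/models/Role');

async function allowGlobalPromptSharing() {
  // Only changed values are written; unchanged permissions are left untouched.
  await updateAccessPermissions(SystemRoles.USER, {
    [PermissionTypes.PROMPTS]: { [Permissions.SHARED_GLOBAL]: true },
  });
}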

View File

@@ -1,420 +0,0 @@
const mongoose = require('mongoose');
const { MongoMemoryServer } = require('mongodb-memory-server');
const {
SystemRoles,
PermissionTypes,
roleDefaults,
Permissions,
} = require('librechat-data-provider');
const { updateAccessPermissions, initializeRoles } = require('~/models/Role');
const getLogStores = require('~/cache/getLogStores');
const Role = require('~/models/schema/roleSchema');
// Mock the cache
jest.mock('~/cache/getLogStores', () => {
return jest.fn().mockReturnValue({
get: jest.fn(),
set: jest.fn(),
del: jest.fn(),
});
});
let mongoServer;
beforeAll(async () => {
mongoServer = await MongoMemoryServer.create();
const mongoUri = mongoServer.getUri();
await mongoose.connect(mongoUri);
});
afterAll(async () => {
await mongoose.disconnect();
await mongoServer.stop();
});
beforeEach(async () => {
await Role.deleteMany({});
getLogStores.mockClear();
});
describe('updateAccessPermissions', () => {
it('should update permissions when changes are needed', async () => {
await new Role({
name: SystemRoles.USER,
[PermissionTypes.PROMPTS]: {
CREATE: true,
USE: true,
SHARED_GLOBAL: false,
},
}).save();
await updateAccessPermissions(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: {
CREATE: true,
USE: true,
SHARED_GLOBAL: true,
},
});
const updatedRole = await Role.findOne({ name: SystemRoles.USER }).lean();
expect(updatedRole[PermissionTypes.PROMPTS]).toEqual({
CREATE: true,
USE: true,
SHARED_GLOBAL: true,
});
});
it('should not update permissions when no changes are needed', async () => {
await new Role({
name: SystemRoles.USER,
[PermissionTypes.PROMPTS]: {
CREATE: true,
USE: true,
SHARED_GLOBAL: false,
},
}).save();
await updateAccessPermissions(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: {
CREATE: true,
USE: true,
SHARED_GLOBAL: false,
},
});
const updatedRole = await Role.findOne({ name: SystemRoles.USER }).lean();
expect(updatedRole[PermissionTypes.PROMPTS]).toEqual({
CREATE: true,
USE: true,
SHARED_GLOBAL: false,
});
});
it('should handle non-existent roles', async () => {
await updateAccessPermissions('NON_EXISTENT_ROLE', {
[PermissionTypes.PROMPTS]: {
CREATE: true,
},
});
const role = await Role.findOne({ name: 'NON_EXISTENT_ROLE' });
expect(role).toBeNull();
});
it('should update only specified permissions', async () => {
await new Role({
name: SystemRoles.USER,
[PermissionTypes.PROMPTS]: {
CREATE: true,
USE: true,
SHARED_GLOBAL: false,
},
}).save();
await updateAccessPermissions(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: {
SHARED_GLOBAL: true,
},
});
const updatedRole = await Role.findOne({ name: SystemRoles.USER }).lean();
expect(updatedRole[PermissionTypes.PROMPTS]).toEqual({
CREATE: true,
USE: true,
SHARED_GLOBAL: true,
});
});
it('should handle partial updates', async () => {
await new Role({
name: SystemRoles.USER,
[PermissionTypes.PROMPTS]: {
CREATE: true,
USE: true,
SHARED_GLOBAL: false,
},
}).save();
await updateAccessPermissions(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: {
USE: false,
},
});
const updatedRole = await Role.findOne({ name: SystemRoles.USER }).lean();
expect(updatedRole[PermissionTypes.PROMPTS]).toEqual({
CREATE: true,
USE: false,
SHARED_GLOBAL: false,
});
});
it('should update multiple permission types at once', async () => {
await new Role({
name: SystemRoles.USER,
[PermissionTypes.PROMPTS]: {
CREATE: true,
USE: true,
SHARED_GLOBAL: false,
},
[PermissionTypes.BOOKMARKS]: {
USE: true,
},
}).save();
await updateAccessPermissions(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { USE: false, SHARED_GLOBAL: true },
[PermissionTypes.BOOKMARKS]: { USE: false },
});
const updatedRole = await Role.findOne({ name: SystemRoles.USER }).lean();
expect(updatedRole[PermissionTypes.PROMPTS]).toEqual({
CREATE: true,
USE: false,
SHARED_GLOBAL: true,
});
expect(updatedRole[PermissionTypes.BOOKMARKS]).toEqual({
USE: false,
});
});
it('should handle updates for a single permission type', async () => {
await new Role({
name: SystemRoles.USER,
[PermissionTypes.PROMPTS]: {
CREATE: true,
USE: true,
SHARED_GLOBAL: false,
},
}).save();
await updateAccessPermissions(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { USE: false, SHARED_GLOBAL: true },
});
const updatedRole = await Role.findOne({ name: SystemRoles.USER }).lean();
expect(updatedRole[PermissionTypes.PROMPTS]).toEqual({
CREATE: true,
USE: false,
SHARED_GLOBAL: true,
});
});
it('should update MULTI_CONVO permissions', async () => {
await new Role({
name: SystemRoles.USER,
[PermissionTypes.MULTI_CONVO]: {
USE: false,
},
}).save();
await updateAccessPermissions(SystemRoles.USER, {
[PermissionTypes.MULTI_CONVO]: {
USE: true,
},
});
const updatedRole = await Role.findOne({ name: SystemRoles.USER }).lean();
expect(updatedRole[PermissionTypes.MULTI_CONVO]).toEqual({
USE: true,
});
});
it('should update MULTI_CONVO permissions along with other permission types', async () => {
await new Role({
name: SystemRoles.USER,
[PermissionTypes.PROMPTS]: {
CREATE: true,
USE: true,
SHARED_GLOBAL: false,
},
[PermissionTypes.MULTI_CONVO]: {
USE: false,
},
}).save();
await updateAccessPermissions(SystemRoles.USER, {
[PermissionTypes.PROMPTS]: { SHARED_GLOBAL: true },
[PermissionTypes.MULTI_CONVO]: { USE: true },
});
const updatedRole = await Role.findOne({ name: SystemRoles.USER }).lean();
expect(updatedRole[PermissionTypes.PROMPTS]).toEqual({
CREATE: true,
USE: true,
SHARED_GLOBAL: true,
});
expect(updatedRole[PermissionTypes.MULTI_CONVO]).toEqual({
USE: true,
});
});
it('should not update MULTI_CONVO permissions when no changes are needed', async () => {
await new Role({
name: SystemRoles.USER,
[PermissionTypes.MULTI_CONVO]: {
USE: true,
},
}).save();
await updateAccessPermissions(SystemRoles.USER, {
[PermissionTypes.MULTI_CONVO]: {
USE: true,
},
});
const updatedRole = await Role.findOne({ name: SystemRoles.USER }).lean();
expect(updatedRole[PermissionTypes.MULTI_CONVO]).toEqual({
USE: true,
});
});
});
describe('initializeRoles', () => {
beforeEach(async () => {
await Role.deleteMany({});
});
it('should create default roles if they do not exist', async () => {
await initializeRoles();
const adminRole = await Role.findOne({ name: SystemRoles.ADMIN }).lean();
const userRole = await Role.findOne({ name: SystemRoles.USER }).lean();
expect(adminRole).toBeTruthy();
expect(userRole).toBeTruthy();
// Check if all permission types exist
Object.values(PermissionTypes).forEach((permType) => {
expect(adminRole[permType]).toBeDefined();
expect(userRole[permType]).toBeDefined();
});
// Check if permissions match defaults (example for ADMIN role)
expect(adminRole[PermissionTypes.PROMPTS].SHARED_GLOBAL).toBe(true);
expect(adminRole[PermissionTypes.BOOKMARKS].USE).toBe(true);
expect(adminRole[PermissionTypes.AGENTS].CREATE).toBe(true);
});
it('should not modify existing permissions for existing roles', async () => {
const customUserRole = {
name: SystemRoles.USER,
[PermissionTypes.PROMPTS]: {
[Permissions.USE]: false,
[Permissions.CREATE]: true,
[Permissions.SHARED_GLOBAL]: true,
},
[PermissionTypes.BOOKMARKS]: {
[Permissions.USE]: false,
},
};
await new Role(customUserRole).save();
await initializeRoles();
const userRole = await Role.findOne({ name: SystemRoles.USER }).lean();
expect(userRole[PermissionTypes.PROMPTS]).toEqual(customUserRole[PermissionTypes.PROMPTS]);
expect(userRole[PermissionTypes.BOOKMARKS]).toEqual(customUserRole[PermissionTypes.BOOKMARKS]);
expect(userRole[PermissionTypes.AGENTS]).toBeDefined();
});
it('should add new permission types to existing roles', async () => {
const partialUserRole = {
name: SystemRoles.USER,
[PermissionTypes.PROMPTS]: roleDefaults[SystemRoles.USER][PermissionTypes.PROMPTS],
[PermissionTypes.BOOKMARKS]: roleDefaults[SystemRoles.USER][PermissionTypes.BOOKMARKS],
};
await new Role(partialUserRole).save();
await initializeRoles();
const userRole = await Role.findOne({ name: SystemRoles.USER }).lean();
expect(userRole[PermissionTypes.AGENTS]).toBeDefined();
expect(userRole[PermissionTypes.AGENTS].CREATE).toBeDefined();
expect(userRole[PermissionTypes.AGENTS].USE).toBeDefined();
expect(userRole[PermissionTypes.AGENTS].SHARED_GLOBAL).toBeDefined();
});
it('should handle multiple runs without duplicating or modifying data', async () => {
await initializeRoles();
await initializeRoles();
const adminRoles = await Role.find({ name: SystemRoles.ADMIN });
const userRoles = await Role.find({ name: SystemRoles.USER });
expect(adminRoles).toHaveLength(1);
expect(userRoles).toHaveLength(1);
const adminRole = adminRoles[0].toObject();
const userRole = userRoles[0].toObject();
// Check if all permission types exist
Object.values(PermissionTypes).forEach((permType) => {
expect(adminRole[permType]).toBeDefined();
expect(userRole[permType]).toBeDefined();
});
});
it('should update roles with missing permission types from roleDefaults', async () => {
const partialAdminRole = {
name: SystemRoles.ADMIN,
[PermissionTypes.PROMPTS]: {
[Permissions.USE]: false,
[Permissions.CREATE]: false,
[Permissions.SHARED_GLOBAL]: false,
},
[PermissionTypes.BOOKMARKS]: roleDefaults[SystemRoles.ADMIN][PermissionTypes.BOOKMARKS],
};
await new Role(partialAdminRole).save();
await initializeRoles();
const adminRole = await Role.findOne({ name: SystemRoles.ADMIN }).lean();
expect(adminRole[PermissionTypes.PROMPTS]).toEqual(partialAdminRole[PermissionTypes.PROMPTS]);
expect(adminRole[PermissionTypes.AGENTS]).toBeDefined();
expect(adminRole[PermissionTypes.AGENTS].CREATE).toBeDefined();
expect(adminRole[PermissionTypes.AGENTS].USE).toBeDefined();
expect(adminRole[PermissionTypes.AGENTS].SHARED_GLOBAL).toBeDefined();
});
it('should include MULTI_CONVO permissions when creating default roles', async () => {
await initializeRoles();
const adminRole = await Role.findOne({ name: SystemRoles.ADMIN }).lean();
const userRole = await Role.findOne({ name: SystemRoles.USER }).lean();
expect(adminRole[PermissionTypes.MULTI_CONVO]).toBeDefined();
expect(userRole[PermissionTypes.MULTI_CONVO]).toBeDefined();
// Check if MULTI_CONVO permissions match defaults
expect(adminRole[PermissionTypes.MULTI_CONVO].USE).toBe(
roleDefaults[SystemRoles.ADMIN][PermissionTypes.MULTI_CONVO].USE,
);
expect(userRole[PermissionTypes.MULTI_CONVO].USE).toBe(
roleDefaults[SystemRoles.USER][PermissionTypes.MULTI_CONVO].USE,
);
});
it('should add MULTI_CONVO permissions to existing roles without them', async () => {
const partialUserRole = {
name: SystemRoles.USER,
[PermissionTypes.PROMPTS]: roleDefaults[SystemRoles.USER][PermissionTypes.PROMPTS],
[PermissionTypes.BOOKMARKS]: roleDefaults[SystemRoles.USER][PermissionTypes.BOOKMARKS],
};
await new Role(partialUserRole).save();
await initializeRoles();
const userRole = await Role.findOne({ name: SystemRoles.USER }).lean();
expect(userRole[PermissionTypes.MULTI_CONVO]).toBeDefined();
expect(userRole[PermissionTypes.MULTI_CONVO].USE).toBeDefined();
});
});
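The tests above pin down the expected behavior of updateAccessPermissions: merge only the supplied permission keys, skip roles that do not exist, and avoid writes when nothing changes. A minimal sketch of a helper with that behavior, assuming the Role model and PermissionTypes constants used in the tests (illustrative only, not the repository's implementation):
async function updateAccessPermissionsSketch(roleName, permissionsUpdate) {
  const role = await Role.findOne({ name: roleName });
  if (!role) {
    return; // non-existent roles are ignored, matching the test above
  }
  let changed = false;
  for (const [permType, perms] of Object.entries(permissionsUpdate)) {
    const current = role[permType] ?? {};
    for (const [permission, value] of Object.entries(perms)) {
      if (current[permission] !== value) {
        current[permission] = value;
        changed = true;
      }
    }
    role[permType] = current;
  }
  if (changed) {
    await role.save(); // only persist when at least one value actually changed
  }
}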

View File

@@ -1,6 +1,6 @@
const crypto = require('crypto');
const mongoose = require('mongoose');
const signPayload = require('~/server/services/signPayload');
const { hashToken } = require('~/server/utils/crypto');
const { logger } = require('~/config');
const { REFRESH_TOKEN_EXPIRY } = process.env ?? {};
@@ -39,7 +39,8 @@ sessionSchema.methods.generateRefreshToken = async function () {
expirationTime: Math.floor((expiresIn - Date.now()) / 1000),
});
this.refreshTokenHash = await hashToken(refreshToken);
const hash = crypto.createHash('sha256');
this.refreshTokenHash = hash.update(refreshToken).digest('hex');
await this.save();
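The hunk above swaps an inline SHA-256 digest for a shared hashToken helper from ~/server/utils/crypto. The helper itself is not shown in this diff; a plausible stand-in, assuming it only needs to return a stable SHA-256 hex digest of the refresh token, would be:
const { createHash } = require('crypto');

// Hypothetical stand-in for hashToken; the real helper in
// ~/server/utils/crypto may differ in implementation details.
async function hashToken(token) {
  return createHash('sha256').update(token).digest('hex');
}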

View File

@@ -1,252 +1,89 @@
const { nanoid } = require('nanoid');
const { Constants } = require('librechat-data-provider');
const SharedLink = require('./schema/shareSchema');
const crypto = require('crypto');
const { getMessages } = require('./Message');
const SharedLink = require('./schema/shareSchema');
const logger = require('~/config/winston');
/**
* Anonymizes a conversation ID
* @returns {string} The anonymized conversation ID
*/
function anonymizeConvoId() {
return `convo_${nanoid()}`;
}
module.exports = {
SharedLink,
getSharedMessages: async (shareId) => {
try {
const share = await SharedLink.findOne({ shareId })
.populate({
path: 'messages',
select: '-_id -__v -user',
})
.select('-_id -__v -user')
.lean();
/**
* Anonymizes an assistant ID
* @returns {string} The anonymized assistant ID
*/
function anonymizeAssistantId() {
return `a_${nanoid()}`;
}
if (!share || !share.conversationId || !share.isPublic) {
return null;
}
/**
* Anonymizes a message ID
* @param {string} id - The original message ID
* @returns {string} The anonymized message ID
*/
function anonymizeMessageId(id) {
return id === Constants.NO_PARENT ? id : `msg_${nanoid()}`;
}
/**
* Anonymizes a conversation object
* @param {object} conversation - The conversation object
* @returns {object} The anonymized conversation object
*/
function anonymizeConvo(conversation) {
const newConvo = { ...conversation };
if (newConvo.assistant_id) {
newConvo.assistant_id = anonymizeAssistantId();
}
return newConvo;
}
/**
* Anonymizes messages in a conversation
* @param {TMessage[]} messages - The original messages
* @param {string} newConvoId - The new conversation ID
* @returns {TMessage[]} The anonymized messages
*/
function anonymizeMessages(messages, newConvoId) {
const idMap = new Map();
return messages.map((message) => {
const newMessageId = anonymizeMessageId(message.messageId);
idMap.set(message.messageId, newMessageId);
const anonymizedMessage = Object.assign(message, {
messageId: newMessageId,
parentMessageId:
idMap.get(message.parentMessageId) || anonymizeMessageId(message.parentMessageId),
conversationId: newConvoId,
});
if (anonymizedMessage.model && anonymizedMessage.model.startsWith('asst_')) {
anonymizedMessage.model = anonymizeAssistantId();
return share;
} catch (error) {
logger.error('[getShare] Error getting share link', error);
return { message: 'Error getting share link' };
}
},
return anonymizedMessage;
});
}
/**
* Retrieves shared messages for a given share ID
* @param {string} shareId - The share ID
* @returns {Promise<object|null>} The shared conversation data or null if not found
*/
async function getSharedMessages(shareId) {
try {
const share = await SharedLink.findOne({ shareId })
.populate({
path: 'messages',
select: '-_id -__v -user',
})
.select('-_id -__v -user')
.lean();
if (!share || !share.conversationId || !share.isPublic) {
return null;
}
const newConvoId = anonymizeConvoId();
return Object.assign(share, {
conversationId: newConvoId,
messages: anonymizeMessages(share.messages, newConvoId),
});
} catch (error) {
logger.error('[getShare] Error getting share link', error);
throw new Error('Error getting share link');
}
}
/**
* Retrieves shared links for a user
* @param {string} user - The user ID
* @param {number} [pageNumber=1] - The page number
* @param {number} [pageSize=25] - The page size
* @param {boolean} [isPublic=true] - Whether to retrieve public links only
* @returns {Promise<object>} The shared links and pagination data
*/
async function getSharedLinks(user, pageNumber = 1, pageSize = 25, isPublic = true) {
const query = { user, isPublic };
try {
const [totalConvos, sharedLinks] = await Promise.all([
SharedLink.countDocuments(query),
SharedLink.find(query)
getSharedLinks: async (user, pageNumber = 1, pageSize = 25, isPublic = true) => {
const query = { user, isPublic };
try {
const totalConvos = (await SharedLink.countDocuments(query)) || 1;
const totalPages = Math.ceil(totalConvos / pageSize);
const shares = await SharedLink.find(query)
.sort({ updatedAt: -1 })
.skip((pageNumber - 1) * pageSize)
.limit(pageSize)
.select('-_id -__v -user')
.lean(),
]);
.lean();
const totalPages = Math.ceil((totalConvos || 1) / pageSize);
return { sharedLinks: shares, pages: totalPages, pageNumber, pageSize };
} catch (error) {
logger.error('[getShareByPage] Error getting shares', error);
return { message: 'Error getting shares' };
}
},
return {
sharedLinks,
pages: totalPages,
pageNumber,
pageSize,
};
} catch (error) {
logger.error('[getShareByPage] Error getting shares', error);
throw new Error('Error getting shares');
}
}
/**
* Creates a new shared link
* @param {string} user - The user ID
* @param {object} shareData - The share data
* @param {string} shareData.conversationId - The conversation ID
* @returns {Promise<object>} The created shared link
*/
async function createSharedLink(user, { conversationId, ...shareData }) {
try {
createSharedLink: async (user, { conversationId, ...shareData }) => {
const share = await SharedLink.findOne({ conversationId }).select('-_id -__v -user').lean();
if (share) {
const newConvoId = anonymizeConvoId();
const sharedConvo = anonymizeConvo(share);
return Object.assign(sharedConvo, {
conversationId: newConvoId,
messages: anonymizeMessages(share.messages, newConvoId),
});
return share;
}
const shareId = nanoid();
const messages = await getMessages({ conversationId });
const update = { ...shareData, shareId, messages, user };
const newShare = await SharedLink.findOneAndUpdate({ conversationId, user }, update, {
new: true,
upsert: true,
}).lean();
try {
const shareId = crypto.randomUUID();
const messages = await getMessages({ conversationId });
const update = { ...shareData, shareId, messages, user };
return await SharedLink.findOneAndUpdate({ conversationId: conversationId, user }, update, {
new: true,
upsert: true,
});
} catch (error) {
logger.error('[saveShareMessage] Error saving conversation', error);
return { message: 'Error saving conversation' };
}
},
const newConvoId = anonymizeConvoId();
const sharedConvo = anonymizeConvo(newShare);
return Object.assign(sharedConvo, {
conversationId: newConvoId,
messages: anonymizeMessages(newShare.messages, newConvoId),
});
} catch (error) {
logger.error('[createSharedLink] Error creating shared link', error);
throw new Error('Error creating shared link');
}
}
/**
* Updates an existing shared link
* @param {string} user - The user ID
* @param {object} shareData - The share data to update
* @param {string} shareData.conversationId - The conversation ID
* @returns {Promise<object>} The updated shared link
*/
async function updateSharedLink(user, { conversationId, ...shareData }) {
try {
updateSharedLink: async (user, { conversationId, ...shareData }) => {
const share = await SharedLink.findOne({ conversationId }).select('-_id -__v -user').lean();
if (!share) {
return { message: 'Share not found' };
}
// update messages to the latest
const messages = await getMessages({ conversationId });
const update = { ...shareData, messages, user };
const updatedShare = await SharedLink.findOneAndUpdate({ conversationId, user }, update, {
return await SharedLink.findOneAndUpdate({ conversationId: conversationId, user }, update, {
new: true,
upsert: false,
}).lean();
const newConvoId = anonymizeConvoId();
const sharedConvo = anonymizeConvo(updatedShare);
return Object.assign(sharedConvo, {
conversationId: newConvoId,
messages: anonymizeMessages(updatedShare.messages, newConvoId),
});
} catch (error) {
logger.error('[updateSharedLink] Error updating shared link', error);
throw new Error('Error updating shared link');
}
}
},
/**
* Deletes a shared link
* @param {string} user - The user ID
* @param {object} params - The deletion parameters
* @param {string} params.shareId - The share ID to delete
* @returns {Promise<object>} The result of the deletion
*/
async function deleteSharedLink(user, { shareId }) {
try {
const result = await SharedLink.findOneAndDelete({ shareId, user });
return result ? { message: 'Share deleted successfully' } : { message: 'Share not found' };
} catch (error) {
logger.error('[deleteSharedLink] Error deleting shared link', error);
throw new Error('Error deleting shared link');
}
}
/**
* Deletes all shared links for a specific user
* @param {string} user - The user ID
* @returns {Promise<object>} The result of the deletion
*/
async function deleteAllSharedLinks(user) {
try {
const result = await SharedLink.deleteMany({ user });
return {
message: 'All shared links have been deleted successfully',
deletedCount: result.deletedCount,
};
} catch (error) {
logger.error('[deleteAllSharedLinks] Error deleting shared links', error);
throw new Error('Error deleting shared links');
}
}
module.exports = {
SharedLink,
getSharedLinks,
createSharedLink,
updateSharedLink,
deleteSharedLink,
getSharedMessages,
deleteAllSharedLinks,
deleteSharedLink: async (user, { shareId }) => {
const share = await SharedLink.findOne({ shareId, user });
if (!share) {
return { message: 'Share not found' };
}
return await SharedLink.findOneAndDelete({ shareId, user });
},
};
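The anonymization helpers above remap message IDs while preserving parent/child links, and replace assistant model IDs. An illustrative call, assuming Constants.NO_PARENT is the sentinel used for root messages:
const newConvoId = anonymizeConvoId();
const anonymized = anonymizeMessages(
  [
    { messageId: 'm1', parentMessageId: Constants.NO_PARENT, conversationId: 'c1' },
    { messageId: 'm2', parentMessageId: 'm1', conversationId: 'c1' },
  ],
  newConvoId,
);
// Both messages now carry newConvoId, and the second message's parentMessageId
// equals the first message's anonymized messageId.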

View File

@@ -1,117 +0,0 @@
const tokenSchema = require('./schema/tokenSchema');
const mongoose = require('mongoose');
const { logger } = require('~/config');
/**
* Token model.
* @type {mongoose.Model}
*/
const Token = mongoose.model('Token', tokenSchema);
/**
* Creates a new Token instance.
* @param {Object} tokenData - The data for the new Token.
* @param {mongoose.Types.ObjectId} tokenData.userId - The user's ID. It is required.
* @param {String} tokenData.email - The user's email.
* @param {String} tokenData.token - The token. It is required.
* @param {Number} tokenData.expiresIn - The number of seconds until the token expires.
* @returns {Promise<mongoose.Document>} The new Token instance.
* @throws Will throw an error if token creation fails.
*/
async function createToken(tokenData) {
try {
const currentTime = new Date();
const expiresAt = new Date(currentTime.getTime() + tokenData.expiresIn * 1000);
const newTokenData = {
...tokenData,
createdAt: currentTime,
expiresAt,
};
const newToken = new Token(newTokenData);
return await newToken.save();
} catch (error) {
logger.debug('An error occurred while creating token:', error);
throw error;
}
}
/**
* Finds a Token document that matches the provided query.
* @param {Object} query - The query to match against.
* @param {mongoose.Types.ObjectId|String} query.userId - The ID of the user.
* @param {String} query.token - The token value.
* @param {String} query.email - The email of the user.
* @returns {Promise<Object|null>} The matched Token document, or null if not found.
* @throws Will throw an error if the find operation fails.
*/
async function findToken(query) {
try {
const conditions = [];
if (query.userId) {
conditions.push({ userId: query.userId });
}
if (query.token) {
conditions.push({ token: query.token });
}
if (query.email) {
conditions.push({ email: query.email });
}
const token = await Token.findOne({
$and: conditions,
}).lean();
return token;
} catch (error) {
logger.debug('An error occurred while finding token:', error);
throw error;
}
}
/**
* Updates a Token document that matches the provided query.
* @param {Object} query - The query to match against.
* @param {mongoose.Types.ObjectId|String} query.userId - The ID of the user.
* @param {String} query.token - The token value.
* @param {Object} updateData - The data to update the Token with.
* @returns {Promise<mongoose.Document|null>} The updated Token document, or null if not found.
* @throws Will throw an error if the update operation fails.
*/
async function updateToken(query, updateData) {
try {
return await Token.findOneAndUpdate(query, updateData, { new: true });
} catch (error) {
logger.debug('An error occurred while updating token:', error);
throw error;
}
}
/**
* Deletes all Token documents that match the provided token, user ID, or email.
* @param {Object} query - The query to match against.
* @param {mongoose.Types.ObjectId|String} query.userId - The ID of the user.
* @param {String} query.token - The token value.
* @param {String} query.email - The email of the user.
* @returns {Promise<Object>} The result of the delete operation.
* @throws Will throw an error if the delete operation fails.
*/
async function deleteTokens(query) {
try {
return await Token.deleteMany({
$or: [{ userId: query.userId }, { token: query.token }, { email: query.email }],
});
} catch (error) {
logger.debug('An error occurred while deleting tokens:', error);
throw error;
}
}
module.exports = {
createToken,
findToken,
updateToken,
deleteTokens,
};
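A hypothetical usage of the Token helpers above for a short-lived, single-purpose token (the values are illustrative, not taken from the repository):
const userId = new mongoose.Types.ObjectId();
await createToken({
  userId,
  email: 'user@example.com',
  token: 'hashed-token-value',
  expiresIn: 900, // expiresAt is derived as createdAt + expiresIn * 1000
});
const stored = await findToken({ userId, email: 'user@example.com' });
// stored is the lean token document, or null once the TTL index has removed it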

View File

@@ -1,12 +1,12 @@
const mongoose = require('mongoose');
const { isEnabled } = require('~/server/utils/handleText');
const { isEnabled } = require('../server/utils/handleText');
const transactionSchema = require('./schema/transaction');
const { getMultiplier, getCacheMultiplier } = require('./tx');
const { getMultiplier } = require('./tx');
const { logger } = require('~/config');
const Balance = require('./Balance');
const cancelRate = 1.15;
/** Method to calculate and set the tokenValue for a transaction */
// Method to calculate and set the tokenValue for a transaction
transactionSchema.methods.calculateTokenValue = function () {
if (!this.valueKey || !this.tokenType) {
this.tokenValue = this.rawAmount;
@@ -21,17 +21,15 @@ transactionSchema.methods.calculateTokenValue = function () {
}
};
/**
* Static method to create a transaction and update the balance
* @param {txData} txData - Transaction data.
*/
transactionSchema.statics.create = async function (txData) {
// Static method to create a transaction and update the balance
transactionSchema.statics.create = async function (transactionData) {
const Transaction = this;
const transaction = new Transaction(txData);
transaction.endpointTokenConfig = txData.endpointTokenConfig;
const transaction = new Transaction(transactionData);
transaction.endpointTokenConfig = transactionData.endpointTokenConfig;
transaction.calculateTokenValue();
// Save the transaction
await transaction.save();
if (!isEnabled(process.env.CHECK_BALANCE)) {
@@ -59,109 +57,6 @@ transactionSchema.statics.create = async function (txData) {
};
};
/**
* Static method to create a structured transaction and update the balance
* @param {txData} txData - Transaction data.
*/
transactionSchema.statics.createStructured = async function (txData) {
const Transaction = this;
const transaction = new Transaction({
...txData,
endpointTokenConfig: txData.endpointTokenConfig,
});
transaction.calculateStructuredTokenValue();
await transaction.save();
if (!isEnabled(process.env.CHECK_BALANCE)) {
return;
}
let balance = await Balance.findOne({ user: transaction.user }).lean();
let incrementValue = transaction.tokenValue;
if (balance && balance?.tokenCredits + incrementValue < 0) {
incrementValue = -balance.tokenCredits;
}
balance = await Balance.findOneAndUpdate(
{ user: transaction.user },
{ $inc: { tokenCredits: incrementValue } },
{ upsert: true, new: true },
).lean();
return {
rate: transaction.rate,
user: transaction.user.toString(),
balance: balance.tokenCredits,
[transaction.tokenType]: incrementValue,
};
};
/** Method to calculate token value for structured tokens */
transactionSchema.methods.calculateStructuredTokenValue = function () {
if (!this.tokenType) {
this.tokenValue = this.rawAmount;
return;
}
const { model, endpointTokenConfig } = this;
if (this.tokenType === 'prompt') {
const inputMultiplier = getMultiplier({ tokenType: 'prompt', model, endpointTokenConfig });
const writeMultiplier =
getCacheMultiplier({ cacheType: 'write', model, endpointTokenConfig }) ?? inputMultiplier;
const readMultiplier =
getCacheMultiplier({ cacheType: 'read', model, endpointTokenConfig }) ?? inputMultiplier;
this.rateDetail = {
input: inputMultiplier,
write: writeMultiplier,
read: readMultiplier,
};
const totalPromptTokens =
Math.abs(this.inputTokens || 0) +
Math.abs(this.writeTokens || 0) +
Math.abs(this.readTokens || 0);
if (totalPromptTokens > 0) {
this.rate =
(Math.abs(inputMultiplier * (this.inputTokens || 0)) +
Math.abs(writeMultiplier * (this.writeTokens || 0)) +
Math.abs(readMultiplier * (this.readTokens || 0))) /
totalPromptTokens;
} else {
this.rate = Math.abs(inputMultiplier); // Default to input rate if no tokens
}
this.tokenValue = -(
Math.abs(this.inputTokens || 0) * inputMultiplier +
Math.abs(this.writeTokens || 0) * writeMultiplier +
Math.abs(this.readTokens || 0) * readMultiplier
);
this.rawAmount = -totalPromptTokens;
} else if (this.tokenType === 'completion') {
const multiplier = getMultiplier({ tokenType: this.tokenType, model, endpointTokenConfig });
this.rate = Math.abs(multiplier);
this.tokenValue = -Math.abs(this.rawAmount) * multiplier;
this.rawAmount = -Math.abs(this.rawAmount);
}
if (this.context && this.tokenType === 'completion' && this.context === 'incomplete') {
this.tokenValue = Math.ceil(this.tokenValue * cancelRate);
this.rate *= cancelRate;
if (this.rateDetail) {
this.rateDetail = Object.fromEntries(
Object.entries(this.rateDetail).map(([k, v]) => [k, v * cancelRate]),
);
}
}
};
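// To make the arithmetic in calculateStructuredTokenValue concrete, here is a
// worked example with illustrative multipliers (placeholders, not the
// repository's actual rates): input = 3, write = 3.75, read = 0.3 credits per token.
const inputMultiplier = 3;
const writeMultiplier = 3.75;
const readMultiplier = 0.3;
const inputTokens = 11;
const writeTokens = 140522;
const readTokens = 0;

const tokenValue = -(
  inputTokens * inputMultiplier +
  writeTokens * writeMultiplier +
  readTokens * readMultiplier
); // -526990.5
const rate =
  Math.abs(tokenValue) / (inputTokens + writeTokens + readTokens); // ≈ 3.75
// Completion transactions with context 'incomplete' additionally scale
// tokenValue and rate by cancelRate (1.15).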
const Transaction = mongoose.model('Transaction', transactionSchema);
/**

View File

@@ -1,348 +0,0 @@
const mongoose = require('mongoose');
const { MongoMemoryServer } = require('mongodb-memory-server');
const Balance = require('./Balance');
const { spendTokens, spendStructuredTokens } = require('./spendTokens');
const { getMultiplier, getCacheMultiplier } = require('./tx');
let mongoServer;
beforeAll(async () => {
mongoServer = await MongoMemoryServer.create();
const mongoUri = mongoServer.getUri();
await mongoose.connect(mongoUri);
});
afterAll(async () => {
await mongoose.disconnect();
await mongoServer.stop();
});
beforeEach(async () => {
await mongoose.connection.dropDatabase();
});
describe('Regular Token Spending Tests', () => {
test('Balance should decrease when spending tokens with spendTokens', async () => {
// Arrange
const userId = new mongoose.Types.ObjectId();
const initialBalance = 10000000; // $10.00
await Balance.create({ user: userId, tokenCredits: initialBalance });
const model = 'gpt-3.5-turbo';
const txData = {
user: userId,
conversationId: 'test-conversation-id',
model,
context: 'test',
endpointTokenConfig: null,
};
const tokenUsage = {
promptTokens: 100,
completionTokens: 50,
};
// Act
process.env.CHECK_BALANCE = 'true';
await spendTokens(txData, tokenUsage);
// Assert
console.log('Initial Balance:', initialBalance);
const updatedBalance = await Balance.findOne({ user: userId });
console.log('Updated Balance:', updatedBalance.tokenCredits);
const promptMultiplier = getMultiplier({ model, tokenType: 'prompt' });
const completionMultiplier = getMultiplier({ model, tokenType: 'completion' });
const expectedPromptCost = tokenUsage.promptTokens * promptMultiplier;
const expectedCompletionCost = tokenUsage.completionTokens * completionMultiplier;
const expectedTotalCost = expectedPromptCost + expectedCompletionCost;
const expectedBalance = initialBalance - expectedTotalCost;
expect(updatedBalance.tokenCredits).toBeLessThan(initialBalance);
expect(updatedBalance.tokenCredits).toBeCloseTo(expectedBalance, 0);
console.log('Expected Total Cost:', expectedTotalCost);
console.log('Actual Balance Decrease:', initialBalance - updatedBalance.tokenCredits);
});
test('spendTokens should handle zero completion tokens', async () => {
// Arrange
const userId = new mongoose.Types.ObjectId();
const initialBalance = 10000000; // $10.00
await Balance.create({ user: userId, tokenCredits: initialBalance });
const model = 'gpt-3.5-turbo';
const txData = {
user: userId,
conversationId: 'test-conversation-id',
model,
context: 'test',
endpointTokenConfig: null,
};
const tokenUsage = {
promptTokens: 100,
completionTokens: 0,
};
// Act
process.env.CHECK_BALANCE = 'true';
await spendTokens(txData, tokenUsage);
// Assert
const updatedBalance = await Balance.findOne({ user: userId });
const promptMultiplier = getMultiplier({ model, tokenType: 'prompt' });
const expectedCost = tokenUsage.promptTokens * promptMultiplier;
expect(updatedBalance.tokenCredits).toBeCloseTo(initialBalance - expectedCost, 0);
console.log('Initial Balance:', initialBalance);
console.log('Updated Balance:', updatedBalance.tokenCredits);
console.log('Expected Cost:', expectedCost);
});
test('spendTokens should handle undefined token counts', async () => {
const userId = new mongoose.Types.ObjectId();
const initialBalance = 10000000; // $10.00
await Balance.create({ user: userId, tokenCredits: initialBalance });
const model = 'gpt-3.5-turbo';
const txData = {
user: userId,
conversationId: 'test-conversation-id',
model,
context: 'test',
endpointTokenConfig: null,
};
const tokenUsage = {};
const result = await spendTokens(txData, tokenUsage);
expect(result).toBeUndefined();
});
test('spendTokens should handle only prompt tokens', async () => {
const userId = new mongoose.Types.ObjectId();
const initialBalance = 10000000; // $10.00
await Balance.create({ user: userId, tokenCredits: initialBalance });
const model = 'gpt-3.5-turbo';
const txData = {
user: userId,
conversationId: 'test-conversation-id',
model,
context: 'test',
endpointTokenConfig: null,
};
const tokenUsage = { promptTokens: 100 };
await spendTokens(txData, tokenUsage);
const updatedBalance = await Balance.findOne({ user: userId });
const promptMultiplier = getMultiplier({ model, tokenType: 'prompt' });
const expectedCost = 100 * promptMultiplier;
expect(updatedBalance.tokenCredits).toBeCloseTo(initialBalance - expectedCost, 0);
});
});
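// The expected-balance math asserted above reduces to one expression; with
// placeholder multipliers (not the real gpt-3.5-turbo rates) it looks like:
const promptMultiplier = 1.5;    // placeholder rate
const completionMultiplier = 2;  // placeholder rate
const expectedTotalCost = 100 * promptMultiplier + 50 * completionMultiplier; // 250
const expectedBalance = 10000000 - expectedTotalCost; // 9999750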
describe('Structured Token Spending Tests', () => {
test('Balance should decrease and rawAmount should be set when spending a large number of structured tokens', async () => {
// Arrange
const userId = new mongoose.Types.ObjectId();
const initialBalance = 17613154.55; // $17.61
await Balance.create({ user: userId, tokenCredits: initialBalance });
const model = 'claude-3-5-sonnet';
const txData = {
user: userId,
conversationId: 'c23a18da-706c-470a-ac28-ec87ed065199',
model,
context: 'message',
endpointTokenConfig: null, // We'll use the default rates
};
const tokenUsage = {
promptTokens: {
input: 11,
write: 140522,
read: 0,
},
completionTokens: 5,
};
// Get the actual multipliers
const promptMultiplier = getMultiplier({ model, tokenType: 'prompt' });
const completionMultiplier = getMultiplier({ model, tokenType: 'completion' });
const writeMultiplier = getCacheMultiplier({ model, cacheType: 'write' });
const readMultiplier = getCacheMultiplier({ model, cacheType: 'read' });
console.log('Multipliers:', {
promptMultiplier,
completionMultiplier,
writeMultiplier,
readMultiplier,
});
// Act
process.env.CHECK_BALANCE = 'true';
const result = await spendStructuredTokens(txData, tokenUsage);
// Assert
console.log('Initial Balance:', initialBalance);
console.log('Updated Balance:', result.completion.balance);
console.log('Transaction Result:', result);
const expectedPromptCost =
tokenUsage.promptTokens.input * promptMultiplier +
tokenUsage.promptTokens.write * writeMultiplier +
tokenUsage.promptTokens.read * readMultiplier;
const expectedCompletionCost = tokenUsage.completionTokens * completionMultiplier;
const expectedTotalCost = expectedPromptCost + expectedCompletionCost;
const expectedBalance = initialBalance - expectedTotalCost;
console.log('Expected Cost:', expectedTotalCost);
console.log('Expected Balance:', expectedBalance);
expect(result.completion.balance).toBeLessThan(initialBalance);
// Allow for a small difference (e.g., 100 token credits, which is $0.0001)
const allowedDifference = 100;
expect(Math.abs(result.completion.balance - expectedBalance)).toBeLessThan(allowedDifference);
// Check if the decrease is approximately as expected
const balanceDecrease = initialBalance - result.completion.balance;
expect(balanceDecrease).toBeCloseTo(expectedTotalCost, 0);
// Check token values
const expectedPromptTokenValue = -(
tokenUsage.promptTokens.input * promptMultiplier +
tokenUsage.promptTokens.write * writeMultiplier +
tokenUsage.promptTokens.read * readMultiplier
);
const expectedCompletionTokenValue = -tokenUsage.completionTokens * completionMultiplier;
expect(result.prompt.prompt).toBeCloseTo(expectedPromptTokenValue, 1);
expect(result.completion.completion).toBe(expectedCompletionTokenValue);
console.log('Expected prompt tokenValue:', expectedPromptTokenValue);
console.log('Actual prompt tokenValue:', result.prompt.prompt);
console.log('Expected completion tokenValue:', expectedCompletionTokenValue);
console.log('Actual completion tokenValue:', result.completion.completion);
});
test('should handle zero completion tokens in structured spending', async () => {
const userId = new mongoose.Types.ObjectId();
const initialBalance = 17613154.55;
await Balance.create({ user: userId, tokenCredits: initialBalance });
const model = 'claude-3-5-sonnet';
const txData = {
user: userId,
conversationId: 'test-convo',
model,
context: 'message',
};
const tokenUsage = {
promptTokens: {
input: 10,
write: 100,
read: 5,
},
completionTokens: 0,
};
process.env.CHECK_BALANCE = 'true';
const result = await spendStructuredTokens(txData, tokenUsage);
expect(result.prompt).toBeDefined();
expect(result.completion).toBeUndefined();
expect(result.prompt.prompt).toBeLessThan(0);
});
test('should handle only prompt tokens in structured spending', async () => {
const userId = new mongoose.Types.ObjectId();
const initialBalance = 17613154.55;
await Balance.create({ user: userId, tokenCredits: initialBalance });
const model = 'claude-3-5-sonnet';
const txData = {
user: userId,
conversationId: 'test-convo',
model,
context: 'message',
};
const tokenUsage = {
promptTokens: {
input: 10,
write: 100,
read: 5,
},
};
process.env.CHECK_BALANCE = 'true';
const result = await spendStructuredTokens(txData, tokenUsage);
expect(result.prompt).toBeDefined();
expect(result.completion).toBeUndefined();
expect(result.prompt.prompt).toBeLessThan(0);
});
test('should handle undefined token counts in structured spending', async () => {
const userId = new mongoose.Types.ObjectId();
const initialBalance = 17613154.55;
await Balance.create({ user: userId, tokenCredits: initialBalance });
const model = 'claude-3-5-sonnet';
const txData = {
user: userId,
conversationId: 'test-convo',
model,
context: 'message',
};
const tokenUsage = {};
process.env.CHECK_BALANCE = 'true';
const result = await spendStructuredTokens(txData, tokenUsage);
expect(result).toEqual({
prompt: undefined,
completion: undefined,
});
});
test('should handle incomplete context for completion tokens', async () => {
const userId = new mongoose.Types.ObjectId();
const initialBalance = 17613154.55;
await Balance.create({ user: userId, tokenCredits: initialBalance });
const model = 'claude-3-5-sonnet';
const txData = {
user: userId,
conversationId: 'test-convo',
model,
context: 'incomplete',
};
const tokenUsage = {
promptTokens: {
input: 10,
write: 100,
read: 5,
},
completionTokens: 50,
};
process.env.CHECK_BALANCE = 'true';
const result = await spendStructuredTokens(txData, tokenUsage);
expect(result.completion.completion).toBeCloseTo(-50 * 15 * 1.15, 0); // Assuming multiplier is 15 and cancelRate is 1.15
});
});
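The final test above encodes the cancel surcharge directly in its expectation; assuming a completion multiplier of 15 (as the test's own comment states) and a cancelRate of 1.15, the asserted value works out as:
const completionTokens = 50;
const completionMultiplier = 15; // taken from the test comment, not verified here
const cancelRate = 1.15;
const expectedCompletionValue = -completionTokens * completionMultiplier * cancelRate; // -862.5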

View File

@@ -1,5 +1,61 @@
const mongoose = require('mongoose');
const userSchema = require('~/models/schema/userSchema');
const bcrypt = require('bcryptjs');
const signPayload = require('../server/services/signPayload');
const userSchema = require('./schema/userSchema.js');
const { SESSION_EXPIRY } = process.env ?? {};
const expires = eval(SESSION_EXPIRY) ?? 1000 * 60 * 15;
userSchema.methods.toJSON = function () {
return {
id: this._id,
provider: this.provider,
email: this.email,
name: this.name,
username: this.username,
avatar: this.avatar,
role: this.role,
emailVerified: this.emailVerified,
plugins: this.plugins,
createdAt: this.createdAt,
updatedAt: this.updatedAt,
};
};
userSchema.methods.generateToken = async function () {
return await signPayload({
payload: {
id: this._id,
username: this.username,
provider: this.provider,
email: this.email,
},
secret: process.env.JWT_SECRET,
expirationTime: expires / 1000,
});
};
userSchema.methods.comparePassword = function (candidatePassword, callback) {
bcrypt.compare(candidatePassword, this.password, (err, isMatch) => {
if (err) {
return callback(err);
}
callback(null, isMatch);
});
};
module.exports.hashPassword = async (password) => {
const hashedPassword = await new Promise((resolve, reject) => {
bcrypt.hash(password, 10, function (err, hash) {
if (err) {
reject(err);
} else {
resolve(hash);
}
});
});
return hashedPassword;
};
const User = mongoose.model('User', userSchema);
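comparePassword above is callback-based while the surrounding code is promise-oriented; a hedged sketch of a login check built on these methods (illustrative, not the repository's auth flow):
const { promisify } = require('util');

async function verifyLogin(user, candidatePassword) {
  const compare = promisify(user.comparePassword.bind(user));
  const isMatch = await compare(candidatePassword);
  if (!isMatch) {
    throw new Error('Invalid credentials');
  }
  return user.generateToken(); // signed JWT, expiring per SESSION_EXPIRY
}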

View File

@@ -1,22 +1,3 @@
const {
comparePassword,
deleteUserById,
generateToken,
getUserById,
updateUser,
createUser,
countUsers,
findUser,
} = require('./userMethods');
const {
findFileById,
createFile,
updateFile,
deleteFile,
deleteFiles,
getFiles,
updateFileUsage,
} = require('./File');
const {
getMessages,
saveMessage,
@@ -27,22 +8,8 @@ const {
} = require('./Message');
const { getConvoTitle, getConvo, saveConvo, deleteConvos } = require('./Conversation');
const { getPreset, getPresets, savePreset, deletePresets } = require('./Preset');
const { createToken, findToken, updateToken, deleteTokens } = require('./Token');
const Session = require('./Session');
const Balance = require('./Balance');
const User = require('./User');
const Key = require('./Key');
module.exports = {
comparePassword,
deleteUserById,
generateToken,
getUserById,
updateUser,
createUser,
countUsers,
findUser,
const { hashPassword, getUser, updateUser } = require('./userMethods');
const {
findFileById,
createFile,
updateFile,
@@ -50,6 +17,21 @@ module.exports = {
deleteFiles,
getFiles,
updateFileUsage,
} = require('./File');
const Key = require('./Key');
const User = require('./User');
const Session = require('./Session');
const Balance = require('./Balance');
module.exports = {
User,
Key,
Session,
Balance,
hashPassword,
updateUser,
getUser,
getMessages,
saveMessage,
@@ -68,13 +50,11 @@ module.exports = {
savePreset,
deletePresets,
createToken,
findToken,
updateToken,
deleteTokens,
User,
Key,
Session,
Balance,
findFileById,
createFile,
updateFile,
deleteFile,
deleteFiles,
getFiles,
updateFileUsage,
};

View File

@@ -1,70 +0,0 @@
const crypto = require('crypto');
const bcrypt = require('bcryptjs');
const mongoose = require('mongoose');
const { createToken, findToken } = require('./Token');
const logger = require('~/config/winston');
/**
* @module inviteUser
* @description This module provides functions to create and get user invites
*/
/**
* @function createInvite
* @description This function creates a new user invite
* @param {string} email - The email of the user to invite
* @returns {Promise<Object>} A promise that resolves to the saved invite document
* @throws {Error} If there is an error creating the invite
*/
const createInvite = async (email) => {
try {
let token = crypto.randomBytes(32).toString('hex');
const hash = bcrypt.hashSync(token, 10);
const encodedToken = encodeURIComponent(token);
const fakeUserId = new mongoose.Types.ObjectId();
await createToken({
userId: fakeUserId,
email,
token: hash,
createdAt: Date.now(),
expiresIn: 604800,
});
return encodedToken;
} catch (error) {
logger.error('[createInvite] Error creating invite', error);
return { message: 'Error creating invite' };
}
};
/**
* @function getInvite
* @description This function retrieves a user invite
* @param {string} encodedToken - The token of the invite to retrieve
* @param {string} email - The email of the user to validate
* @returns {Promise<Object>} A promise that resolves to the retrieved invite document
* @throws {Error} If there is an error retrieving the invite, if the invite does not exist, or if the email does not match
*/
const getInvite = async (encodedToken, email) => {
try {
const token = decodeURIComponent(encodedToken);
const hash = bcrypt.hashSync(token, 10);
const invite = await findToken({ token: hash, email });
if (!invite) {
throw new Error('Invite not found or email does not match');
}
return invite;
} catch (error) {
logger.error('[getInvite] Error getting invite', error);
return { error: true, message: error.message };
}
};
module.exports = {
createInvite,
getInvite,
};
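Note that bcrypt salts every hash, so re-hashing the presented token in getInvite produces a different digest than the one stored by createInvite, and an equality lookup on the hash cannot match. A compare-based lookup, sketched here under the assumption that a pending invite can be found by email alone, is the usual pattern:
const getInviteByCompare = async (encodedToken, email) => {
  const token = decodeURIComponent(encodedToken);
  const invite = await findToken({ email }); // assumes one pending invite per email
  if (!invite || !bcrypt.compareSync(token, invite.token)) {
    throw new Error('Invite not found or email does not match');
  }
  return invite;
};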

View File

@@ -39,7 +39,6 @@ const actionSchema = new Schema({
default: 'action_prototype',
},
settings: Schema.Types.Mixed,
agent_id: String,
assistant_id: String,
metadata: {
api_key: String, // private, encrypted

View File

@@ -1,71 +0,0 @@
const mongoose = require('mongoose');
const agentSchema = mongoose.Schema(
{
id: {
type: String,
index: true,
required: true,
},
name: {
type: String,
},
description: {
type: String,
},
instructions: {
type: String,
},
avatar: {
type: {
filepath: String,
source: String,
},
default: undefined,
},
provider: {
type: String,
required: true,
},
model: {
type: String,
required: true,
},
model_parameters: {
type: Object,
},
access_level: {
type: Number,
},
tools: {
type: [String],
default: undefined,
},
tool_kwargs: {
type: [{ type: mongoose.Schema.Types.Mixed }],
},
file_ids: {
type: [String],
default: undefined,
},
actions: {
type: [String],
default: undefined,
},
author: {
type: mongoose.Schema.Types.ObjectId,
ref: 'User',
required: true,
},
projectIds: {
type: [mongoose.Schema.Types.ObjectId],
ref: 'Project',
index: true,
},
},
{
timestamps: true,
},
);
module.exports = agentSchema;

View File

@@ -19,10 +19,6 @@ const assistantSchema = mongoose.Schema(
},
default: undefined,
},
conversation_starters: {
type: [String],
default: [],
},
access_level: {
type: Number,
},

View File

@@ -1,19 +0,0 @@
const mongoose = require('mongoose');
const Schema = mongoose.Schema;
const categoriesSchema = new Schema({
label: {
type: String,
required: true,
unique: true,
},
value: {
type: String,
required: true,
unique: true,
},
});
const categories = mongoose.model('categories', categoriesSchema);
module.exports = { Categories: categories };

View File

@@ -1,31 +0,0 @@
const mongoose = require('mongoose');
const conversationTagSchema = mongoose.Schema(
{
tag: {
type: String,
index: true,
},
user: {
type: String,
index: true,
},
description: {
type: String,
index: true,
},
count: {
type: Number,
default: 0,
},
position: {
type: Number,
default: 0,
},
},
{ timestamps: true },
);
conversationTagSchema.index({ tag: 1, user: 1 }, { unique: true });
module.exports = mongoose.model('ConversationTag', conversationTagSchema);

View File

@@ -42,11 +42,6 @@ const convoSchema = mongoose.Schema(
invocationId: {
type: Number,
},
tags: {
type: [String],
default: [],
meiliIndex: true,
},
},
{ timestamps: true },
);
@@ -61,7 +56,6 @@ if (process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY) {
}
convoSchema.index({ createdAt: 1, updatedAt: 1 });
convoSchema.index({ conversationId: 1, user: 1 }, { unique: true });
const Conversation = mongoose.models.Conversation || mongoose.model('Conversation', convoSchema);

View File

@@ -13,11 +13,6 @@ const conversationPreset = {
type: String,
required: false,
},
// for bedrock only
region: {
type: String,
required: false,
},
// for azureOpenAI, openAI only
chatGptLabel: {
type: String,
@@ -79,13 +74,6 @@ const conversationPreset = {
resendImages: {
type: Boolean,
},
/* Anthropic only */
promptCache: {
type: Boolean,
},
system: {
type: String,
},
// files
resendFiles: {
type: Boolean,
@@ -115,10 +103,6 @@ const conversationPreset = {
spec: {
type: String,
},
tags: {
type: [String],
default: [],
},
tools: { type: [{ type: String }], default: undefined },
maxContextTokens: {
type: Number,

View File

@@ -3,9 +3,9 @@ const mongoose = require('mongoose');
/**
* @typedef {Object} MongoFile
* @property {ObjectId} [_id] - MongoDB Document ID
* @property {mongoose.Schema.Types.ObjectId} [_id] - MongoDB Document ID
* @property {number} [__v] - MongoDB Version Key
* @property {ObjectId} user - User ID
* @property {mongoose.Schema.Types.ObjectId} user - User ID
* @property {string} [conversationId] - Optional conversation ID
* @property {string} file_id - File identifier
* @property {string} [temp_file_id] - Temporary File identifier
@@ -14,19 +14,17 @@ const mongoose = require('mongoose');
* @property {string} filepath - Location of the file
* @property {'file'} object - Type of object, always 'file'
* @property {string} type - Type of file
* @property {number} [usage=0] - Number of uses of the file
* @property {number} usage - Number of uses of the file
* @property {string} [context] - Context of the file origin
* @property {boolean} [embedded=false] - Whether or not the file is embedded in vector db
* @property {boolean} [embedded] - Whether or not the file is embedded in vector db
* @property {string} [model] - The model to identify the group region of the file (for Azure OpenAI hosting)
* @property {string} [source] - The source of the file (e.g., from FileSources)
* @property {string} [source] - The source of the file
* @property {number} [width] - Optional width of the file
* @property {number} [height] - Optional height of the file
* @property {Date} [expiresAt] - Optional expiration date of the file
* @property {Date} [expiresAt] - Optional height of the file
* @property {Date} [createdAt] - Date when the file was created
* @property {Date} [updatedAt] - Date when the file was updated
*/
/** @type {MongooseSchema<MongoFile>} */
const fileSchema = mongoose.Schema(
{
user: {
@@ -93,7 +91,7 @@ const fileSchema = mongoose.Schema(
height: Number,
expiresAt: {
type: Date,
expires: 3600, // 1 hour in seconds
expires: 3600,
},
},
{

View File

@@ -11,7 +11,6 @@ const messageSchema = mongoose.Schema(
},
conversationId: {
type: String,
index: true,
required: true,
meiliIndex: true,
},
@@ -129,9 +128,7 @@ if (process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY) {
}
messageSchema.index({ createdAt: 1 });
messageSchema.index({ messageId: 1, user: 1 }, { unique: true });
/** @type {mongoose.Model<TMessage>} */
const Message = mongoose.models.Message || mongoose.model('Message', messageSchema);
module.exports = Message;

View File

@@ -1,35 +0,0 @@
const { Schema } = require('mongoose');
/**
* @typedef {Object} MongoProject
* @property {ObjectId} [_id] - MongoDB Document ID
* @property {string} name - The name of the project
* @property {ObjectId[]} promptGroupIds - Array of PromptGroup IDs associated with the project
* @property {Date} [createdAt] - Date when the project was created (added by timestamps)
* @property {Date} [updatedAt] - Date when the project was last updated (added by timestamps)
*/
const projectSchema = new Schema(
{
name: {
type: String,
required: true,
index: true,
},
promptGroupIds: {
type: [Schema.Types.ObjectId],
ref: 'PromptGroup',
default: [],
},
agentIds: {
type: [String],
ref: 'Agent',
default: [],
},
},
{
timestamps: true,
},
);
module.exports = projectSchema;

View File

@@ -1,118 +0,0 @@
const mongoose = require('mongoose');
const { Constants } = require('librechat-data-provider');
const Schema = mongoose.Schema;
/**
* @typedef {Object} MongoPromptGroup
* @property {ObjectId} [_id] - MongoDB Document ID
* @property {string} name - The name of the prompt group
* @property {ObjectId} author - The author of the prompt group
* @property {ObjectId} [projectId=null] - The project ID of the prompt group
* @property {ObjectId} [productionId=null] - The ID of the production prompt in the group
* @property {string} authorName - The name of the author of the prompt group
* @property {number} [numberOfGenerations=0] - Number of generations the prompt group has
* @property {string} [oneliner=''] - Oneliner description of the prompt group
* @property {string} [category=''] - Category of the prompt group
* @property {string} [command] - Command for the prompt group
* @property {Date} [createdAt] - Date when the prompt group was created (added by timestamps)
* @property {Date} [updatedAt] - Date when the prompt group was last updated (added by timestamps)
*/
const promptGroupSchema = new Schema(
{
name: {
type: String,
required: true,
index: true,
},
numberOfGenerations: {
type: Number,
default: 0,
},
oneliner: {
type: String,
default: '',
},
category: {
type: String,
default: '',
index: true,
},
projectIds: {
type: [Schema.Types.ObjectId],
ref: 'Project',
index: true,
},
productionId: {
type: Schema.Types.ObjectId,
ref: 'Prompt',
required: true,
index: true,
},
author: {
type: Schema.Types.ObjectId,
ref: 'User',
required: true,
index: true,
},
authorName: {
type: String,
required: true,
},
command: {
type: String,
index: true,
validate: {
validator: function (v) {
return v === undefined || v === null || v === '' || /^[a-z0-9-]+$/.test(v);
},
message: (props) =>
`${props.value} is not a valid command. Only lowercase alphanumeric characters and hyphens (-) are allowed.`,
},
maxlength: [
Constants.COMMANDS_MAX_LENGTH,
`Command cannot be longer than ${Constants.COMMANDS_MAX_LENGTH} characters`,
],
},
},
{
timestamps: true,
},
);
const PromptGroup = mongoose.model('PromptGroup', promptGroupSchema);
const promptSchema = new Schema(
{
groupId: {
type: Schema.Types.ObjectId,
ref: 'PromptGroup',
required: true,
index: true,
},
author: {
type: Schema.Types.ObjectId,
ref: 'User',
required: true,
},
prompt: {
type: String,
required: true,
},
type: {
type: String,
enum: ['text', 'chat'],
required: true,
},
},
{
timestamps: true,
},
);
const Prompt = mongoose.model('Prompt', promptSchema);
promptSchema.index({ createdAt: 1, updatedAt: 1 });
promptGroupSchema.index({ createdAt: 1, updatedAt: 1 });
module.exports = { Prompt, PromptGroup };
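For reference, the command validator above accepts only lowercase alphanumerics and hyphens, up to Constants.COMMANDS_MAX_LENGTH characters:
/^[a-z0-9-]+$/.test('daily-standup'); // true
/^[a-z0-9-]+$/.test('Daily Standup'); // false: uppercase and whitespace
/^[a-z0-9-]+$/.test('standup_notes'); // false: underscore is not allowed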

View File

@@ -1,55 +0,0 @@
const { PermissionTypes, Permissions } = require('librechat-data-provider');
const mongoose = require('mongoose');
const roleSchema = new mongoose.Schema({
name: {
type: String,
required: true,
unique: true,
index: true,
},
[PermissionTypes.BOOKMARKS]: {
[Permissions.USE]: {
type: Boolean,
default: true,
},
},
[PermissionTypes.PROMPTS]: {
[Permissions.SHARED_GLOBAL]: {
type: Boolean,
default: false,
},
[Permissions.USE]: {
type: Boolean,
default: true,
},
[Permissions.CREATE]: {
type: Boolean,
default: true,
},
},
[PermissionTypes.AGENTS]: {
[Permissions.SHARED_GLOBAL]: {
type: Boolean,
default: false,
},
[Permissions.USE]: {
type: Boolean,
default: true,
},
[Permissions.CREATE]: {
type: Boolean,
default: true,
},
},
[PermissionTypes.MULTI_CONVO]: {
[Permissions.USE]: {
type: Boolean,
default: true,
},
},
});
const Role = mongoose.model('Role', roleSchema);
module.exports = Role;

View File

@@ -7,9 +7,6 @@ const tokenSchema = new Schema({
required: true,
ref: 'user',
},
email: {
type: String,
},
token: {
type: String,
required: true,
@@ -18,13 +15,8 @@ const tokenSchema = new Schema({
type: Date,
required: true,
default: Date.now,
},
expiresAt: {
type: Date,
required: true,
expires: 900,
},
});
tokenSchema.index({ expiresAt: 1 }, { expireAfterSeconds: 0 });
module.exports = tokenSchema;
module.exports = mongoose.model('Token', tokenSchema);

View File

@@ -30,9 +30,6 @@ const transactionSchema = mongoose.Schema(
rate: Number,
rawAmount: Number,
tokenValue: Number,
inputTokens: { type: Number },
writeTokens: { type: Number },
readTokens: { type: Number },
},
{
timestamps: true,

View File

@@ -1,36 +1,5 @@
const mongoose = require('mongoose');
const { SystemRoles } = require('librechat-data-provider');
/**
* @typedef {Object} MongoSession
* @property {string} [refreshToken] - The refresh token
*/
/**
* @typedef {Object} MongoUser
* @property {ObjectId} [_id] - MongoDB Document ID
* @property {string} [name] - The user's name
* @property {string} [username] - The user's username, in lowercase
* @property {string} email - The user's email address
* @property {boolean} emailVerified - Whether the user's email is verified
* @property {string} [password] - The user's password, trimmed with 8-128 characters
* @property {string} [avatar] - The URL of the user's avatar
* @property {string} provider - The provider of the user's account (e.g., 'local', 'google')
* @property {string} [role='USER'] - The role of the user
* @property {string} [googleId] - Optional Google ID for the user
* @property {string} [facebookId] - Optional Facebook ID for the user
* @property {string} [openidId] - Optional OpenID ID for the user
* @property {string} [ldapId] - Optional LDAP ID for the user
* @property {string} [githubId] - Optional GitHub ID for the user
* @property {string} [discordId] - Optional Discord ID for the user
* @property {Array} [plugins=[]] - List of plugins used by the user
* @property {Array.<MongoSession>} [refreshToken] - List of sessions with refresh tokens
* @property {Date} [expiresAt] - Optional expiration date of the user record
* @property {Date} [createdAt] - Date when the user was created (added by timestamps)
* @property {Date} [updatedAt] - Date when the user was last updated (added by timestamps)
*/
/** @type {MongooseSchema<MongoSession>} */
const Session = mongoose.Schema({
refreshToken: {
type: String,
@@ -38,7 +7,6 @@ const Session = mongoose.Schema({
},
});
/** @type {MongooseSchema<MongoUser>} */
const userSchema = mongoose.Schema(
{
name: {
@@ -79,7 +47,7 @@ const userSchema = mongoose.Schema(
},
role: {
type: String,
default: SystemRoles.USER,
default: 'USER',
},
googleId: {
type: String,
@@ -96,11 +64,6 @@ const userSchema = mongoose.Schema(
unique: true,
sparse: true,
},
ldapId: {
type: String,
unique: true,
sparse: true,
},
githubId: {
type: String,
unique: true,
@@ -118,16 +81,7 @@ const userSchema = mongoose.Schema(
refreshToken: {
type: [Session],
},
expiresAt: {
type: Date,
expires: 604800, // 7 days in seconds
},
termsAccepted: {
type: Boolean,
default: false,
},
},
{ timestamps: true },
);

View File

@@ -11,7 +11,7 @@ const { logger } = require('~/config');
* @param {String} txData.conversationId - The ID of the conversation.
* @param {String} txData.model - The model name.
* @param {String} txData.context - The context in which the transaction is made.
* @param {EndpointTokenConfig} [txData.endpointTokenConfig] - The current endpoint token config.
* @param {String} [txData.endpointTokenConfig] - The current endpoint token config.
* @param {String} [txData.valueKey] - The value key (optional).
* @param {Object} tokenUsage - The number of tokens used.
* @param {Number} tokenUsage.promptTokens - The number of prompt tokens used.
@@ -32,109 +32,38 @@ const spendTokens = async (txData, tokenUsage) => {
);
let prompt, completion;
try {
if (promptTokens !== undefined) {
if (promptTokens >= 0) {
prompt = await Transaction.create({
...txData,
tokenType: 'prompt',
rawAmount: -Math.max(promptTokens, 0),
rawAmount: -promptTokens,
});
}
if (completionTokens !== undefined) {
completion = await Transaction.create({
...txData,
tokenType: 'completion',
rawAmount: -Math.max(completionTokens, 0),
});
if (!completionTokens && isNaN(completionTokens)) {
logger.debug('[spendTokens] !completionTokens', { prompt, completion });
return;
}
if (prompt || completion) {
completion = await Transaction.create({
...txData,
tokenType: 'completion',
rawAmount: -completionTokens,
});
prompt &&
completion &&
logger.debug('[spendTokens] Transaction data record against balance:', {
user: txData.user,
prompt: prompt?.prompt,
promptRate: prompt?.rate,
completion: completion?.completion,
completionRate: completion?.rate,
balance: completion?.balance ?? prompt?.balance,
prompt: prompt.prompt,
promptRate: prompt.rate,
completion: completion.completion,
completionRate: completion.rate,
balance: completion.balance,
});
} else {
logger.debug('[spendTokens] No transactions incurred against balance');
}
} catch (err) {
logger.error('[spendTokens]', err);
}
};
/**
* Creates transactions to record the spending of structured tokens.
*
* @function
* @async
* @param {Object} txData - Transaction data.
* @param {mongoose.Schema.Types.ObjectId} txData.user - The user ID.
* @param {String} txData.conversationId - The ID of the conversation.
* @param {String} txData.model - The model name.
* @param {String} txData.context - The context in which the transaction is made.
* @param {EndpointTokenConfig} [txData.endpointTokenConfig] - The current endpoint token config.
* @param {String} [txData.valueKey] - The value key (optional).
* @param {Object} tokenUsage - The number of tokens used.
* @param {Object} tokenUsage.promptTokens - The number of prompt tokens used.
* @param {Number} tokenUsage.promptTokens.input - The number of input tokens.
* @param {Number} tokenUsage.promptTokens.write - The number of write tokens.
* @param {Number} tokenUsage.promptTokens.read - The number of read tokens.
* @param {Number} tokenUsage.completionTokens - The number of completion tokens used.
* @returns {Promise<void>} - Returns nothing.
* @throws {Error} - Throws an error if there's an issue creating the transactions.
*/
const spendStructuredTokens = async (txData, tokenUsage) => {
const { promptTokens, completionTokens } = tokenUsage;
logger.debug(
`[spendStructuredTokens] conversationId: ${txData.conversationId}${
txData?.context ? ` | Context: ${txData?.context}` : ''
} | Token usage: `,
{
promptTokens,
completionTokens,
},
);
let prompt, completion;
try {
if (promptTokens) {
const { input = 0, write = 0, read = 0 } = promptTokens;
prompt = await Transaction.createStructured({
...txData,
tokenType: 'prompt',
inputTokens: -input,
writeTokens: -write,
readTokens: -read,
});
}
if (completionTokens) {
completion = await Transaction.create({
...txData,
tokenType: 'completion',
rawAmount: -completionTokens,
});
}
if (prompt || completion) {
logger.debug('[spendStructuredTokens] Transaction data record against balance:', {
user: txData.user,
prompt: prompt?.prompt,
promptRate: prompt?.rate,
completion: completion?.completion,
completionRate: completion?.rate,
balance: completion?.balance ?? prompt?.balance,
});
} else {
logger.debug('[spendStructuredTokens] No transactions incurred against balance');
}
} catch (err) {
logger.error('[spendStructuredTokens]', err);
}
return { prompt, completion };
};
module.exports = { spendTokens, spendStructuredTokens };
module.exports = spendTokens;
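
For orientation, here is a minimal usage sketch (not part of the diff) of the structured-spend API documented above; the argument shapes mirror the JSDoc and the spec fixtures that follow, while the wrapping function and its parameters are hypothetical.

// Hypothetical caller illustrating the token-spend helpers shown above.
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');

async function recordUsage({ userId, conversationId }) {
  // Flat counts go through spendTokens...
  await spendTokens(
    { user: userId, conversationId, model: 'gpt-4o', context: 'message' },
    { promptTokens: 100, completionTokens: 50 },
  );
  // ...while prompt-caching usage passes the structured input/write/read breakdown.
  return await spendStructuredTokens(
    { user: userId, conversationId, model: 'claude-3-5-sonnet', context: 'message' },
    { promptTokens: { input: 10, write: 100, read: 5 }, completionTokens: 50 },
  );
}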

View File

@@ -1,197 +0,0 @@
const mongoose = require('mongoose');
jest.mock('./Transaction', () => ({
Transaction: {
create: jest.fn(),
createStructured: jest.fn(),
},
}));
jest.mock('./Balance', () => ({
findOne: jest.fn(),
findOneAndUpdate: jest.fn(),
}));
jest.mock('~/config', () => ({
logger: {
debug: jest.fn(),
error: jest.fn(),
},
}));
// Import after mocking
const { spendTokens, spendStructuredTokens } = require('./spendTokens');
const { Transaction } = require('./Transaction');
const Balance = require('./Balance');
describe('spendTokens', () => {
beforeEach(() => {
jest.clearAllMocks();
process.env.CHECK_BALANCE = 'true';
});
it('should create transactions for both prompt and completion tokens', async () => {
const txData = {
user: new mongoose.Types.ObjectId(),
conversationId: 'test-convo',
model: 'gpt-3.5-turbo',
context: 'test',
};
const tokenUsage = {
promptTokens: 100,
completionTokens: 50,
};
Transaction.create.mockResolvedValueOnce({ tokenType: 'prompt', rawAmount: -100 });
Transaction.create.mockResolvedValueOnce({ tokenType: 'completion', rawAmount: -50 });
Balance.findOne.mockResolvedValue({ tokenCredits: 10000 });
Balance.findOneAndUpdate.mockResolvedValue({ tokenCredits: 9850 });
await spendTokens(txData, tokenUsage);
expect(Transaction.create).toHaveBeenCalledTimes(2);
expect(Transaction.create).toHaveBeenCalledWith(
expect.objectContaining({
tokenType: 'prompt',
rawAmount: -100,
}),
);
expect(Transaction.create).toHaveBeenCalledWith(
expect.objectContaining({
tokenType: 'completion',
rawAmount: -50,
}),
);
});
it('should handle zero completion tokens', async () => {
const txData = {
user: new mongoose.Types.ObjectId(),
conversationId: 'test-convo',
model: 'gpt-3.5-turbo',
context: 'test',
};
const tokenUsage = {
promptTokens: 100,
completionTokens: 0,
};
Transaction.create.mockResolvedValueOnce({ tokenType: 'prompt', rawAmount: -100 });
Transaction.create.mockResolvedValueOnce({ tokenType: 'completion', rawAmount: -0 });
Balance.findOne.mockResolvedValue({ tokenCredits: 10000 });
Balance.findOneAndUpdate.mockResolvedValue({ tokenCredits: 9850 });
await spendTokens(txData, tokenUsage);
expect(Transaction.create).toHaveBeenCalledTimes(2);
expect(Transaction.create).toHaveBeenCalledWith(
expect.objectContaining({
tokenType: 'prompt',
rawAmount: -100,
}),
);
expect(Transaction.create).toHaveBeenCalledWith(
expect.objectContaining({
tokenType: 'completion',
rawAmount: -0, // Changed from 0 to -0
}),
);
});
it('should handle undefined token counts', async () => {
const txData = {
user: new mongoose.Types.ObjectId(),
conversationId: 'test-convo',
model: 'gpt-3.5-turbo',
context: 'test',
};
const tokenUsage = {};
await spendTokens(txData, tokenUsage);
expect(Transaction.create).not.toHaveBeenCalled();
});
it('should not update balance when CHECK_BALANCE is false', async () => {
process.env.CHECK_BALANCE = 'false';
const txData = {
user: new mongoose.Types.ObjectId(),
conversationId: 'test-convo',
model: 'gpt-3.5-turbo',
context: 'test',
};
const tokenUsage = {
promptTokens: 100,
completionTokens: 50,
};
Transaction.create.mockResolvedValueOnce({ tokenType: 'prompt', rawAmount: -100 });
Transaction.create.mockResolvedValueOnce({ tokenType: 'completion', rawAmount: -50 });
await spendTokens(txData, tokenUsage);
expect(Transaction.create).toHaveBeenCalledTimes(2);
expect(Balance.findOne).not.toHaveBeenCalled();
expect(Balance.findOneAndUpdate).not.toHaveBeenCalled();
});
it('should create structured transactions for both prompt and completion tokens', async () => {
const txData = {
user: new mongoose.Types.ObjectId(),
conversationId: 'test-convo',
model: 'claude-3-5-sonnet',
context: 'test',
};
const tokenUsage = {
promptTokens: {
input: 10,
write: 100,
read: 5,
},
completionTokens: 50,
};
Transaction.createStructured.mockResolvedValueOnce({
rate: 3.75,
user: txData.user.toString(),
balance: 9570,
prompt: -430,
});
Transaction.create.mockResolvedValueOnce({
rate: 15,
user: txData.user.toString(),
balance: 8820,
completion: -750,
});
const result = await spendStructuredTokens(txData, tokenUsage);
expect(Transaction.createStructured).toHaveBeenCalledWith(
expect.objectContaining({
tokenType: 'prompt',
inputTokens: -10,
writeTokens: -100,
readTokens: -5,
}),
);
expect(Transaction.create).toHaveBeenCalledWith(
expect.objectContaining({
tokenType: 'completion',
rawAmount: -50,
}),
);
expect(result).toEqual({
prompt: expect.objectContaining({
rate: 3.75,
user: txData.user.toString(),
balance: 9570,
prompt: -430,
}),
completion: expect.objectContaining({
rate: 15,
user: txData.user.toString(),
balance: 8820,
completion: -750,
}),
});
});
});

View File

@@ -1,77 +1,35 @@
const { matchModelName } = require('../utils');
const defaultRate = 6;
/** AWS Bedrock pricing */
const bedrockValues = {
'llama2-13b': { prompt: 0.75, completion: 1.0 },
'llama2-70b': { prompt: 1.95, completion: 2.56 },
'llama3-8b': { prompt: 0.3, completion: 0.6 },
'llama3-70b': { prompt: 2.65, completion: 3.5 },
'llama3-1-8b': { prompt: 0.3, completion: 0.6 },
'llama3-1-70b': { prompt: 2.65, completion: 3.5 },
'llama3-1-405b': { prompt: 5.32, completion: 16.0 },
'mistral-7b': { prompt: 0.15, completion: 0.2 },
'mistral-small': { prompt: 0.15, completion: 0.2 },
'mixtral-8x7b': { prompt: 0.45, completion: 0.7 },
'mistral-large-2402': { prompt: 4.0, completion: 12.0 },
'mistral-large-2407': { prompt: 3.0, completion: 9.0 },
'command-text': { prompt: 1.5, completion: 2.0 },
'command-light': { prompt: 0.3, completion: 0.6 },
'ai21.j2-mid-v1': { prompt: 12.5, completion: 12.5 },
'ai21.j2-ultra-v1': { prompt: 18.8, completion: 18.8 },
'ai21.jamba-instruct-v1:0': { prompt: 0.5, completion: 0.7 },
'amazon.titan-text-lite-v1': { prompt: 0.15, completion: 0.2 },
'amazon.titan-text-express-v1': { prompt: 0.2, completion: 0.6 },
'amazon.titan-text-premier-v1:0': { prompt: 0.5, completion: 1.5 },
};
/**
* Mapping of model token sizes to their respective multipliers for prompt and completion.
* Rates are expressed in USD per 1M tokens.
* @type {Object.<string, {prompt: number, completion: number}>}
*/
const tokenValues = Object.assign(
{
'8k': { prompt: 30, completion: 60 },
'32k': { prompt: 60, completion: 120 },
'4k': { prompt: 1.5, completion: 2 },
'16k': { prompt: 3, completion: 4 },
'gpt-3.5-turbo-1106': { prompt: 1, completion: 2 },
'gpt-4o-2024-08-06': { prompt: 2.5, completion: 10 },
'gpt-4o-mini': { prompt: 0.15, completion: 0.6 },
'gpt-4o': { prompt: 5, completion: 15 },
'gpt-4-1106': { prompt: 10, completion: 30 },
'gpt-3.5-turbo-0125': { prompt: 0.5, completion: 1.5 },
'claude-3-opus': { prompt: 15, completion: 75 },
'claude-3-sonnet': { prompt: 3, completion: 15 },
'claude-3-5-sonnet': { prompt: 3, completion: 15 },
'claude-3.5-sonnet': { prompt: 3, completion: 15 },
'claude-3-haiku': { prompt: 0.25, completion: 1.25 },
'claude-2.1': { prompt: 8, completion: 24 },
'claude-2': { prompt: 8, completion: 24 },
'claude-instant': { prompt: 0.8, completion: 2.4 },
'claude-': { prompt: 0.8, completion: 2.4 },
'command-r-plus': { prompt: 3, completion: 15 },
'command-r': { prompt: 0.5, completion: 1.5 },
/* cohere doesn't have rates for the older command models,
const tokenValues = {
'8k': { prompt: 30, completion: 60 },
'32k': { prompt: 60, completion: 120 },
'4k': { prompt: 1.5, completion: 2 },
'16k': { prompt: 3, completion: 4 },
'gpt-3.5-turbo-1106': { prompt: 1, completion: 2 },
'gpt-4o': { prompt: 5, completion: 15 },
'gpt-4-1106': { prompt: 10, completion: 30 },
'gpt-3.5-turbo-0125': { prompt: 0.5, completion: 1.5 },
'claude-3-opus': { prompt: 15, completion: 75 },
'claude-3-sonnet': { prompt: 3, completion: 15 },
'claude-3-haiku': { prompt: 0.25, completion: 1.25 },
'claude-2.1': { prompt: 8, completion: 24 },
'claude-2': { prompt: 8, completion: 24 },
'claude-': { prompt: 0.8, completion: 2.4 },
'command-r-plus': { prompt: 3, completion: 15 },
'command-r': { prompt: 0.5, completion: 1.5 },
/* cohere doesn't have rates for the older command models,
so this was from https://artificialanalysis.ai/models/command-light/providers */
command: { prompt: 0.38, completion: 0.38 },
'gemini-1.5': { prompt: 7, completion: 21 }, // May 2nd, 2024 pricing
gemini: { prompt: 0.5, completion: 1.5 }, // May 2nd, 2024 pricing
},
bedrockValues,
);
/**
* Mapping of model token sizes to their respective multipliers for cached input, read and write.
* See Anthropic's documentation on this: https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching#pricing
* Rates are expressed in USD per 1M tokens.
* @type {Object.<string, {write: number, read: number }>}
*/
const cacheTokenValues = {
'claude-3.5-sonnet': { write: 3.75, read: 0.3 },
'claude-3-5-sonnet': { write: 3.75, read: 0.3 },
'claude-3-haiku': { write: 0.3, read: 0.03 },
command: { prompt: 0.38, completion: 0.38 },
// 'gemini-1.5': { prompt: 7, completion: 21 }, // May 2nd, 2024 pricing
// 'gemini': { prompt: 0.5, completion: 1.5 }, // May 2nd, 2024 pricing
'gemini-1.5': { prompt: 0, completion: 0 }, // currently free
gemini: { prompt: 0, completion: 0 }, // currently free
};
/**
@@ -95,10 +53,6 @@ const getValueKey = (model, endpoint) => {
return 'gpt-3.5-turbo-1106';
} else if (modelName.includes('gpt-3.5')) {
return '4k';
} else if (modelName.includes('gpt-4o-2024-08-06')) {
return 'gpt-4o-2024-08-06';
} else if (modelName.includes('gpt-4o-mini')) {
return 'gpt-4o-mini';
} else if (modelName.includes('gpt-4o')) {
return 'gpt-4o';
} else if (modelName.includes('gpt-4-vision')) {
@@ -126,7 +80,7 @@ const getValueKey = (model, endpoint) => {
*
* @param {Object} params - The parameters for the function.
* @param {string} [params.valueKey] - The key corresponding to the model name.
* @param {'prompt' | 'completion'} [params.tokenType] - The type of token (e.g., 'prompt' or 'completion').
* @param {string} [params.tokenType] - The type of token (e.g., 'prompt' or 'completion').
* @param {string} [params.model] - The model name to derive the value key from if not provided.
* @param {string} [params.endpoint] - The endpoint name to derive the value key from if not provided.
* @param {EndpointTokenConfig} [params.endpointTokenConfig] - The token configuration for the endpoint.
@@ -151,41 +105,7 @@ const getMultiplier = ({ valueKey, tokenType, model, endpoint, endpointTokenConf
}
// If we got this far, and values[tokenType] is undefined somehow, return a rough average of default multipliers
return tokenValues[valueKey]?.[tokenType] ?? defaultRate;
return tokenValues[valueKey][tokenType] ?? defaultRate;
};
/**
* Retrieves the cache multiplier for a given value key and token type. If no value key is provided,
* it attempts to derive it from the model name.
*
* @param {Object} params - The parameters for the function.
* @param {string} [params.valueKey] - The key corresponding to the model name.
* @param {'write' | 'read'} [params.cacheType] - The type of token (e.g., 'write' or 'read').
* @param {string} [params.model] - The model name to derive the value key from if not provided.
* @param {string} [params.endpoint] - The endpoint name to derive the value key from if not provided.
* @param {EndpointTokenConfig} [params.endpointTokenConfig] - The token configuration for the endpoint.
* @returns {number | null} The multiplier for the given parameters, or `null` if not found.
*/
const getCacheMultiplier = ({ valueKey, cacheType, model, endpoint, endpointTokenConfig }) => {
if (endpointTokenConfig) {
return endpointTokenConfig?.[model]?.[cacheType] ?? null;
}
if (valueKey && cacheType) {
return cacheTokenValues[valueKey]?.[cacheType] ?? null;
}
if (!cacheType || !model) {
return null;
}
valueKey = getValueKey(model, endpoint);
if (!valueKey) {
return null;
}
// If we got this far and values[cacheType] is still undefined, return null
return cacheTokenValues[valueKey]?.[cacheType] ?? null;
};
module.exports = { tokenValues, getValueKey, getMultiplier, getCacheMultiplier, defaultRate };
module.exports = { tokenValues, getValueKey, getMultiplier, defaultRate };
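
As a rough worked example (not from the diff), the multipliers above read as USD per 1M tokens, so a dollar charge can be derived like this; the model string is illustrative and mirrors the spec file below.

// Illustrative cost arithmetic using the helpers above (assumes USD per 1M tokens).
const { getValueKey, getMultiplier } = require('./tx');

const valueKey = getValueKey('gpt-4o-0125'); // -> 'gpt-4o'
const promptRate = getMultiplier({ valueKey, tokenType: 'prompt' }); // -> 5
const completionRate = getMultiplier({ valueKey, tokenType: 'completion' }); // -> 15

// 1,000 prompt tokens and 500 completion tokens:
const costUSD = (1000 / 1e6) * promptRate + (500 / 1e6) * completionRate; // ≈ $0.0125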

View File

@@ -1,11 +1,4 @@
const { EModelEndpoint } = require('librechat-data-provider');
const {
defaultRate,
tokenValues,
getValueKey,
getMultiplier,
getCacheMultiplier,
} = require('./tx');
const { getValueKey, getMultiplier, defaultRate, tokenValues } = require('./tx');
describe('getValueKey', () => {
it('should return "16k" for model name containing "gpt-3.5-turbo-16k"', () => {
@@ -55,41 +48,6 @@ describe('getValueKey', () => {
expect(getValueKey('gpt-4o-turbo')).toBe('gpt-4o');
expect(getValueKey('gpt-4o-0125')).toBe('gpt-4o');
});
it('should return "gpt-4o-mini" for model type of "gpt-4o-mini"', () => {
expect(getValueKey('gpt-4o-mini-2024-07-18')).toBe('gpt-4o-mini');
expect(getValueKey('openai/gpt-4o-mini')).toBe('gpt-4o-mini');
expect(getValueKey('gpt-4o-mini-0718')).toBe('gpt-4o-mini');
expect(getValueKey('gpt-4o-2024-08-06-0718')).not.toBe('gpt-4o');
});
it('should return "gpt-4o-2024-08-06" for model type of "gpt-4o-2024-08-06"', () => {
expect(getValueKey('gpt-4o-2024-08-06-2024-07-18')).toBe('gpt-4o-2024-08-06');
expect(getValueKey('openai/gpt-4o-2024-08-06')).toBe('gpt-4o-2024-08-06');
expect(getValueKey('gpt-4o-2024-08-06-0718')).toBe('gpt-4o-2024-08-06');
expect(getValueKey('gpt-4o-2024-08-06-0718')).not.toBe('gpt-4o');
});
it('should return "gpt-4o" for model type of "chatgpt-4o"', () => {
expect(getValueKey('chatgpt-4o-latest')).toBe('gpt-4o');
expect(getValueKey('openai/chatgpt-4o-latest')).toBe('gpt-4o');
expect(getValueKey('chatgpt-4o-latest-0916')).toBe('gpt-4o');
expect(getValueKey('chatgpt-4o-latest-0718')).toBe('gpt-4o');
});
it('should return "claude-3-5-sonnet" for model type of "claude-3-5-sonnet-"', () => {
expect(getValueKey('claude-3-5-sonnet-20240620')).toBe('claude-3-5-sonnet');
expect(getValueKey('anthropic/claude-3-5-sonnet')).toBe('claude-3-5-sonnet');
expect(getValueKey('claude-3-5-sonnet-turbo')).toBe('claude-3-5-sonnet');
expect(getValueKey('claude-3-5-sonnet-0125')).toBe('claude-3-5-sonnet');
});
it('should return "claude-3.5-sonnet" for model type of "claude-3.5-sonnet-"', () => {
expect(getValueKey('claude-3.5-sonnet-20240620')).toBe('claude-3.5-sonnet');
expect(getValueKey('anthropic/claude-3.5-sonnet')).toBe('claude-3.5-sonnet');
expect(getValueKey('claude-3.5-sonnet-turbo')).toBe('claude-3.5-sonnet');
expect(getValueKey('claude-3.5-sonnet-0125')).toBe('claude-3.5-sonnet');
});
});
describe('getMultiplier', () => {
@@ -144,30 +102,6 @@ describe('getMultiplier', () => {
);
});
it('should return the correct multiplier for gpt-4o-mini', () => {
const valueKey = getValueKey('gpt-4o-mini-2024-07-18');
expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(
tokenValues['gpt-4o-mini'].prompt,
);
expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
tokenValues['gpt-4o-mini'].completion,
);
expect(getMultiplier({ valueKey, tokenType: 'completion' })).not.toBe(
tokenValues['gpt-4-1106'].completion,
);
});
it('should return the correct multiplier for chatgpt-4o-latest', () => {
const valueKey = getValueKey('chatgpt-4o-latest');
expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-4o'].prompt);
expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
tokenValues['gpt-4o'].completion,
);
expect(getMultiplier({ valueKey, tokenType: 'completion' })).not.toBe(
tokenValues['gpt-4o-mini'].completion,
);
});
it('should derive the valueKey from the model if not provided for new models', () => {
expect(
getMultiplier({ tokenType: 'prompt', model: 'gpt-3.5-turbo-1106-some-other-info' }),
@@ -192,125 +126,3 @@ describe('getMultiplier', () => {
);
});
});
describe('AWS Bedrock Model Tests', () => {
const awsModels = [
'anthropic.claude-3-haiku-20240307-v1:0',
'anthropic.claude-3-sonnet-20240229-v1:0',
'anthropic.claude-3-opus-20240229-v1:0',
'anthropic.claude-3-5-sonnet-20240620-v1:0',
'anthropic.claude-v2:1',
'anthropic.claude-instant-v1',
'meta.llama2-13b-chat-v1',
'meta.llama2-70b-chat-v1',
'meta.llama3-8b-instruct-v1:0',
'meta.llama3-70b-instruct-v1:0',
'meta.llama3-1-8b-instruct-v1:0',
'meta.llama3-1-70b-instruct-v1:0',
'meta.llama3-1-405b-instruct-v1:0',
'mistral.mistral-7b-instruct-v0:2',
'mistral.mistral-small-2402-v1:0',
'mistral.mixtral-8x7b-instruct-v0:1',
'mistral.mistral-large-2402-v1:0',
'mistral.mistral-large-2407-v1:0',
'cohere.command-text-v14',
'cohere.command-light-text-v14',
'cohere.command-r-v1:0',
'cohere.command-r-plus-v1:0',
'ai21.j2-mid-v1',
'ai21.j2-ultra-v1',
'amazon.titan-text-lite-v1',
'amazon.titan-text-express-v1',
];
it('should return the correct prompt multipliers for all models', () => {
const results = awsModels.map((model) => {
const valueKey = getValueKey(model, EModelEndpoint.bedrock);
const multiplier = getMultiplier({ valueKey, tokenType: 'prompt' });
return tokenValues[valueKey].prompt && multiplier === tokenValues[valueKey].prompt;
});
expect(results.every(Boolean)).toBe(true);
});
it('should return the correct completion multipliers for all models', () => {
const results = awsModels.map((model) => {
const valueKey = getValueKey(model, EModelEndpoint.bedrock);
const multiplier = getMultiplier({ valueKey, tokenType: 'completion' });
return tokenValues[valueKey].completion && multiplier === tokenValues[valueKey].completion;
});
expect(results.every(Boolean)).toBe(true);
});
});
describe('getCacheMultiplier', () => {
it('should return the correct cache multiplier for a given valueKey and cacheType', () => {
expect(getCacheMultiplier({ valueKey: 'claude-3-5-sonnet', cacheType: 'write' })).toBe(3.75);
expect(getCacheMultiplier({ valueKey: 'claude-3-5-sonnet', cacheType: 'read' })).toBe(0.3);
expect(getCacheMultiplier({ valueKey: 'claude-3-haiku', cacheType: 'write' })).toBe(0.3);
expect(getCacheMultiplier({ valueKey: 'claude-3-haiku', cacheType: 'read' })).toBe(0.03);
});
it('should return null if cacheType is provided but not found in cacheTokenValues', () => {
expect(
getCacheMultiplier({ valueKey: 'claude-3-5-sonnet', cacheType: 'unknownType' }),
).toBeNull();
});
it('should derive the valueKey from the model if not provided', () => {
expect(getCacheMultiplier({ cacheType: 'write', model: 'claude-3-5-sonnet-20240620' })).toBe(
3.75,
);
expect(getCacheMultiplier({ cacheType: 'read', model: 'claude-3-haiku-20240307' })).toBe(0.03);
});
it('should return null if only model or cacheType is missing', () => {
expect(getCacheMultiplier({ cacheType: 'write' })).toBeNull();
expect(getCacheMultiplier({ model: 'claude-3-5-sonnet' })).toBeNull();
});
it('should return null if derived valueKey does not match any known patterns', () => {
expect(getCacheMultiplier({ cacheType: 'write', model: 'gpt-4-some-other-info' })).toBeNull();
});
it('should handle endpointTokenConfig if provided', () => {
const endpointTokenConfig = {
'custom-model': {
write: 5,
read: 1,
},
};
expect(
getCacheMultiplier({ model: 'custom-model', cacheType: 'write', endpointTokenConfig }),
).toBe(5);
expect(
getCacheMultiplier({ model: 'custom-model', cacheType: 'read', endpointTokenConfig }),
).toBe(1);
});
it('should return null if model is not found in endpointTokenConfig', () => {
const endpointTokenConfig = {
'custom-model': {
write: 5,
read: 1,
},
};
expect(
getCacheMultiplier({ model: 'unknown-model', cacheType: 'write', endpointTokenConfig }),
).toBeNull();
});
it('should handle models with "bedrock/" prefix', () => {
expect(
getCacheMultiplier({
model: 'bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0',
cacheType: 'write',
}),
).toBe(3.75);
expect(
getCacheMultiplier({
model: 'bedrock/anthropic.claude-3-haiku-20240307-v1:0',
cacheType: 'read',
}),
).toBe(0.03);
});
});

View File

@@ -1,37 +1,28 @@
const bcrypt = require('bcryptjs');
const signPayload = require('~/server/services/signPayload');
const User = require('./User');
const hashPassword = async (password) => {
const hashedPassword = await new Promise((resolve, reject) => {
bcrypt.hash(password, 10, function (err, hash) {
if (err) {
reject(err);
} else {
resolve(hash);
}
});
});
return hashedPassword;
};
/**
* Retrieve a user by ID and convert the found user document to a plain object.
*
* @param {string} userId - The ID of the user to find and return as a plain object.
* @param {string|string[]} [fieldsToSelect] - The fields to include or exclude in the returned document.
* @returns {Promise<MongoUser>} A plain object representing the user document, or `null` if no user is found.
* @returns {Promise<Object>} A plain object representing the user document, or `null` if no user is found.
*/
const getUserById = async function (userId, fieldsToSelect = null) {
const query = User.findById(userId);
if (fieldsToSelect) {
query.select(fieldsToSelect);
}
return await query.lean();
};
/**
* Search for a single user based on partial data and return matching user document as plain object.
* @param {Partial<MongoUser>} searchCriteria - The partial data to use for searching the user.
* @param {string|string[]} [fieldsToSelect] - The fields to include or exclude in the returned document.
* @returns {Promise<MongoUser>} A plain object representing the user document, or `null` if no user is found.
*/
const findUser = async function (searchCriteria, fieldsToSelect = null) {
const query = User.findOne(searchCriteria);
if (fieldsToSelect) {
query.select(fieldsToSelect);
}
return await query.lean();
const getUser = async function (userId) {
return await User.findById(userId).lean();
};
/**
@@ -39,127 +30,17 @@ const findUser = async function (searchCriteria, fieldsToSelect = null) {
*
* @param {string} userId - The ID of the user to update.
* @param {Object} updateData - An object containing the properties to update.
* @returns {Promise<MongoUser>} The updated user document as a plain object, or `null` if no user is found.
* @returns {Promise<Object>} The updated user document as a plain object, or `null` if no user is found.
*/
const updateUser = async function (userId, updateData) {
const updateOperation = {
$set: updateData,
$unset: { expiresAt: '' }, // Remove the expiresAt field to prevent TTL
};
return await User.findByIdAndUpdate(userId, updateOperation, {
return await User.findByIdAndUpdate(userId, updateData, {
new: true,
runValidators: true,
}).lean();
};
/**
* Creates a new user, optionally with a TTL of 1 week.
* @param {MongoUser} data - The user data to be created, must contain user_id.
* @param {boolean} [disableTTL=true] - Whether to disable the TTL. Defaults to `true`.
* @param {boolean} [returnUser=false] - Whether to return the created user document instead of its ID. Defaults to `false`.
* @returns {Promise<ObjectId>} A promise that resolves to the created user document ID.
* @throws {Error} If a user with the same user_id already exists.
*/
const createUser = async (data, disableTTL = true, returnUser = false) => {
const userData = {
...data,
expiresAt: disableTTL ? null : new Date(Date.now() + 604800 * 1000), // 1 week in milliseconds
};
if (disableTTL) {
delete userData.expiresAt;
}
const user = await User.create(userData);
if (returnUser) {
return user.toObject();
}
return user._id;
};
/**
* Count the number of user documents in the collection based on the provided filter.
*
* @param {Object} [filter={}] - The filter to apply when counting the documents.
* @returns {Promise<number>} The count of documents that match the filter.
*/
const countUsers = async function (filter = {}) {
return await User.countDocuments(filter);
};
/**
* Delete a user by their unique ID.
*
* @param {string} userId - The ID of the user to delete.
* @returns {Promise<{ deletedCount: number }>} An object indicating the number of deleted documents.
*/
const deleteUserById = async function (userId) {
try {
const result = await User.deleteOne({ _id: userId });
if (result.deletedCount === 0) {
return { deletedCount: 0, message: 'No user found with that ID.' };
}
return { deletedCount: result.deletedCount, message: 'User was deleted successfully.' };
} catch (error) {
throw new Error('Error deleting user: ' + error.message);
}
};
const { SESSION_EXPIRY } = process.env ?? {};
const expires = eval(SESSION_EXPIRY) ?? 1000 * 60 * 15;
/**
* Generates a JWT token for a given user.
*
* @param {MongoUser} user - ID of the user for whom the token is being generated.
* @returns {Promise<string>} A promise that resolves to a JWT token.
*/
const generateToken = async (user) => {
if (!user) {
throw new Error('No user provided');
}
return await signPayload({
payload: {
id: user._id,
username: user.username,
provider: user.provider,
email: user.email,
},
secret: process.env.JWT_SECRET,
expirationTime: expires / 1000,
});
};
/**
* Compares the provided password with the user's password.
*
* @param {MongoUser} user - the user to compare password for.
* @param {string} candidatePassword - The password to test against the user's password.
* @returns {Promise<boolean>} A promise that resolves to a boolean indicating if the password matches.
*/
const comparePassword = async (user, candidatePassword) => {
if (!user) {
throw new Error('No user provided');
}
return new Promise((resolve, reject) => {
bcrypt.compare(candidatePassword, user.password, (err, isMatch) => {
if (err) {
reject(err);
}
resolve(isMatch);
});
});
};
module.exports = {
comparePassword,
deleteUserById,
generateToken,
getUserById,
countUsers,
createUser,
hashPassword,
updateUser,
findUser,
getUser,
};
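
A hypothetical flow (not part of the diff) combining the user helpers exported above; the `~/models` import path, the plain-object arguments, and the surrounding function are assumptions for illustration only.

// Hypothetical registration/login flow using the helpers above (import path assumed).
const {
  hashPassword,
  createUser,
  findUser,
  comparePassword,
  generateToken,
} = require('~/models');

async function registerAndLogin(email, plainPassword) {
  const password = await hashPassword(plainPassword);
  const userId = await createUser({ email, password, provider: 'local' }); // returns the new _id by default
  const user = await findUser({ _id: userId });
  if (!(await comparePassword(user, plainPassword))) {
    throw new Error('Invalid credentials');
  }
  return await generateToken(user); // JWT signed with JWT_SECRET, expiry from SESSION_EXPIRY
}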

View File

@@ -1,6 +1,6 @@
{
"name": "@librechat/backend",
"version": "v0.7.5-rc2",
"version": "0.7.2",
"description": "",
"scripts": {
"start": "echo 'please run this from the root directory'",
@@ -12,7 +12,6 @@
"list-balances": "node ./list-balances.js",
"user-stats": "node ./user-stats.js",
"create-user": "node ./create-user.js",
"invite-user": "node ./invite-user.js",
"ban-user": "node ./ban-user.js",
"delete-user": "node ./delete-user.js"
},
@@ -40,20 +39,16 @@
"@keyv/mongo": "^2.1.8",
"@keyv/redis": "^2.8.1",
"@langchain/community": "^0.0.46",
"@langchain/core": "^0.2.18",
"@langchain/google-genai": "^0.0.11",
"@langchain/google-vertexai": "^0.0.17",
"@librechat/agents": "^1.5.2",
"@langchain/google-vertexai": "^0.0.5",
"agenda": "^5.0.0",
"axios": "^1.3.4",
"bcryptjs": "^2.4.3",
"cheerio": "^1.0.0-rc.12",
"cohere-ai": "^7.9.1",
"compression": "^1.7.4",
"connect-redis": "^7.1.0",
"cookie": "^0.5.0",
"cookie-parser": "^1.4.6",
"cors": "^2.8.5",
"dedent": "^1.5.3",
"dotenv": "^16.0.3",
"express": "^4.18.2",
"express-mongo-sanitize": "^2.2.0",
@@ -78,7 +73,6 @@
"module-alias": "^2.2.3",
"mongoose": "^7.1.1",
"multer": "^1.4.5-lts.1",
"nanoid": "^3.3.7",
"nodejs-gpt": "^1.37.4",
"nodemailer": "^6.9.4",
"ollama": "^0.5.0",
@@ -92,7 +86,6 @@
"passport-github2": "^0.1.12",
"passport-google-oauth20": "^2.0.0",
"passport-jwt": "^4.0.1",
"passport-ldapauth": "^3.0.1",
"passport-local": "^1.0.0",
"pino": "^8.12.1",
"sharp": "^0.32.6",
@@ -101,12 +94,10 @@
"ua-parser-js": "^1.0.36",
"winston": "^3.11.0",
"winston-daily-rotate-file": "^4.7.1",
"ws": "^8.17.0",
"zod": "^3.22.4"
},
"devDependencies": {
"jest": "^29.7.0",
"mongodb-memory-server": "^10.0.0",
"jest": "^29.5.0",
"nodemon": "^3.0.1",
"supertest": "^6.3.3"
}

View File

@@ -1,9 +1,8 @@
const throttle = require('lodash/throttle');
const { getResponseSender, Constants, CacheKeys, Time } = require('librechat-data-provider');
const { getResponseSender, Constants, EModelEndpoint } = require('librechat-data-provider');
const { createAbortController, handleAbortError } = require('~/server/middleware');
const { sendMessage, createOnProgress } = require('~/server/utils');
const { getLogStores } = require('~/cache');
const { saveMessage } = require('~/models');
const { saveMessage, getConvo } = require('~/models');
const { logger } = require('~/config');
const AskController = async (req, res, next, initializeClient, addTitle) => {
@@ -19,7 +18,6 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
logger.debug('[AskController]', { text, conversationId, ...endpointOption });
let userMessage;
let userMessagePromise;
let promptTokens;
let userMessageId;
let responseMessageId;
@@ -36,8 +34,6 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
if (key === 'userMessage') {
userMessage = data[key];
userMessageId = data[key].messageId;
} else if (key === 'userMessagePromise') {
userMessagePromise = data[key];
} else if (key === 'responseMessageId') {
responseMessageId = data[key];
} else if (key === 'promptTokens') {
@@ -52,13 +48,11 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
try {
const { client } = await initializeClient({ req, res, endpointOption });
const messageCache = getLogStores(CacheKeys.MESSAGES);
const unfinished = endpointOption.endpoint === EModelEndpoint.google ? false : true;
const { onProgress: progressCallback, getPartialText } = createOnProgress({
onProgress: throttle(
({ text: partialText }) => {
/*
const unfinished = endpointOption.endpoint === EModelEndpoint.google ? false : true;
messageCache.set(responseMessageId, {
saveMessage({
messageId: responseMessageId,
sender,
conversationId,
@@ -68,10 +62,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
unfinished,
error: false,
user,
}, Time.FIVE_MINUTES);
*/
messageCache.set(responseMessageId, partialText, Time.FIVE_MINUTES);
});
},
3000,
{ trailing: false },
@@ -83,7 +74,6 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
const getAbortData = () => ({
sender,
conversationId,
userMessagePromise,
messageId: responseMessageId,
parentMessageId: overrideParentMessageId ?? userMessageId,
text: getPartialText(),
@@ -91,7 +81,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
promptTokens,
});
const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData);
const { abortController, onStart } = createAbortController(req, res, getAbortData);
res.on('close', () => {
logger.debug('[AskController] Request closed');
@@ -115,17 +105,22 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
getReqData,
onStart,
abortController,
progressCallback,
progressOptions: {
onProgress: progressCallback.call(null, {
res,
// parentMessageId: overrideParentMessageId || userMessageId,
},
text,
parentMessageId: overrideParentMessageId || userMessageId,
}),
};
let response = await client.sendMessage(text, messageOptions);
if (overrideParentMessageId) {
response.parentMessageId = overrideParentMessageId;
}
response.endpoint = endpointOption.endpoint;
const { conversation = {} } = await client.responsePromise;
const conversation = await getConvo(user, conversationId);
conversation.title =
conversation && !conversation.title ? null : conversation?.title || 'New Chat';
@@ -145,18 +140,10 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
});
res.end();
await saveMessage(
req,
{ ...response, user },
{ context: 'api/server/controllers/AskController.js - response end' },
);
await saveMessage({ ...response, user });
}
if (!client.skipSaveUserMessage) {
await saveMessage(req, userMessage, {
context: 'api/server/controllers/AskController.js - don\'t skip saving user message',
});
}
await saveMessage(userMessage);
if (addTitle && parentMessageId === Constants.NO_PARENT && newConvo) {
addTitle(req, {

View File

@@ -1,29 +1,45 @@
const crypto = require('crypto');
const cookies = require('cookie');
const jwt = require('jsonwebtoken');
const { Session, User } = require('~/models');
const {
registerUser,
resetPassword,
setAuthTokens,
requestPasswordReset,
} = require('~/server/services/AuthService');
const { hashToken } = require('~/server/utils/crypto');
const { Session, getUserById } = require('~/models');
const { logger } = require('~/config');
const registrationController = async (req, res) => {
try {
const response = await registerUser(req.body);
const { status, message } = response;
res.status(status).send({ message });
if (response.status === 200) {
const { status, user } = response;
let newUser = await User.findOne({ _id: user._id });
if (!newUser) {
newUser = new User(user);
await newUser.save();
}
const token = await setAuthTokens(user._id, res);
res.setHeader('Authorization', `Bearer ${token}`);
res.status(status).send({ user });
} else {
const { status, message } = response;
res.status(status).send({ message });
}
} catch (err) {
logger.error('[registrationController]', err);
return res.status(500).json({ message: err.message });
}
};
const getUserController = async (req, res) => {
return res.status(200).send(req.user);
};
const resetPasswordRequestController = async (req, res) => {
try {
const resetService = await requestPasswordReset(req);
const resetService = await requestPasswordReset(req.body.email);
if (resetService instanceof Error) {
return res.status(400).json(resetService);
} else {
@@ -61,7 +77,7 @@ const refreshController = async (req, res) => {
try {
const payload = jwt.verify(refreshToken, process.env.JWT_REFRESH_SECRET);
const user = await getUserById(payload.id, '-password -__v');
const user = await User.findOne({ _id: payload.id });
if (!user) {
return res.status(401).redirect('/login');
}
@@ -70,17 +86,20 @@ const refreshController = async (req, res) => {
if (process.env.NODE_ENV === 'CI') {
const token = await setAuthTokens(userId, res);
return res.status(200).send({ token, user });
const userObj = user.toJSON();
return res.status(200).send({ token, user: userObj });
}
// Hash the refresh token
const hashedToken = await hashToken(refreshToken);
const hash = crypto.createHash('sha256');
const hashedToken = hash.update(refreshToken).digest('hex');
// Find the session with the hashed refresh token
const session = await Session.findOne({ user: userId, refreshTokenHash: hashedToken });
if (session && session.expiration > new Date()) {
const token = await setAuthTokens(userId, res, session._id);
res.status(200).send({ token, user });
const userObj = user.toJSON();
res.status(200).send({ token, user: userObj });
} else if (req?.query?.retry) {
// Retrying from a refresh token request that failed (401)
res.status(403).send('No session found');
@@ -96,6 +115,7 @@ const refreshController = async (req, res) => {
};
module.exports = {
getUserController,
refreshController,
registrationController,
resetPasswordController,

View File

@@ -1,4 +1,4 @@
const Balance = require('~/models/Balance');
const Balance = require('../../models/Balance');
async function balanceController(req, res) {
const { tokenCredits: balance = '' } =

View File

@@ -1,9 +1,8 @@
const throttle = require('lodash/throttle');
const { getResponseSender, CacheKeys, Time } = require('librechat-data-provider');
const { getResponseSender, EModelEndpoint } = require('librechat-data-provider');
const { createAbortController, handleAbortError } = require('~/server/middleware');
const { sendMessage, createOnProgress } = require('~/server/utils');
const { getLogStores } = require('~/cache');
const { saveMessage } = require('~/models');
const { saveMessage, getConvo } = require('~/models');
const { logger } = require('~/config');
const EditController = async (req, res, next, initializeClient) => {
@@ -28,7 +27,6 @@ const EditController = async (req, res, next, initializeClient) => {
});
let userMessage;
let userMessagePromise;
let promptTokens;
const sender = getResponseSender({
...endpointOption,
@@ -42,8 +40,6 @@ const EditController = async (req, res, next, initializeClient) => {
for (let key in data) {
if (key === 'userMessage') {
userMessage = data[key];
} else if (key === 'userMessagePromise') {
userMessagePromise = data[key];
} else if (key === 'responseMessageId') {
responseMessageId = data[key];
} else if (key === 'promptTokens') {
@@ -52,14 +48,12 @@ const EditController = async (req, res, next, initializeClient) => {
}
};
const messageCache = getLogStores(CacheKeys.MESSAGES);
const unfinished = endpointOption.endpoint === EModelEndpoint.google ? false : true;
const { onProgress: progressCallback, getPartialText } = createOnProgress({
generation,
onProgress: throttle(
({ text: partialText }) => {
/*
const unfinished = endpointOption.endpoint === EModelEndpoint.google ? false : true;
{
saveMessage({
messageId: responseMessageId,
sender,
conversationId,
@@ -70,8 +64,7 @@ const EditController = async (req, res, next, initializeClient) => {
isEdited: true,
error: false,
user,
} */
messageCache.set(responseMessageId, partialText, Time.FIVE_MINUTES);
});
},
3000,
{ trailing: false },
@@ -80,7 +73,6 @@ const EditController = async (req, res, next, initializeClient) => {
const getAbortData = () => ({
conversationId,
userMessagePromise,
messageId: responseMessageId,
sender,
parentMessageId: overrideParentMessageId ?? userMessageId,
@@ -89,7 +81,7 @@ const EditController = async (req, res, next, initializeClient) => {
promptTokens,
});
const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData);
const { abortController, onStart } = createAbortController(req, res, getAbortData);
res.on('close', () => {
logger.debug('[EditController] Request closed');
@@ -120,14 +112,14 @@ const EditController = async (req, res, next, initializeClient) => {
getReqData,
onStart,
abortController,
progressCallback,
progressOptions: {
onProgress: progressCallback.call(null, {
res,
// parentMessageId: overrideParentMessageId || userMessageId,
},
text,
parentMessageId: overrideParentMessageId || userMessageId,
}),
});
const { conversation = {} } = await client.responsePromise;
const conversation = await getConvo(user, conversationId);
conversation.title =
conversation && !conversation.title ? null : conversation?.title || 'New Chat';
@@ -145,11 +137,7 @@ const EditController = async (req, res, next, initializeClient) => {
});
res.end();
await saveMessage(
req,
{ ...response, user },
{ context: 'api/server/controllers/EditController.js - response end' },
);
await saveMessage({ ...response, user });
}
} catch (error) {
const partialText = getPartialText();

View File

@@ -44,14 +44,6 @@ async function endpointController(req, res) {
};
}
if (mergedConfig[EModelEndpoint.bedrock] && req.app.locals?.[EModelEndpoint.bedrock]) {
const { availableRegions } = req.app.locals[EModelEndpoint.bedrock];
mergedConfig[EModelEndpoint.bedrock] = {
...mergedConfig[EModelEndpoint.bedrock],
availableRegions,
};
}
const endpointsConfig = orderEndpointsConfig(mergedConfig);
await cache.set(CacheKeys.ENDPOINT_CONFIG, endpointsConfig);

View File

@@ -2,9 +2,6 @@ const { CacheKeys } = require('librechat-data-provider');
const { loadDefaultModels, loadConfigModels } = require('~/server/services/Config');
const { getLogStores } = require('~/cache');
/**
* @param {ServerRequest} req
*/
const getModelsConfig = async (req) => {
const cache = getLogStores(CacheKeys.CONFIG_STORE);
let modelsConfig = await cache.get(CacheKeys.MODELS_CONFIG);
@@ -17,7 +14,7 @@ const getModelsConfig = async (req) => {
/**
* Loads the models from the config.
* @param {ServerRequest} req - The Express request object.
* @param {Express.Request} req - The Express request object.
* @returns {Promise<TModelsConfig>} The models config.
*/
async function loadModels(req) {

View File

@@ -1,64 +1,11 @@
const {
Session,
Balance,
getFiles,
deleteFiles,
deleteConvos,
deletePresets,
deleteMessages,
deleteUserById,
} = require('~/models');
const User = require('~/models/User');
const { updateUserPluginsService } = require('~/server/services/UserService');
const { updateUserPluginAuth, deleteUserPluginAuth } = require('~/server/services/PluginService');
const { updateUserPluginsService, deleteUserKey } = require('~/server/services/UserService');
const { verifyEmail, resendVerificationEmail } = require('~/server/services/AuthService');
const { processDeleteRequest } = require('~/server/services/Files/process');
const { deleteAllSharedLinks } = require('~/models/Share');
const { Transaction } = require('~/models/Transaction');
const { logger } = require('~/config');
const getUserController = async (req, res) => {
res.status(200).send(req.user);
};
const getTermsStatusController = async (req, res) => {
try {
const user = await User.findById(req.user.id);
if (!user) {
return res.status(404).json({ message: 'User not found' });
}
res.status(200).json({ termsAccepted: !!user.termsAccepted });
} catch (error) {
logger.error('Error fetching terms acceptance status:', error);
res.status(500).json({ message: 'Error fetching terms acceptance status' });
}
};
const acceptTermsController = async (req, res) => {
try {
const user = await User.findByIdAndUpdate(req.user.id, { termsAccepted: true }, { new: true });
if (!user) {
return res.status(404).json({ message: 'User not found' });
}
res.status(200).json({ message: 'Terms accepted successfully' });
} catch (error) {
logger.error('Error accepting terms:', error);
res.status(500).json({ message: 'Error accepting terms' });
}
};
const deleteUserFiles = async (req) => {
try {
const userFiles = await getFiles({ user: req.user.id });
await processDeleteRequest({
req,
files: userFiles,
});
} catch (error) {
logger.error('[deleteUserFiles]', error);
}
};
const updateUserPluginsController = async (req, res) => {
const { user } = req;
const { pluginKey, action, auth, isAssistantTool } = req.body;
@@ -102,70 +49,11 @@ const updateUserPluginsController = async (req, res) => {
res.status(200).send();
} catch (err) {
logger.error('[updateUserPluginsController]', err);
return res.status(500).json({ message: 'Something went wrong.' });
}
};
const deleteUserController = async (req, res) => {
const { user } = req;
try {
await deleteMessages({ user: user.id }); // delete user messages
await Session.deleteMany({ user: user.id }); // delete user sessions
await Transaction.deleteMany({ user: user.id }); // delete user transactions
await deleteUserKey({ userId: user.id, all: true }); // delete user keys
await Balance.deleteMany({ user: user._id }); // delete user balances
await deletePresets(user.id); // delete user presets
/* TODO: Delete Assistant Threads */
await deleteConvos(user.id); // delete user convos
await deleteUserPluginAuth(user.id, null, true); // delete user plugin auth
await deleteUserById(user.id); // delete user
await deleteAllSharedLinks(user.id); // delete user shared links
await deleteUserFiles(req); // delete user files
await deleteFiles(null, user.id); // delete database files in case of orphaned files from previous steps
/* TODO: queue job for cleaning actions and assistants of non-existent users */
logger.info(`User deleted account. Email: ${user.email} ID: ${user.id}`);
res.status(200).send({ message: 'User deleted' });
} catch (err) {
logger.error('[deleteUserController]', err);
return res.status(500).json({ message: 'Something went wrong.' });
}
};
const verifyEmailController = async (req, res) => {
try {
const verifyEmailService = await verifyEmail(req);
if (verifyEmailService instanceof Error) {
return res.status(400).json(verifyEmailService);
} else {
return res.status(200).json(verifyEmailService);
}
} catch (e) {
logger.error('[verifyEmailController]', e);
return res.status(500).json({ message: 'Something went wrong.' });
}
};
const resendVerificationController = async (req, res) => {
try {
const result = await resendVerificationEmail(req);
if (result instanceof Error) {
return res.status(400).json(result);
} else {
return res.status(200).json(result);
}
} catch (e) {
logger.error('[verifyEmailController]', e);
return res.status(500).json({ message: 'Something went wrong.' });
res.status(500).json({ message: err.message });
}
};
module.exports = {
getUserController,
getTermsStatusController,
acceptTermsController,
deleteUserController,
verifyEmailController,
updateUserPluginsController,
resendVerificationController,
};

View File

@@ -1,127 +0,0 @@
const { GraphEvents, ToolEndHandler, ChatModelStreamHandler } = require('@librechat/agents');
/** @typedef {import('@librechat/agents').Graph} Graph */
/** @typedef {import('@librechat/agents').EventHandler} EventHandler */
/** @typedef {import('@librechat/agents').ModelEndData} ModelEndData */
/** @typedef {import('@librechat/agents').ChatModelStreamHandler} ChatModelStreamHandler */
/** @typedef {import('@librechat/agents').ContentAggregatorResult['aggregateContent']} ContentAggregator */
/** @typedef {import('@librechat/agents').GraphEvents} GraphEvents */
/**
* Sends message data in Server Sent Events format.
* @param {ServerResponse} res - The server response.
* @param {{ data: string | Record<string, unknown>, event?: string }} event - The message event.
* @param {string} event.event - The type of event.
* @param {string} event.data - The message to be sent.
*/
const sendEvent = (res, event) => {
if (typeof event.data === 'string' && event.data.length === 0) {
return;
}
res.write(`event: message\ndata: ${JSON.stringify(event)}\n\n`);
};
class ModelEndHandler {
/**
* @param {Array<UsageMetadata>} collectedUsage
*/
constructor(collectedUsage) {
if (!Array.isArray(collectedUsage)) {
throw new Error('collectedUsage must be an array');
}
this.collectedUsage = collectedUsage;
}
/**
* @param {string} event
* @param {ModelEndData | undefined} data
* @param {Record<string, unknown> | undefined} metadata
* @param {Graph} graph
* @returns
*/
handle(event, data, metadata, graph) {
if (!graph || !metadata) {
console.warn(`Graph or metadata not found in ${event} event`);
return;
}
const usage = data?.output?.usage_metadata;
if (usage) {
this.collectedUsage.push(usage);
}
}
}
/**
* Get default handlers for stream events.
* @param {Object} options - The options object.
* @param {ServerResponse} options.res - The options object.
* @param {ContentAggregator} options.aggregateContent - The options object.
* @param {Array<UsageMetadata>} options.collectedUsage - The list of collected usage metadata.
* @returns {Record<string, t.EventHandler>} The default handlers.
* @throws {Error} If the request is not found.
*/
function getDefaultHandlers({ res, aggregateContent, collectedUsage }) {
if (!res || !aggregateContent) {
throw new Error(
`[getDefaultHandlers] Missing required options: res: ${!res}, aggregateContent: ${!aggregateContent}`,
);
}
const handlers = {
[GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
[GraphEvents.TOOL_END]: new ToolEndHandler(),
[GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
[GraphEvents.ON_RUN_STEP]: {
/**
* Handle ON_RUN_STEP event.
* @param {string} event - The event name.
* @param {StreamEventData} data - The event data.
*/
handle: (event, data) => {
sendEvent(res, { event, data });
aggregateContent({ event, data });
},
},
[GraphEvents.ON_RUN_STEP_DELTA]: {
/**
* Handle ON_RUN_STEP_DELTA event.
* @param {string} event - The event name.
* @param {StreamEventData} data - The event data.
*/
handle: (event, data) => {
sendEvent(res, { event, data });
aggregateContent({ event, data });
},
},
[GraphEvents.ON_RUN_STEP_COMPLETED]: {
/**
* Handle ON_RUN_STEP_COMPLETED event.
* @param {string} event - The event name.
* @param {StreamEventData & { result: ToolEndData }} data - The event data.
*/
handle: (event, data) => {
sendEvent(res, { event, data });
aggregateContent({ event, data });
},
},
[GraphEvents.ON_MESSAGE_DELTA]: {
/**
* Handle ON_MESSAGE_DELTA event.
* @param {string} event - The event name.
* @param {StreamEventData} data - The event data.
*/
handle: (event, data) => {
sendEvent(res, { event, data });
aggregateContent({ event, data });
},
},
};
return handlers;
}
module.exports = {
sendEvent,
getDefaultHandlers,
};
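
As a quick sketch (not part of the diff), the handlers above might be wired into an agent run roughly like this; the module path, the `res` response object, and the `aggregateContent` callback are assumptions supplied by the calling controller.

// Hypothetical wiring of the default stream handlers (module path assumed).
const { getDefaultHandlers } = require('./callbacks');

function buildRunOptions(res, aggregateContent) {
  const collectedUsage = [];
  // CHAT_MODEL_END pushes usage_metadata into collectedUsage; the remaining handlers
  // forward run-step / message-delta events to the client and aggregate content parts.
  const eventHandlers = getDefaultHandlers({ res, aggregateContent, collectedUsage });
  return { eventHandlers, collectedUsage };
}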

View File

@@ -1,608 +0,0 @@
// const { HttpsProxyAgent } = require('https-proxy-agent');
// const {
// Constants,
// ImageDetail,
// EModelEndpoint,
// resolveHeaders,
// validateVisionModel,
// mapModelToAzureConfig,
// } = require('librechat-data-provider');
const { Callback, createMetadataAggregator } = require('@librechat/agents');
const {
Constants,
EModelEndpoint,
bedrockOutputParser,
providerEndpointMap,
removeNullishValues,
} = require('librechat-data-provider');
const {
extractBaseURL,
// constructAzureURL,
// genAzureChatCompletion,
} = require('~/utils');
const {
formatMessage,
formatAgentMessages,
createContextHandlers,
} = require('~/app/clients/prompts');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const Tokenizer = require('~/server/services/Tokenizer');
const { spendTokens } = require('~/models/spendTokens');
const BaseClient = require('~/app/clients/BaseClient');
// const { sleep } = require('~/server/utils');
const { createRun } = require('./run');
const { logger } = require('~/config');
/** @typedef {import('@librechat/agents').MessageContentComplex} MessageContentComplex */
// const providerSchemas = {
// [EModelEndpoint.bedrock]: true,
// };
const providerParsers = {
[EModelEndpoint.bedrock]: bedrockOutputParser,
};
class AgentClient extends BaseClient {
constructor(options = {}) {
super(null, options);
/** @type {'discard' | 'summarize'} */
this.contextStrategy = 'discard';
/** @deprecated @type {true} - Is a Chat Completion Request */
this.isChatCompletion = true;
/** @type {AgentRun} */
this.run;
const {
maxContextTokens,
modelOptions = {},
contentParts,
collectedUsage,
...clientOptions
} = options;
this.modelOptions = modelOptions;
this.maxContextTokens = maxContextTokens;
/** @type {MessageContentComplex[]} */
this.contentParts = contentParts;
/** @type {Array<UsageMetadata>} */
this.collectedUsage = collectedUsage;
this.options = Object.assign({ endpoint: options.endpoint }, clientOptions);
}
/**
* Returns the aggregated content parts for the current run.
* @returns {MessageContentComplex[]} */
getContentParts() {
return this.contentParts;
}
setOptions(options) {
logger.info('[api/server/controllers/agents/client.js] setOptions', options);
}
/**
*
* Checks if the model is a vision model based on request attachments and sets the appropriate options:
* - Sets `this.modelOptions.model` to `gpt-4-vision-preview` if the request is a vision request.
* - Sets `this.isVisionModel` to `true` if vision request.
* - Deletes `this.modelOptions.stop` if vision request.
* @param {MongoFile[]} attachments
*/
checkVisionRequest(attachments) {
logger.info(
'[api/server/controllers/agents/client.js #checkVisionRequest] not implemented',
attachments,
);
// if (!attachments) {
// return;
// }
// const availableModels = this.options.modelsConfig?.[this.options.endpoint];
// if (!availableModels) {
// return;
// }
// let visionRequestDetected = false;
// for (const file of attachments) {
// if (file?.type?.includes('image')) {
// visionRequestDetected = true;
// break;
// }
// }
// if (!visionRequestDetected) {
// return;
// }
// this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
// if (this.isVisionModel) {
// delete this.modelOptions.stop;
// return;
// }
// for (const model of availableModels) {
// if (!validateVisionModel({ model, availableModels })) {
// continue;
// }
// this.modelOptions.model = model;
// this.isVisionModel = true;
// delete this.modelOptions.stop;
// return;
// }
// if (!availableModels.includes(this.defaultVisionModel)) {
// return;
// }
// if (!validateVisionModel({ model: this.defaultVisionModel, availableModels })) {
// return;
// }
// this.modelOptions.model = this.defaultVisionModel;
// this.isVisionModel = true;
// delete this.modelOptions.stop;
}
getSaveOptions() {
const parseOptions = providerParsers[this.options.endpoint];
let runOptions =
this.options.endpoint === EModelEndpoint.agents
? {
model: undefined,
// TODO:
// would need to be override settings; otherwise, model needs to be undefined
// model: this.override.model,
// instructions: this.override.instructions,
// additional_instructions: this.override.additional_instructions,
}
: {};
if (parseOptions) {
runOptions = parseOptions(this.modelOptions);
}
return removeNullishValues(
Object.assign(
{
endpoint: this.options.endpoint,
agent_id: this.options.agent.id,
modelLabel: this.options.modelLabel,
maxContextTokens: this.options.maxContextTokens,
resendFiles: this.options.resendFiles,
imageDetail: this.options.imageDetail,
spec: this.options.spec,
},
// TODO: PARSE OPTIONS BY PROVIDER, MAY CONTAIN SENSITIVE DATA
runOptions,
),
);
}
getBuildMessagesOptions(opts) {
return {
instructions: opts.instructions,
additional_instructions: opts.additional_instructions,
};
}
async addImageURLs(message, attachments) {
const { files, image_urls } = await encodeAndFormat(
this.options.req,
attachments,
this.options.agent.provider,
);
message.image_urls = image_urls.length ? image_urls : undefined;
return files;
}
async buildMessages(
messages,
parentMessageId,
{ instructions = null, additional_instructions = null },
opts,
) {
let orderedMessages = this.constructor.getMessagesForConversation({
messages,
parentMessageId,
summary: this.shouldSummarize,
});
let payload;
/** @type {{ role: string; name: string; content: string } | undefined} */
let systemMessage;
/** @type {number | undefined} */
let promptTokens;
/** @type {string} */
let systemContent = `${instructions ?? ''}${additional_instructions ?? ''}`;
if (this.options.attachments) {
const attachments = await this.options.attachments;
if (this.message_file_map) {
this.message_file_map[orderedMessages[orderedMessages.length - 1].messageId] = attachments;
} else {
this.message_file_map = {
[orderedMessages[orderedMessages.length - 1].messageId]: attachments,
};
}
const files = await this.addImageURLs(
orderedMessages[orderedMessages.length - 1],
attachments,
);
this.options.attachments = files;
}
if (this.message_file_map) {
this.contextHandlers = createContextHandlers(
this.options.req,
orderedMessages[orderedMessages.length - 1].text,
);
}
const formattedMessages = orderedMessages.map((message, i) => {
const formattedMessage = formatMessage({
message,
userName: this.options?.name,
assistantName: this.options?.modelLabel,
});
const needsTokenCount = this.contextStrategy && !orderedMessages[i].tokenCount;
/* If tokens were never counted, or this is a vision request and the message has files, count again */
if (needsTokenCount || (this.isVisionModel && (message.image_urls || message.files))) {
orderedMessages[i].tokenCount = this.getTokenCountForMessage(formattedMessage);
}
/* If message has files, calculate image token cost */
// if (this.message_file_map && this.message_file_map[message.messageId]) {
// const attachments = this.message_file_map[message.messageId];
// for (const file of attachments) {
// if (file.embedded) {
// this.contextHandlers?.processFile(file);
// continue;
// }
// orderedMessages[i].tokenCount += this.calculateImageTokenCost({
// width: file.width,
// height: file.height,
// detail: this.options.imageDetail ?? ImageDetail.auto,
// });
// }
// }
return formattedMessage;
});
if (this.contextHandlers) {
this.augmentedPrompt = await this.contextHandlers.createContext();
systemContent = this.augmentedPrompt + systemContent;
}
if (systemContent) {
systemContent = `${systemContent.trim()}`;
systemMessage = {
role: 'system',
name: 'instructions',
content: systemContent,
};
if (this.contextStrategy) {
const instructionTokens = this.getTokenCountForMessage(systemMessage);
if (instructionTokens >= 0) {
const firstMessageTokens = orderedMessages[0].tokenCount ?? 0;
orderedMessages[0].tokenCount = firstMessageTokens + instructionTokens;
}
}
}
if (this.contextStrategy) {
({ payload, promptTokens, messages } = await this.handleContextStrategy({
orderedMessages,
formattedMessages,
/* prefer usage_metadata from final message */
buildTokenMap: false,
}));
}
const result = {
prompt: payload,
promptTokens,
messages,
};
if (promptTokens >= 0 && typeof opts?.getReqData === 'function') {
opts.getReqData({ promptTokens });
}
return result;
}
/** @type {sendCompletion} */
async sendCompletion(payload, opts = {}) {
this.modelOptions.user = this.user;
await this.chatCompletion({
payload,
onProgress: opts.onProgress,
abortController: opts.abortController,
});
return this.contentParts;
}
/**
* @param {Object} params
* @param {string} [params.model]
* @param {string} [params.context='message']
* @param {UsageMetadata[]} [params.collectedUsage=this.collectedUsage]
*/
async recordCollectedUsage({ model, context = 'message', collectedUsage = this.collectedUsage }) {
for (const usage of collectedUsage) {
await spendTokens(
{
context,
model: model ?? this.modelOptions.model,
conversationId: this.conversationId,
user: this.user ?? this.options.req.user?.id,
endpointTokenConfig: this.options.endpointTokenConfig,
},
{ promptTokens: usage.input_tokens, completionTokens: usage.output_tokens },
);
}
}
async chatCompletion({ payload, abortController = null }) {
try {
if (!abortController) {
abortController = new AbortController();
}
const baseURL = extractBaseURL(this.completionsUrl);
logger.debug('[api/server/controllers/agents/client.js] chatCompletion', {
baseURL,
payload,
});
// if (this.useOpenRouter) {
// opts.defaultHeaders = {
// 'HTTP-Referer': 'https://librechat.ai',
// 'X-Title': 'LibreChat',
// };
// }
// if (this.options.headers) {
// opts.defaultHeaders = { ...opts.defaultHeaders, ...this.options.headers };
// }
// if (this.options.proxy) {
// opts.httpAgent = new HttpsProxyAgent(this.options.proxy);
// }
// if (this.isVisionModel) {
// modelOptions.max_tokens = 4000;
// }
// /** @type {TAzureConfig | undefined} */
// const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];
// if (
// (this.azure && this.isVisionModel && azureConfig) ||
// (azureConfig && this.isVisionModel && this.options.endpoint === EModelEndpoint.azureOpenAI)
// ) {
// const { modelGroupMap, groupMap } = azureConfig;
// const {
// azureOptions,
// baseURL,
// headers = {},
// serverless,
// } = mapModelToAzureConfig({
// modelName: modelOptions.model,
// modelGroupMap,
// groupMap,
// });
// opts.defaultHeaders = resolveHeaders(headers);
// this.langchainProxy = extractBaseURL(baseURL);
// this.apiKey = azureOptions.azureOpenAIApiKey;
// const groupName = modelGroupMap[modelOptions.model].group;
// this.options.addParams = azureConfig.groupMap[groupName].addParams;
// this.options.dropParams = azureConfig.groupMap[groupName].dropParams;
// // Note: `forcePrompt` not re-assigned as only chat models are vision models
// this.azure = !serverless && azureOptions;
// this.azureEndpoint =
// !serverless && genAzureChatCompletion(this.azure, modelOptions.model, this);
// }
// if (this.azure || this.options.azure) {
// /* Azure Bug, extremely short default `max_tokens` response */
// if (!modelOptions.max_tokens && modelOptions.model === 'gpt-4-vision-preview') {
// modelOptions.max_tokens = 4000;
// }
// /* Azure does not accept `model` in the body, so we need to remove it. */
// delete modelOptions.model;
// opts.baseURL = this.langchainProxy
// ? constructAzureURL({
// baseURL: this.langchainProxy,
// azureOptions: this.azure,
// })
// : this.azureEndpoint.split(/(?<!\/)\/(chat|completion)\//)[0];
// opts.defaultQuery = { 'api-version': this.azure.azureOpenAIApiVersion };
// opts.defaultHeaders = { ...opts.defaultHeaders, 'api-key': this.apiKey };
// }
// if (process.env.OPENAI_ORGANIZATION) {
// opts.organization = process.env.OPENAI_ORGANIZATION;
// }
// if (this.options.addParams && typeof this.options.addParams === 'object') {
// modelOptions = {
// ...modelOptions,
// ...this.options.addParams,
// };
// logger.debug('[api/server/controllers/agents/client.js #chatCompletion] added params', {
// addParams: this.options.addParams,
// modelOptions,
// });
// }
// if (this.options.dropParams && Array.isArray(this.options.dropParams)) {
// this.options.dropParams.forEach((param) => {
// delete modelOptions[param];
// });
// logger.debug('[api/server/controllers/agents/client.js #chatCompletion] dropped params', {
// dropParams: this.options.dropParams,
// modelOptions,
// });
// }
const run = await createRun({
req: this.options.req,
agent: this.options.agent,
tools: this.options.tools,
toolMap: this.options.toolMap,
runId: this.responseMessageId,
modelOptions: this.modelOptions,
customHandlers: this.options.eventHandlers,
});
const config = {
configurable: {
provider: providerEndpointMap[this.options.agent.provider],
thread_id: this.conversationId,
},
run_id: this.responseMessageId,
signal: abortController.signal,
streamMode: 'values',
version: 'v2',
};
if (!run) {
throw new Error('Failed to create run');
}
this.run = run;
const messages = formatAgentMessages(payload);
await run.processStream({ messages }, config, {
[Callback.TOOL_ERROR]: (graph, error, toolId) => {
logger.error(
'[api/server/controllers/agents/client.js #chatCompletion] Tool Error',
error,
toolId,
);
},
});
this.recordCollectedUsage({ context: 'message' }).catch((err) => {
logger.error(
'[api/server/controllers/agents/client.js #chatCompletion] Error recording collected usage',
err,
);
});
} catch (err) {
if (!abortController.signal.aborted) {
logger.error(
'[api/server/controllers/agents/client.js #chatCompletion] Unhandled error type',
err,
);
throw err;
}
logger.warn(
'[api/server/controllers/agents/client.js #chatCompletion] Operation aborted',
err,
);
}
}
/**
*
* @param {Object} params
* @param {string} params.text
* @param {string} params.conversationId
*/
async titleConvo({ text }) {
if (!this.run) {
throw new Error('Run not initialized');
}
const { handleLLMEnd, collected: collectedMetadata } = createMetadataAggregator();
const clientOptions = {};
const providerConfig = this.options.req.app.locals[this.options.agent.provider];
if (
providerConfig &&
providerConfig.titleModel &&
providerConfig.titleModel !== Constants.CURRENT_MODEL
) {
clientOptions.model = providerConfig.titleModel;
}
try {
const titleResult = await this.run.generateTitle({
inputText: text,
contentParts: this.contentParts,
clientOptions,
chainOptions: {
callbacks: [
{
handleLLMEnd,
},
],
},
});
const collectedUsage = collectedMetadata.map((item) => {
let input_tokens, output_tokens;
if (item.usage) {
input_tokens = item.usage.input_tokens || item.usage.inputTokens;
output_tokens = item.usage.output_tokens || item.usage.outputTokens;
} else if (item.tokenUsage) {
input_tokens = item.tokenUsage.promptTokens;
output_tokens = item.tokenUsage.completionTokens;
}
return { input_tokens, output_tokens };
});
this.recordCollectedUsage({
model: clientOptions.model,
context: 'title',
collectedUsage,
}).catch((err) => {
logger.error(
'[api/server/controllers/agents/client.js #titleConvo] Error recording collected usage',
err,
);
});
return titleResult.title;
} catch (err) {
logger.error('[api/server/controllers/agents/client.js #titleConvo] Error', err);
return;
}
}
getEncoding() {
return this.modelOptions.model?.includes('gpt-4o') ? 'o200k_base' : 'cl100k_base';
}
/**
* Returns the token count of a given text. It also checks and resets the tokenizers if necessary.
* @param {string} text - The text to get the token count for.
* @returns {number} The token count of the given text.
*/
getTokenCount(text) {
const encoding = this.getEncoding();
return Tokenizer.getTokenCount(text, encoding);
}
}
module.exports = AgentClient;
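/*
 * Usage sketch (not part of the diff above): how a caller might feed
 * recordCollectedUsage once a run finishes. Constructing a full AgentClient
 * is out of scope here, so `client` is assumed to be an already-initialized
 * instance; only the usage-entry shape ({ input_tokens, output_tokens })
 * comes from the class itself.
 */
async function recordExampleUsage(client) {
  // Entries in the shape recordCollectedUsage reads: one spendTokens call
  // per entry, charged under the given context.
  const collectedUsage = [
    { input_tokens: 1200, output_tokens: 350 },
    { input_tokens: 80, output_tokens: 40 },
  ];
  await client.recordCollectedUsage({ context: 'message', collectedUsage });
}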

View File

@@ -1,153 +0,0 @@
// errorHandler.js
const { logger } = require('~/config');
const getLogStores = require('~/cache/getLogStores');
const { CacheKeys, ViolationTypes } = require('librechat-data-provider');
const { recordUsage } = require('~/server/services/Threads');
const { getConvo } = require('~/models/Conversation');
const { sendResponse } = require('~/server/utils');
/**
* @typedef {Object} ErrorHandlerContext
* @property {OpenAIClient} openai - The OpenAI client
* @property {string} run_id - The run ID
* @property {boolean} completedRun - Whether the run has completed
* @property {string} assistant_id - The assistant ID
* @property {string} conversationId - The conversation ID
* @property {string} parentMessageId - The parent message ID
* @property {string} responseMessageId - The response message ID
* @property {string} endpoint - The endpoint being used
* @property {string} cacheKey - The cache key for the current request
*/
/**
* @typedef {Object} ErrorHandlerDependencies
* @property {Express.Request} req - The Express request object
* @property {Express.Response} res - The Express response object
* @property {() => ErrorHandlerContext} getContext - Function to get the current context
* @property {string} [originPath] - The origin path for the error handler
*/
/**
* Creates an error handler function with the given dependencies
* @param {ErrorHandlerDependencies} dependencies - The dependencies for the error handler
* @returns {(error: Error) => Promise<void>} The error handler function
*/
const createErrorHandler = ({ req, res, getContext, originPath = '/assistants/chat/' }) => {
const cache = getLogStores(CacheKeys.ABORT_KEYS);
/**
* Handles errors that occur during the chat process
* @param {Error} error - The error that occurred
* @returns {Promise<void>}
*/
return async (error) => {
const {
openai,
run_id,
endpoint,
cacheKey,
completedRun,
assistant_id,
conversationId,
parentMessageId,
responseMessageId,
} = getContext();
const defaultErrorMessage =
'The Assistant run failed to initialize. Try sending a message in a new conversation.';
const messageData = {
assistant_id,
conversationId,
parentMessageId,
sender: 'System',
user: req.user.id,
shouldSaveMessage: false,
messageId: responseMessageId,
endpoint,
};
if (error.message === 'Run cancelled') {
return res.end();
} else if (error.message === 'Request closed' && completedRun) {
return;
} else if (error.message === 'Request closed') {
logger.debug(`[${originPath}] Request aborted on close`);
} else if (/Files.*are invalid/.test(error.message)) {
const errorMessage = `Files are invalid, or may not have uploaded yet.${
endpoint === 'azureAssistants'
? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.'
: ''
}`;
return sendResponse(req, res, messageData, errorMessage);
} else if (error?.message?.includes('string too long')) {
return sendResponse(
req,
res,
messageData,
'Message too long. The Assistants API has a limit of 32,768 characters per message. Please shorten it and try again.',
);
} else if (error?.message?.includes(ViolationTypes.TOKEN_BALANCE)) {
return sendResponse(req, res, messageData, error.message);
} else {
logger.error(`[${originPath}]`, error);
}
if (!openai || !run_id) {
return sendResponse(req, res, messageData, defaultErrorMessage);
}
await new Promise((resolve) => setTimeout(resolve, 2000));
try {
const status = await cache.get(cacheKey);
if (status === 'cancelled') {
logger.debug(`[${originPath}] Run already cancelled`);
return res.end();
}
await cache.delete(cacheKey);
// const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
// logger.debug(`[${originPath}] Cancelled run:`, cancelledRun);
} catch (error) {
logger.error(`[${originPath}] Error cancelling run`, error);
}
await new Promise((resolve) => setTimeout(resolve, 2000));
let run;
try {
// run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
// Guard: `run` is only populated if the retrieval above is re-enabled;
// spreading `run.usage` while it is undefined would throw here.
if (run?.usage) {
await recordUsage({
...run.usage,
model: run.model,
user: req.user.id,
conversationId,
});
}
} catch (error) {
logger.error(`[${originPath}] Error fetching or processing run`, error);
}
let finalEvent;
try {
// const errorContentPart = {
// text: {
// value:
// error?.message ?? 'There was an error processing your request. Please try again later.',
// },
// type: ContentTypes.ERROR,
// };
finalEvent = {
final: true,
conversation: await getConvo(req.user.id, conversationId),
// runMessages,
};
} catch (error) {
logger.error(`[${originPath}] Error finalizing error process`, error);
return sendResponse(req, res, messageData, 'The Assistant run failed');
}
return sendResponse(req, res, finalEvent);
};
};
module.exports = { createErrorHandler };
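/*
 * Wiring sketch (assumptions flagged inline, not shown in this diff):
 * createErrorHandler is built once per request with a getContext closure and
 * then used as the catch-all for the chat flow. The require path and the
 * `runChat` function are hypothetical placeholders.
 */
const { createErrorHandler } = require('./errorHandler'); // path assumed
function buildChatRoute(runChat) {
  return async (req, res) => {
    // Context fields mirror ErrorHandlerContext above; values are filled in
    // as the chat flow progresses.
    const context = {
      openai: null,
      run_id: null,
      completedRun: false,
      assistant_id: req.body.assistant_id,
      conversationId: req.body.conversationId,
      parentMessageId: req.body.parentMessageId,
      responseMessageId: null,
      endpoint: req.body.endpoint,
      cacheKey: `${req.user.id}:${req.body.conversationId}`,
    };
    const handleError = createErrorHandler({ req, res, getContext: () => context });
    try {
      await runChat(req, res, context);
    } catch (error) {
      await handleError(error);
    }
  };
}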

View File

@@ -1,106 +0,0 @@
const { HttpsProxyAgent } = require('https-proxy-agent');
const { resolveHeaders } = require('librechat-data-provider');
const { createLLM } = require('~/app/clients/llm');
/**
* Initializes and returns a Large Language Model (LLM) instance.
*
* @param {Object} options - Configuration options for the LLM.
* @param {string} options.model - The model identifier.
* @param {string} options.modelName - The specific name of the model.
* @param {number} options.temperature - The temperature setting for the model.
* @param {number} options.presence_penalty - The presence penalty for the model.
* @param {number} options.frequency_penalty - The frequency penalty for the model.
* @param {number} options.max_tokens - The maximum number of tokens for the model output.
* @param {boolean} options.streaming - Whether to use streaming for the model output.
* @param {Object} options.context - The context for the conversation.
* @param {number} options.tokenBuffer - The token buffer size.
* @param {number} options.initialMessageCount - The initial message count.
* @param {string} options.conversationId - The ID of the conversation.
* @param {string} options.user - The user identifier.
* @param {string} options.langchainProxy - The langchain proxy URL.
* @param {boolean} options.useOpenRouter - Whether to use OpenRouter.
* @param {Object} options.options - Additional options.
* @param {Object} options.options.headers - Custom headers for the request.
* @param {string} options.options.proxy - Proxy URL.
* @param {Object} options.options.req - The request object.
* @param {Object} options.options.res - The response object.
* @param {boolean} options.options.debug - Whether to enable debug mode.
* @param {string} options.apiKey - The API key for authentication.
* @param {Object} options.azure - Azure-specific configuration.
* @param {Object} options.abortController - The AbortController instance.
* @returns {Object} The initialized LLM instance.
*/
function initializeLLM(options) {
const {
model,
modelName,
temperature,
presence_penalty,
frequency_penalty,
max_tokens,
streaming,
user,
langchainProxy,
useOpenRouter,
options: { headers, proxy },
apiKey,
azure,
} = options;
const modelOptions = {
modelName: modelName || model,
temperature,
presence_penalty,
frequency_penalty,
user,
};
if (max_tokens) {
modelOptions.max_tokens = max_tokens;
}
const configOptions = {};
if (langchainProxy) {
configOptions.basePath = langchainProxy;
}
if (useOpenRouter) {
configOptions.basePath = 'https://openrouter.ai/api/v1';
configOptions.baseOptions = {
headers: {
'HTTP-Referer': 'https://librechat.ai',
'X-Title': 'LibreChat',
},
};
}
if (headers && typeof headers === 'object' && !Array.isArray(headers)) {
configOptions.baseOptions = {
headers: resolveHeaders({
...headers,
...configOptions?.baseOptions?.headers,
}),
};
}
if (proxy) {
configOptions.httpAgent = new HttpsProxyAgent(proxy);
configOptions.httpsAgent = new HttpsProxyAgent(proxy);
}
const llm = createLLM({
modelOptions,
configOptions,
openAIApiKey: apiKey,
azure,
streaming,
});
return llm;
}
module.exports = {
initializeLLM,
};
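/*
 * Call sketch (not from the diff): passes only the options initializeLLM
 * actually reads. The require path, model id, and user id are placeholder
 * assumptions; the API key is taken from the environment.
 */
const { initializeLLM } = require('./initializeLLM'); // path assumed
const llm = initializeLLM({
  model: 'gpt-4o-mini', // placeholder model id
  modelName: 'gpt-4o-mini',
  temperature: 0.2,
  presence_penalty: 0,
  frequency_penalty: 0,
  max_tokens: 1024,
  streaming: true,
  user: 'user-123', // placeholder user id
  langchainProxy: undefined,
  useOpenRouter: false,
  options: { headers: {}, proxy: undefined },
  apiKey: process.env.OPENAI_API_KEY,
  azure: undefined,
});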

View File

@@ -1,142 +0,0 @@
const { Constants } = require('librechat-data-provider');
const { createAbortController, handleAbortError } = require('~/server/middleware');
const { sendMessage } = require('~/server/utils');
const { saveMessage } = require('~/models');
const { logger } = require('~/config');
const AgentController = async (req, res, next, initializeClient, addTitle) => {
let {
text,
endpointOption,
conversationId,
parentMessageId = null,
overrideParentMessageId = null,
} = req.body;
let sender;
let userMessage;
let promptTokens;
let userMessageId;
let responseMessageId;
let userMessagePromise;
const newConvo = !conversationId;
const user = req.user.id;
const getReqData = (data = {}) => {
for (let key in data) {
if (key === 'userMessage') {
userMessage = data[key];
userMessageId = data[key].messageId;
} else if (key === 'userMessagePromise') {
userMessagePromise = data[key];
} else if (key === 'responseMessageId') {
responseMessageId = data[key];
} else if (key === 'promptTokens') {
promptTokens = data[key];
} else if (key === 'sender') {
sender = data[key];
} else if (!conversationId && key === 'conversationId') {
conversationId = data[key];
}
}
};
try {
/** @type {{ client: TAgentClient }} */
const { client } = await initializeClient({ req, res, endpointOption });
const getAbortData = () => ({
sender,
userMessage,
promptTokens,
conversationId,
userMessagePromise,
messageId: responseMessageId,
content: client.getContentParts(),
parentMessageId: overrideParentMessageId ?? userMessageId,
});
const { abortController, onStart } = createAbortController(req, res, getAbortData, getReqData);
res.on('close', () => {
logger.debug('[AgentController] Request closed');
if (!abortController) {
return;
} else if (abortController.signal.aborted) {
return;
} else if (abortController.requestCompleted) {
return;
}
abortController.abort();
logger.debug('[AgentController] Request aborted on close');
});
const messageOptions = {
user,
onStart,
getReqData,
conversationId,
parentMessageId,
abortController,
overrideParentMessageId,
progressOptions: {
res,
// parentMessageId: overrideParentMessageId || userMessageId,
},
};
let response = await client.sendMessage(text, messageOptions);
response.endpoint = endpointOption.endpoint;
const { conversation = {} } = await client.responsePromise;
conversation.title =
conversation && !conversation.title ? null : conversation?.title || 'New Chat';
if (client.options.attachments) {
userMessage.files = client.options.attachments;
delete userMessage.image_urls;
}
if (!abortController.signal.aborted) {
sendMessage(res, {
final: true,
conversation,
title: conversation.title,
requestMessage: userMessage,
responseMessage: response,
});
res.end();
await saveMessage(
req,
{ ...response, user },
{ context: 'api/server/controllers/agents/request.js - response end' },
);
}
if (!client.skipSaveUserMessage) {
await saveMessage(req, userMessage, {
context: 'api/server/controllers/agents/request.js - don\'t skip saving user message',
});
}
if (addTitle && parentMessageId === Constants.NO_PARENT && newConvo) {
addTitle(req, {
text,
response,
client,
});
}
} catch (error) {
handleAbortError(res, req, error, {
conversationId,
sender,
messageId: responseMessageId,
parentMessageId: userMessageId ?? parentMessageId,
});
}
};
module.exports = AgentController;
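/*
 * Route-mounting sketch (hypothetical wiring, not shown in this diff):
 * AgentController expects an initializeClient factory and an optional
 * addTitle callback. The require paths and the factory/title modules below
 * are assumptions for illustration only.
 */
const express = require('express');
const AgentController = require('./request'); // path assumed
const { initializeClient } = require('~/server/services/Endpoints/agents'); // assumed factory
const { addTitle } = require('~/server/services/Endpoints/agents/title'); // assumed, optional
const router = express.Router();
router.post('/chat', async (req, res, next) => {
  // Delegates abort handling, message persistence, and titling to the controller.
  await AgentController(req, res, next, initializeClient, addTitle);
});
module.exports = router;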

Some files were not shown because too many files have changed in this diff.