Compare commits

..

5 Commits

949 changed files with 60566 additions and 60398 deletions

View File

@@ -20,11 +20,6 @@ DOMAIN_CLIENT=http://localhost:3080
DOMAIN_SERVER=http://localhost:3080
NO_INDEX=true
# Use the address that is at most n number of hops away from the Express application.
# req.socket.remoteAddress is the first hop, and the rest are looked for in the X-Forwarded-For header from right to left.
# A value of 0 means that the first untrusted address would be req.socket.remoteAddress, i.e. there is no reverse proxy.
# Defaulted to 1.
TRUST_PROXY=1
#===============#
# JSON Logging #
@@ -58,7 +53,7 @@ DEBUG_CONSOLE=false
# Endpoints #
#===================================================#
# ENDPOINTS=openAI,assistants,azureOpenAI,google,gptPlugins,anthropic
# ENDPOINTS=openAI,assistants,azureOpenAI,bingAI,google,gptPlugins,anthropic
PROXY=
@@ -88,7 +83,7 @@ PROXY=
#============#
ANTHROPIC_API_KEY=user_provided
# ANTHROPIC_MODELS=claude-3-7-sonnet-latest,claude-3-7-sonnet-20250219,claude-3-5-haiku-20241022,claude-3-5-sonnet-20241022,claude-3-5-sonnet-latest,claude-3-5-sonnet-20240620,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_MODELS=claude-3-5-haiku-20241022,claude-3-5-sonnet-20241022,claude-3-5-sonnet-latest,claude-3-5-sonnet-20240620,claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
# ANTHROPIC_REVERSE_PROXY=
#============#
@@ -110,6 +105,13 @@ ANTHROPIC_API_KEY=user_provided
# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME= # Deprecated
# PLUGINS_USE_AZURE="true" # Deprecated
#============#
# BingAI #
#============#
BINGAI_TOKEN=user_provided
# BINGAI_HOST=https://cn.bing.com
#=================#
# AWS Bedrock #
#=================#
@@ -117,7 +119,6 @@ ANTHROPIC_API_KEY=user_provided
# BEDROCK_AWS_DEFAULT_REGION=us-east-1 # A default region must be provided
# BEDROCK_AWS_ACCESS_KEY_ID=someAccessKey
# BEDROCK_AWS_SECRET_ACCESS_KEY=someSecretAccessKey
# BEDROCK_AWS_SESSION_TOKEN=someSessionToken
# Note: This example list is not meant to be exhaustive. If omitted, all known, supported model IDs will be included for you.
# BEDROCK_AWS_MODELS=anthropic.claude-3-5-sonnet-20240620-v1:0,meta.llama3-1-8b-instruct-v1:0
@@ -136,13 +137,10 @@ ANTHROPIC_API_KEY=user_provided
#============#
GOOGLE_KEY=user_provided
# GOOGLE_REVERSE_PROXY=
# Some reverse proxies do not support the X-goog-api-key header, uncomment to pass the API key in Authorization header instead.
# GOOGLE_AUTH_HEADER=true
# Gemini API (AI Studio)
# GOOGLE_MODELS=gemini-2.0-flash-exp,gemini-2.0-flash-thinking-exp-1219,gemini-exp-1121,gemini-exp-1114,gemini-1.5-flash-latest,gemini-1.0-pro,gemini-1.0-pro-001,gemini-1.0-pro-latest,gemini-1.0-pro-vision-latest,gemini-1.5-pro-latest,gemini-pro,gemini-pro-vision
# GOOGLE_MODELS=gemini-1.5-flash-latest,gemini-1.0-pro,gemini-1.0-pro-001,gemini-1.0-pro-latest,gemini-1.0-pro-vision-latest,gemini-1.5-pro-latest,gemini-pro,gemini-pro-vision
# Vertex AI
# GOOGLE_MODELS=gemini-1.5-flash-preview-0514,gemini-1.5-pro-preview-0514,gemini-1.0-pro-vision-001,gemini-1.0-pro-002,gemini-1.0-pro-001,gemini-pro-vision,gemini-1.0-pro
@@ -168,22 +166,21 @@ GOOGLE_KEY=user_provided
# GOOGLE_SAFETY_HATE_SPEECH=BLOCK_ONLY_HIGH
# GOOGLE_SAFETY_HARASSMENT=BLOCK_ONLY_HIGH
# GOOGLE_SAFETY_DANGEROUS_CONTENT=BLOCK_ONLY_HIGH
# GOOGLE_SAFETY_CIVIC_INTEGRITY=BLOCK_ONLY_HIGH
#============#
# OpenAI #
#============#
OPENAI_API_KEY=user_provided
# OPENAI_MODELS=o1,o1-mini,o1-preview,gpt-4o,chatgpt-4o-latest,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
# OPENAI_MODELS=gpt-4o,chatgpt-4o-latest,gpt-4o-mini,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
DEBUG_OPENAI=false
# TITLE_CONVO=false
# OPENAI_TITLE_MODEL=gpt-4o-mini
# OPENAI_TITLE_MODEL=gpt-3.5-turbo
# OPENAI_SUMMARIZE=true
# OPENAI_SUMMARY_MODEL=gpt-4o-mini
# OPENAI_SUMMARY_MODEL=gpt-3.5-turbo
# OPENAI_FORCE_PROMPT=true
@@ -209,6 +206,12 @@ ASSISTANTS_API_KEY=user_provided
# More info, including how to enable use of Assistants with Azure here:
# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints/azure#using-assistants-with-azure
#============#
# OpenRouter #
#============#
# !!!Warning: Use the variable above instead of this one. Using this one will override the OpenAI endpoint
# OPENROUTER_API_KEY=
#============#
# Plugins #
#============#
@@ -248,16 +251,11 @@ AZURE_AI_SEARCH_SEARCH_OPTION_SELECT=
# DALLE3_AZURE_API_VERSION=
# DALLE2_AZURE_API_VERSION=
# Google
#-----------------
GOOGLE_SEARCH_API_KEY=
GOOGLE_CSE_ID=
# YOUTUBE
#-----------------
YOUTUBE_API_KEY=
# SerpAPI
#-----------------
SERPAPI_API_KEY=
@@ -291,10 +289,6 @@ MEILI_NO_ANALYTICS=true
MEILI_HOST=http://0.0.0.0:7700
MEILI_MASTER_KEY=DrhYf7zENyR6AlUCKmnz0eYASOQdl6zxH7s7MKFSfFCt
# Optional: Disable indexing, useful in a multi-node setup
# where only one instance should perform an index sync.
# MEILI_NO_SYNC=true
#==================================================#
# Speech to Text & Text to Speech #
#==================================================#
@@ -392,22 +386,12 @@ FACEBOOK_CALLBACK_URL=/oauth/facebook/callback
GITHUB_CLIENT_ID=
GITHUB_CLIENT_SECRET=
GITHUB_CALLBACK_URL=/oauth/github/callback
# GitHub Enterprise
# GITHUB_ENTERPRISE_BASE_URL=
# GITHUB_ENTERPRISE_USER_AGENT=
# Google
GOOGLE_CLIENT_ID=
GOOGLE_CLIENT_SECRET=
GOOGLE_CALLBACK_URL=/oauth/google/callback
# Apple
APPLE_CLIENT_ID=
APPLE_TEAM_ID=
APPLE_KEY_ID=
APPLE_PRIVATE_KEY_PATH=
APPLE_CALLBACK_URL=/oauth/apple/callback
# OpenID
OPENID_CLIENT_ID=
OPENID_CLIENT_SECRET=
@@ -498,16 +482,6 @@ HELP_AND_FAQ_URL=https://librechat.ai
# Google tag manager id
#ANALYTICS_GTM_ID=user provided google tag manager id
#===============#
# REDIS Options #
#===============#
# REDIS_URI=10.10.10.10:6379
# USE_REDIS=true
# USE_REDIS_CLUSTER=true
# REDIS_CA=/path/to/ca.crt
#==================================================#
# Others #
#==================================================#
@@ -515,6 +489,9 @@ HELP_AND_FAQ_URL=https://librechat.ai
# NODE_ENV=
# REDIS_URI=
# USE_REDIS=
# E2E_USER_EMAIL=
# E2E_USER_PASSWORD=
@@ -532,9 +509,4 @@ HELP_AND_FAQ_URL=https://librechat.ai
# no-cache: Forces validation with server before using cached version
# no-store: Prevents storing the response entirely
# must-revalidate: Prevents using stale content when offline
#=====================================================#
# OpenWeather #
#=====================================================#
OPENWEATHER_API_KEY=
# must-revalidate: Prevents using stale content when offline

173
.eslintrc.js Normal file
View File

@@ -0,0 +1,173 @@
module.exports = {
env: {
browser: true,
es2021: true,
node: true,
commonjs: true,
es6: true,
},
extends: [
'eslint:recommended',
'plugin:react/recommended',
'plugin:react-hooks/recommended',
'plugin:jest/recommended',
'prettier',
'plugin:jsx-a11y/recommended',
],
ignorePatterns: [
'client/dist/**/*',
'client/public/**/*',
'e2e/playwright-report/**/*',
'packages/data-provider/types/**/*',
'packages/data-provider/dist/**/*',
'packages/data-provider/test_bundle/**/*',
'data-node/**/*',
'meili_data/**/*',
'node_modules/**/*',
],
parser: '@typescript-eslint/parser',
parserOptions: {
ecmaVersion: 'latest',
sourceType: 'module',
ecmaFeatures: {
jsx: true,
},
},
plugins: ['react', 'react-hooks', '@typescript-eslint', 'import', 'jsx-a11y'],
rules: {
'react/react-in-jsx-scope': 'off',
'@typescript-eslint/ban-ts-comment': ['error', { 'ts-ignore': 'allow' }],
indent: ['error', 2, { SwitchCase: 1 }],
'max-len': [
'error',
{
code: 120,
ignoreStrings: true,
ignoreTemplateLiterals: true,
ignoreComments: true,
},
],
'linebreak-style': 0,
curly: ['error', 'all'],
semi: ['error', 'always'],
'object-curly-spacing': ['error', 'always'],
'no-multiple-empty-lines': ['error', { max: 1 }],
'no-trailing-spaces': 'error',
'comma-dangle': ['error', 'always-multiline'],
// "arrow-parens": [2, "as-needed", { requireForBlockBody: true }],
// 'no-plusplus': ['error', { allowForLoopAfterthoughts: true }],
'no-console': 'off',
'import/no-cycle': 'error',
'import/no-self-import': 'error',
'import/extensions': 'off',
'no-promise-executor-return': 'off',
'no-param-reassign': 'off',
'no-continue': 'off',
'no-restricted-syntax': 'off',
'react/prop-types': ['off'],
'react/display-name': ['off'],
'no-nested-ternary': 'error',
'no-unused-vars': ['error', { varsIgnorePattern: '^_' }],
quotes: ['error', 'single'],
},
overrides: [
{
files: ['**/*.ts', '**/*.tsx'],
rules: {
'no-unused-vars': 'off', // off because it conflicts with '@typescript-eslint/no-unused-vars'
'react/display-name': 'off',
'@typescript-eslint/no-unused-vars': 'warn',
},
},
{
files: ['rollup.config.js', '.eslintrc.js', 'jest.config.js'],
env: {
node: true,
},
},
{
files: [
'**/*.test.js',
'**/*.test.jsx',
'**/*.test.ts',
'**/*.test.tsx',
'**/*.spec.js',
'**/*.spec.jsx',
'**/*.spec.ts',
'**/*.spec.tsx',
'setupTests.js',
],
env: {
jest: true,
node: true,
},
rules: {
'react/display-name': 'off',
'react/prop-types': 'off',
'react/no-unescaped-entities': 'off',
},
},
{
files: ['**/*.ts', '**/*.tsx'],
parser: '@typescript-eslint/parser',
parserOptions: {
project: './client/tsconfig.json',
},
plugins: ['@typescript-eslint/eslint-plugin', 'jest'],
extends: [
'plugin:@typescript-eslint/eslint-recommended',
'plugin:@typescript-eslint/recommended',
],
rules: {
'@typescript-eslint/no-explicit-any': 'error',
'@typescript-eslint/no-unnecessary-condition': 'warn',
'@typescript-eslint/strict-boolean-expressions': 'warn',
},
},
{
files: './packages/data-provider/**/*.ts',
overrides: [
{
files: '**/*.ts',
parser: '@typescript-eslint/parser',
parserOptions: {
project: './packages/data-provider/tsconfig.json',
},
},
],
},
{
files: './config/translations/**/*.ts',
parser: '@typescript-eslint/parser',
parserOptions: {
project: './config/translations/tsconfig.json',
},
},
{
files: ['./packages/data-provider/specs/**/*.ts'],
parserOptions: {
project: './packages/data-provider/tsconfig.spec.json',
},
},
],
settings: {
react: {
createClass: 'createReactClass', // Regex for Component Factory to use,
// default to "createReactClass"
pragma: 'React', // Pragma to use, default to "React"
fragment: 'Fragment', // Fragment to use (may be a property of <pragma>), default to "Fragment"
version: 'detect', // React version. "detect" automatically picks the version you have installed.
},
'import/parsers': {
'@typescript-eslint/parser': ['.ts', '.tsx'],
},
'import/resolver': {
typescript: {
project: ['./client/tsconfig.json'],
},
node: {
project: ['./client/tsconfig.json'],
},
},
},
};

View File

@@ -1,19 +1,12 @@
name: Bug Report
description: File a bug report
title: "[Bug]: "
labels: ["🐛 bug"]
labels: ["bug"]
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out this bug report!
Before submitting, please:
- Search existing [Issues and Discussions](https://github.com/danny-avila/LibreChat/discussions) to see if your bug has already been reported
- Use [Discussions](https://github.com/danny-avila/LibreChat/discussions) instead of Issues for:
- General inquiries
- Help with setup
- Questions about whether you're experiencing a bug
- type: textarea
id: what-happened
attributes:
@@ -22,23 +15,6 @@ body:
placeholder: Please give as many details as possible
validations:
required: true
- type: textarea
id: version-info
attributes:
label: Version Information
description: |
If using Docker, please run and provide the output of:
```bash
docker images | grep librechat
```
If running from source, please run and provide the output of:
```bash
git rev-parse HEAD
```
placeholder: Paste the output here
validations:
required: true
- type: textarea
id: steps-to-reproduce
attributes:
@@ -63,21 +39,7 @@ body:
id: logs
attributes:
label: Relevant log output
description: |
Please paste relevant logs that were created when reproducing the error.
Log locations:
- Docker: Project root directory ./logs
- npm: ./api/logs
There are two types of logs that can help diagnose the issue:
- debug logs (debug-YYYY-MM-DD.log)
- error logs (error-YYYY-MM-DD.log)
Error logs contain exact stack traces and are especially helpful, but both can provide valuable information.
Please only include the relevant portions of logs that correspond to when you reproduced the error.
For UI-related issues, browser console logs can be very helpful. You can provide these as screenshots or paste the text here.
description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
render: shell
- type: textarea
id: screenshots
@@ -91,4 +53,4 @@ body:
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md)
options:
- label: I agree to follow this project's Code of Conduct
required: true
required: true

View File

@@ -1,7 +1,7 @@
name: Feature Request
description: File a feature request
title: "[Enhancement]: "
labels: ["enhancement"]
title: "Enhancement: "
labels: ["enhancement"]
body:
- type: markdown
attributes:

View File

@@ -1,42 +0,0 @@
name: Locize Translation Access Request
description: Request access to an additional language in Locize for LibreChat translations.
title: "Locize Access Request: "
labels: ["🌍 i18n", "🔑 access request"]
body:
- type: markdown
attributes:
value: |
Thank you for your interest in contributing to LibreChat translations!
Please fill out the form below to request access to an additional language in **Locize**.
**🔗 Available Languages:** [View the list here](https://www.librechat.ai/docs/translation)
**📌 Note:** Ensure that the requested language is supported before submitting your request.
- type: input
id: account_name
attributes:
label: Locize Account Name
description: Please provide your Locize account name (e.g., John Doe).
placeholder: e.g., John Doe
validations:
required: true
- type: input
id: language_requested
attributes:
label: Language Code (ISO 639-1)
description: |
Enter the **ISO 639-1** language code for the language you want to translate into.
Example: `es` for Spanish, `zh-Hant` for Traditional Chinese.
**🔗 Reference:** [Available Languages](https://www.librechat.ai/docs/translation)
placeholder: e.g., es
validations:
required: true
- type: checkboxes
id: agreement
attributes:
label: Agreement
description: By submitting this request, you confirm that you will contribute responsibly and adhere to the project guidelines.
options:
- label: I agree to use my access solely for contributing to LibreChat translations.
required: true

View File

@@ -1,33 +0,0 @@
name: New Language Request
description: Request to add a new language for LibreChat translations.
title: "New Language Request: "
labels: ["✨ enhancement", "🌍 i18n"]
body:
- type: markdown
attributes:
value: |
Thank you for taking the time to submit a new language request! Please fill out the following details so we can review your request.
- type: input
id: language_name
attributes:
label: Language Name
description: Please provide the full name of the language (e.g., Spanish, Mandarin).
placeholder: e.g., Spanish
validations:
required: true
- type: input
id: iso_code
attributes:
label: ISO 639-1 Code
description: Please provide the ISO 639-1 code for the language (e.g., es for Spanish). You can refer to [this list](https://www.w3schools.com/tags/ref_language_codes.asp) for valid codes.
placeholder: e.g., es
validations:
required: true
- type: checkboxes
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md).
options:
- label: I agree to follow this project's Code of Conduct
required: true

50
.github/ISSUE_TEMPLATE/QUESTION.yml vendored Normal file
View File

@@ -0,0 +1,50 @@
name: Question
description: Ask your question
title: "[Question]: "
labels: ["question"]
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill this!
- type: textarea
id: what-is-your-question
attributes:
label: What is your question?
description: Please give as many details as possible
placeholder: Please give as many details as possible
validations:
required: true
- type: textarea
id: more-details
attributes:
label: More Details
description: Please provide more details if needed.
placeholder: Please provide more details if needed.
validations:
required: true
- type: dropdown
id: browsers
attributes:
label: What is the main subject of your question?
multiple: true
options:
- Documentation
- Installation
- UI
- Endpoints
- User System/OAuth
- Other
- type: textarea
id: screenshots
attributes:
label: Screenshots
description: If applicable, add screenshots to help explain your problem. You can drag and drop, paste images directly here or link to them.
- type: checkboxes
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md)
options:
- label: I agree to follow this project's Code of Conduct
required: true

View File

@@ -1,60 +0,0 @@
{
"categories": [
{
"title": "### ✨ New Features",
"labels": ["feat"]
},
{
"title": "### 🌍 Internationalization",
"labels": ["i18n"]
},
{
"title": "### 👐 Accessibility",
"labels": ["a11y"]
},
{
"title": "### 🔧 Fixes",
"labels": ["Fix", "fix"]
},
{
"title": "### ⚙️ Other Changes",
"labels": ["ci", "style", "docs", "refactor", "chore"]
}
],
"ignore_labels": [
"🔁 duplicate",
"📊 analytics",
"🌱 good first issue",
"🔍 investigation",
"🙏 help wanted",
"❌ invalid",
"❓ question",
"🚫 wontfix",
"🚀 release",
"version"
],
"base_branches": ["main"],
"sort": {
"order": "ASC",
"on_property": "mergedAt"
},
"label_extractor": [
{
"pattern": "^(?:[^A-Za-z0-9]*)(feat|fix|chore|docs|refactor|ci|style|a11y|i18n)\\s*:",
"target": "$1",
"flags": "i",
"on_property": "title",
"method": "match"
},
{
"pattern": "^(?:[^A-Za-z0-9]*)(v\\d+\\.\\d+\\.\\d+(?:-rc\\d+)?).*",
"target": "version",
"flags": "i",
"on_property": "title",
"method": "match"
}
],
"template": "## [#{{TO_TAG}}] - #{{TO_TAG_DATE}}\n\nChanges from #{{FROM_TAG}} to #{{TO_TAG}}.\n\n#{{CHANGELOG}}\n\n[See full release details][release-#{{TO_TAG}}]\n\n[release-#{{TO_TAG}}]: https://github.com/#{{OWNER}}/#{{REPO}}/releases/tag/#{{TO_TAG}}\n\n---",
"pr_template": "- #{{TITLE}} by **@#{{AUTHOR}}** in [##{{NUMBER}}](#{{URL}})",
"empty_template": "- no changes"
}

View File

@@ -1,68 +0,0 @@
{
"categories": [
{
"title": "### ✨ New Features",
"labels": ["feat"]
},
{
"title": "### 🌍 Internationalization",
"labels": ["i18n"]
},
{
"title": "### 👐 Accessibility",
"labels": ["a11y"]
},
{
"title": "### 🔧 Fixes",
"labels": ["Fix", "fix"]
},
{
"title": "### ⚙️ Other Changes",
"labels": ["ci", "style", "docs", "refactor", "chore"]
}
],
"ignore_labels": [
"🔁 duplicate",
"📊 analytics",
"🌱 good first issue",
"🔍 investigation",
"🙏 help wanted",
"❌ invalid",
"❓ question",
"🚫 wontfix",
"🚀 release",
"version",
"action"
],
"base_branches": ["main"],
"sort": {
"order": "ASC",
"on_property": "mergedAt"
},
"label_extractor": [
{
"pattern": "^(?:[^A-Za-z0-9]*)(feat|fix|chore|docs|refactor|ci|style|a11y|i18n)\\s*:",
"target": "$1",
"flags": "i",
"on_property": "title",
"method": "match"
},
{
"pattern": "^(?:[^A-Za-z0-9]*)(v\\d+\\.\\d+\\.\\d+(?:-rc\\d+)?).*",
"target": "version",
"flags": "i",
"on_property": "title",
"method": "match"
},
{
"pattern": "^(?:[^A-Za-z0-9]*)(action)\\b.*",
"target": "action",
"flags": "i",
"on_property": "title",
"method": "match"
}
],
"template": "## [Unreleased]\n\n#{{CHANGELOG}}\n\n---",
"pr_template": "- #{{TITLE}} by **@#{{AUTHOR}}** in [##{{NUMBER}}](#{{URL}})",
"empty_template": "- no changes"
}

View File

@@ -33,12 +33,9 @@ jobs:
- name: Install dependencies
run: npm ci
- name: Install Data Provider Package
- name: Install Data Provider
run: npm run build:data-provider
- name: Install MCP Package
run: npm run build:mcp
- name: Create empty auth.json file
run: |
mkdir -p api/data
@@ -61,4 +58,9 @@ jobs:
run: cd api && npm run test:ci
- name: Run librechat-data-provider unit tests
run: cd packages/data-provider && npm run test:ci
run: cd packages/data-provider && npm run test:ci
- name: Run linters
uses: wearerequired/lint-action@v2
with:
eslint: true

View File

@@ -1,73 +0,0 @@
name: ESLint Code Quality Checks
on:
pull_request:
branches:
- main
- dev
- release/*
paths:
- 'api/**'
- 'client/**'
jobs:
eslint_checks:
name: Run ESLint Linting
runs-on: ubuntu-latest
permissions:
contents: read
security-events: write
actions: read
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Node.js 20.x
uses: actions/setup-node@v4
with:
node-version: 20
cache: npm
- name: Install dependencies
run: npm ci
# Run ESLint on changed files within the api/ and client/ directories.
- name: Run ESLint on changed files
env:
SARIF_ESLINT_IGNORE_SUPPRESSED: "true"
run: |
# Extract the base commit SHA from the pull_request event payload.
BASE_SHA=$(jq --raw-output .pull_request.base.sha "$GITHUB_EVENT_PATH")
echo "Base commit SHA: $BASE_SHA"
# Get changed files (only JS/TS files in api/ or client/)
CHANGED_FILES=$(git diff --name-only --diff-filter=ACMRTUXB "$BASE_SHA" HEAD | grep -E '^(api|client)/.*\.(js|jsx|ts|tsx)$' || true)
# Debug output
echo "Changed files:"
echo "$CHANGED_FILES"
# Ensure there are files to lint before running ESLint
if [[ -z "$CHANGED_FILES" ]]; then
echo "No matching files changed. Skipping ESLint."
echo "UPLOAD_SARIF=false" >> $GITHUB_ENV
exit 0
fi
# Set variable to allow SARIF upload
echo "UPLOAD_SARIF=true" >> $GITHUB_ENV
# Run ESLint
npx eslint --no-error-on-unmatched-pattern \
--config eslint.config.mjs \
--format @microsoft/eslint-formatter-sarif \
--output-file eslint-results.sarif $CHANGED_FILES || true
- name: Upload analysis results to GitHub
if: env.UPLOAD_SARIF == 'true'
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: eslint-results.sarif
wait-for-processing: true

View File

@@ -1,94 +0,0 @@
name: Generate Release Changelog PR
on:
push:
tags:
- 'v*.*.*'
jobs:
generate-release-changelog-pr:
permissions:
contents: write # Needed for pushing commits and creating branches.
pull-requests: write
runs-on: ubuntu-latest
steps:
# 1. Checkout the repository (with full history).
- name: Checkout Repository
uses: actions/checkout@v4
with:
fetch-depth: 0
# 2. Generate the release changelog using our custom configuration.
- name: Generate Release Changelog
id: generate_release
uses: mikepenz/release-changelog-builder-action@v5.1.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
configuration: ".github/configuration-release.json"
owner: ${{ github.repository_owner }}
repo: ${{ github.event.repository.name }}
outputFile: CHANGELOG-release.md
# 3. Update the main CHANGELOG.md:
# - If it doesn't exist, create it with a basic header.
# - Remove the "Unreleased" section (if present).
# - Prepend the new release changelog above previous releases.
# - Remove all temporary files before committing.
- name: Update CHANGELOG.md
run: |
# Determine the release tag, e.g. "v1.2.3"
TAG=${GITHUB_REF##*/}
echo "Using release tag: $TAG"
# Ensure CHANGELOG.md exists; if not, create a basic header.
if [ ! -f CHANGELOG.md ]; then
echo "# Changelog" > CHANGELOG.md
echo "" >> CHANGELOG.md
echo "All notable changes to this project will be documented in this file." >> CHANGELOG.md
echo "" >> CHANGELOG.md
fi
echo "Updating CHANGELOG.md…"
# Remove the "Unreleased" section (from "## [Unreleased]" until the first occurrence of '---') if it exists.
if grep -q "^## \[Unreleased\]" CHANGELOG.md; then
awk '/^## \[Unreleased\]/{flag=1} flag && /^---/{flag=0; next} !flag' CHANGELOG.md > CHANGELOG.cleaned
else
cp CHANGELOG.md CHANGELOG.cleaned
fi
# Split the cleaned file into:
# - header.md: content before the first release header ("## [v...").
# - tail.md: content from the first release header onward.
awk '/^## \[v/{exit} {print}' CHANGELOG.cleaned > header.md
awk 'f{print} /^## \[v/{f=1; print}' CHANGELOG.cleaned > tail.md
# Combine header, the new release changelog, and the tail.
echo "Combining updated changelog parts..."
cat header.md CHANGELOG-release.md > CHANGELOG.md.new
echo "" >> CHANGELOG.md.new
cat tail.md >> CHANGELOG.md.new
mv CHANGELOG.md.new CHANGELOG.md
# Remove temporary files.
rm -f CHANGELOG.cleaned header.md tail.md CHANGELOG-release.md
echo "Final CHANGELOG.md content:"
cat CHANGELOG.md
# 4. Create (or update) the Pull Request with the updated CHANGELOG.md.
- name: Create Pull Request
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.GITHUB_TOKEN }}
sign-commits: true
commit-message: "chore: update CHANGELOG for release ${GITHUB_REF##*/}"
base: main
branch: "changelog/${GITHUB_REF##*/}"
reviewers: danny-avila
title: "chore: update CHANGELOG for release ${GITHUB_REF##*/}"
body: |
**Description**:
- This PR updates the CHANGELOG.md by removing the "Unreleased" section and adding new release notes for release ${GITHUB_REF##*/} above previous releases.

View File

@@ -1,106 +0,0 @@
name: Generate Unreleased Changelog PR
on:
schedule:
- cron: "0 0 * * 1" # Runs every Monday at 00:00 UTC
jobs:
generate-unreleased-changelog-pr:
permissions:
contents: write # Needed for pushing commits and creating branches.
pull-requests: write
runs-on: ubuntu-latest
steps:
# 1. Checkout the repository on main.
- name: Checkout Repository on Main
uses: actions/checkout@v4
with:
ref: main
fetch-depth: 0
# 4. Get the latest version tag.
- name: Get Latest Tag
id: get_latest_tag
run: |
LATEST_TAG=$(git describe --tags $(git rev-list --tags --max-count=1) || echo "none")
echo "Latest tag: $LATEST_TAG"
echo "tag=$LATEST_TAG" >> $GITHUB_OUTPUT
# 5. Generate the Unreleased changelog.
- name: Generate Unreleased Changelog
id: generate_unreleased
uses: mikepenz/release-changelog-builder-action@v5.1.0
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
configuration: ".github/configuration-unreleased.json"
owner: ${{ github.repository_owner }}
repo: ${{ github.event.repository.name }}
outputFile: CHANGELOG-unreleased.md
fromTag: ${{ steps.get_latest_tag.outputs.tag }}
toTag: main
# 7. Update CHANGELOG.md with the new Unreleased section.
- name: Update CHANGELOG.md
id: update_changelog
run: |
# Create CHANGELOG.md if it doesn't exist.
if [ ! -f CHANGELOG.md ]; then
echo "# Changelog" > CHANGELOG.md
echo "" >> CHANGELOG.md
echo "All notable changes to this project will be documented in this file." >> CHANGELOG.md
echo "" >> CHANGELOG.md
fi
echo "Updating CHANGELOG.md…"
# Extract content before the "## [Unreleased]" (or first version header if missing).
if grep -q "^## \[Unreleased\]" CHANGELOG.md; then
awk '/^## \[Unreleased\]/{exit} {print}' CHANGELOG.md > CHANGELOG_TMP.md
else
awk '/^## \[v/{exit} {print}' CHANGELOG.md > CHANGELOG_TMP.md
fi
# Append the generated Unreleased changelog.
echo "" >> CHANGELOG_TMP.md
cat CHANGELOG-unreleased.md >> CHANGELOG_TMP.md
echo "" >> CHANGELOG_TMP.md
# Append the remainder of the original changelog (starting from the first version header).
awk 'f{print} /^## \[v/{f=1; print}' CHANGELOG.md >> CHANGELOG_TMP.md
# Replace the old file with the updated file.
mv CHANGELOG_TMP.md CHANGELOG.md
# Remove the temporary generated file.
rm -f CHANGELOG-unreleased.md
echo "Final CHANGELOG.md:"
cat CHANGELOG.md
# 8. Check if CHANGELOG.md has any updates.
- name: Check for CHANGELOG.md changes
id: changelog_changes
run: |
if git diff --quiet CHANGELOG.md; then
echo "has_changes=false" >> $GITHUB_OUTPUT
else
echo "has_changes=true" >> $GITHUB_OUTPUT
fi
# 9. Create (or update) the Pull Request only if there are changes.
- name: Create Pull Request
if: steps.changelog_changes.outputs.has_changes == 'true'
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.GITHUB_TOKEN }}
base: main
branch: "changelog/unreleased-update"
sign-commits: true
commit-message: "action: update Unreleased changelog"
title: "action: update Unreleased changelog"
body: |
**Description**:
- This PR updates the Unreleased section in CHANGELOG.md.
- It compares the current main branch with the latest version tag (determined as ${{ steps.get_latest_tag.outputs.tag }}),
regenerates the Unreleased changelog, removes any old Unreleased block, and inserts the new content.

View File

@@ -1,93 +0,0 @@
name: Detect Unused i18next Strings
on:
pull_request:
paths:
- "client/src/**"
- "api/**"
jobs:
detect-unused-i18n-keys:
runs-on: ubuntu-latest
permissions:
pull-requests: write # Required for posting PR comments
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Find unused i18next keys
id: find-unused
run: |
echo "🔍 Scanning for unused i18next keys..."
# Define paths
I18N_FILE="client/src/locales/en/translation.json"
SOURCE_DIRS=("client/src" "api")
# Check if translation file exists
if [[ ! -f "$I18N_FILE" ]]; then
echo "::error title=Missing i18n File::Translation file not found: $I18N_FILE"
exit 1
fi
# Extract all keys from the JSON file
KEYS=$(jq -r 'keys[]' "$I18N_FILE")
# Track unused keys
UNUSED_KEYS=()
# Check if each key is used in the source code
for KEY in $KEYS; do
FOUND=false
for DIR in "${SOURCE_DIRS[@]}"; do
if grep -r --include=\*.{js,jsx,ts,tsx} -q "$KEY" "$DIR"; then
FOUND=true
break
fi
done
if [[ "$FOUND" == false ]]; then
UNUSED_KEYS+=("$KEY")
fi
done
# Output results
if [[ ${#UNUSED_KEYS[@]} -gt 0 ]]; then
echo "🛑 Found ${#UNUSED_KEYS[@]} unused i18n keys:"
echo "unused_keys=$(echo "${UNUSED_KEYS[@]}" | jq -R -s -c 'split(" ")')" >> $GITHUB_ENV
for KEY in "${UNUSED_KEYS[@]}"; do
echo "::warning title=Unused i18n Key::'$KEY' is defined but not used in the codebase."
done
else
echo "✅ No unused i18n keys detected!"
echo "unused_keys=[]" >> $GITHUB_ENV
fi
- name: Post verified comment on PR
if: env.unused_keys != '[]'
run: |
PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")
# Format the unused keys list as checkboxes for easy manual checking.
FILTERED_KEYS=$(echo "$unused_keys" | jq -r '.[]' | grep -v '^\s*$' | sed 's/^/- [ ] `/;s/$/`/' )
COMMENT_BODY=$(cat <<EOF
### 🚨 Unused i18next Keys Detected
The following translation keys are defined in \`translation.json\` but are **not used** in the codebase:
$FILTERED_KEYS
⚠️ **Please remove these unused keys to keep the translation files clean.**
EOF
)
gh api "repos/${{ github.repository }}/issues/${PR_NUMBER}/comments" \
-f body="$COMMENT_BODY" \
-H "Authorization: token ${{ secrets.GITHUB_TOKEN }}"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Fail workflow if unused keys found
if: env.unused_keys != '[]'
run: exit 1

View File

@@ -1,72 +0,0 @@
name: Sync Locize Translations & Create Translation PR
on:
push:
branches: [main]
repository_dispatch:
types: [locize/versionPublished]
jobs:
sync-translations:
name: Sync Translation Keys with Locize
runs-on: ubuntu-latest
steps:
- name: Checkout Repository
uses: actions/checkout@v4
- name: Set Up Node.js
uses: actions/setup-node@v4
with:
node-version: 20
- name: Install locize CLI
run: npm install -g locize-cli
# Sync translations (Push missing keys & remove deleted ones)
- name: Sync Locize with Repository
if: ${{ github.event_name == 'push' }}
run: |
cd client/src/locales
locize sync --api-key ${{ secrets.LOCIZE_API_KEY }} --project-id ${{ secrets.LOCIZE_PROJECT_ID }} --language en
# When triggered by repository_dispatch, skip sync step.
- name: Skip sync step on non-push events
if: ${{ github.event_name != 'push' }}
run: echo "Skipping sync as the event is not a push."
create-pull-request:
name: Create Translation PR on Version Published
runs-on: ubuntu-latest
needs: sync-translations
permissions:
contents: write
pull-requests: write
steps:
# 1. Check out the repository.
- name: Checkout Repository
uses: actions/checkout@v4
# 2. Download translation files from locize.
- name: Download Translations from locize
uses: locize/download@v1
with:
project-id: ${{ secrets.LOCIZE_PROJECT_ID }}
path: "client/src/locales"
# 3. Create a Pull Request using built-in functionality.
- name: Create Pull Request
uses: peter-evans/create-pull-request@v7
with:
token: ${{ secrets.GITHUB_TOKEN }}
sign-commits: true
commit-message: "🌍 i18n: Update translation.json with latest translations"
base: main
branch: i18n/locize-translation-update
reviewers: danny-avila
title: "🌍 i18n: Update translation.json with latest translations"
body: |
**Description**:
- 🎯 **Objective**: Update `translation.json` with the latest translations from locize.
- 🔍 **Details**: This PR is automatically generated upon receiving a versionPublished event with version "latest". It reflects the newest translations provided by locize.
- ✅ **Status**: Ready for review.
labels: "🌍 i18n"

View File

@@ -1,153 +0,0 @@
name: Detect Unused NPM Packages
on:
pull_request:
paths:
- 'package.json'
- 'package-lock.json'
- 'client/**'
- 'api/**'
jobs:
detect-unused-packages:
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- uses: actions/checkout@v4
- name: Use Node.js 20.x
uses: actions/setup-node@v4
with:
node-version: 20
cache: 'npm'
- name: Install depcheck
run: npm install -g depcheck
- name: Validate JSON files
run: |
for FILE in package.json client/package.json api/package.json; do
if [[ -f "$FILE" ]]; then
jq empty "$FILE" || (echo "::error title=Invalid JSON::$FILE is invalid" && exit 1)
fi
done
- name: Extract Dependencies Used in Scripts
id: extract-used-scripts
run: |
extract_deps_from_scripts() {
local package_file=$1
if [[ -f "$package_file" ]]; then
jq -r '.scripts | to_entries[].value' "$package_file" | \
grep -oE '([a-zA-Z0-9_-]+)' | sort -u > used_scripts.txt
else
touch used_scripts.txt
fi
}
extract_deps_from_scripts "package.json"
mv used_scripts.txt root_used_deps.txt
extract_deps_from_scripts "client/package.json"
mv used_scripts.txt client_used_deps.txt
extract_deps_from_scripts "api/package.json"
mv used_scripts.txt api_used_deps.txt
- name: Extract Dependencies Used in Source Code
id: extract-used-code
run: |
extract_deps_from_code() {
local folder=$1
local output_file=$2
if [[ -d "$folder" ]]; then
grep -rEho "require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)" "$folder" --include=\*.{js,ts,mjs,cjs} | \
sed -E "s/require\\(['\"]([a-zA-Z0-9@/._-]+)['\"]\\)/\1/" > "$output_file"
grep -rEho "import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]" "$folder" --include=\*.{js,ts,mjs,cjs} | \
sed -E "s/import .* from ['\"]([a-zA-Z0-9@/._-]+)['\"]/\1/" >> "$output_file"
sort -u "$output_file" -o "$output_file"
else
touch "$output_file"
fi
}
extract_deps_from_code "." root_used_code.txt
extract_deps_from_code "client" client_used_code.txt
extract_deps_from_code "api" api_used_code.txt
- name: Run depcheck for root package.json
id: check-root
run: |
if [[ -f "package.json" ]]; then
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat root_used_deps.txt root_used_code.txt | sort) || echo "")
echo "ROOT_UNUSED<<EOF" >> $GITHUB_ENV
echo "$UNUSED" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV
fi
- name: Run depcheck for client/package.json
id: check-client
run: |
if [[ -f "client/package.json" ]]; then
chmod -R 755 client
cd client
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../client_used_deps.txt ../client_used_code.txt | sort) || echo "")
echo "CLIENT_UNUSED<<EOF" >> $GITHUB_ENV
echo "$UNUSED" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV
cd ..
fi
- name: Run depcheck for api/package.json
id: check-api
run: |
if [[ -f "api/package.json" ]]; then
chmod -R 755 api
cd api
UNUSED=$(depcheck --json | jq -r '.dependencies | join("\n")' || echo "")
UNUSED=$(comm -23 <(echo "$UNUSED" | sort) <(cat ../api_used_deps.txt ../api_used_code.txt | sort) || echo "")
echo "API_UNUSED<<EOF" >> $GITHUB_ENV
echo "$UNUSED" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV
cd ..
fi
- name: Post comment on PR if unused dependencies are found
if: env.ROOT_UNUSED != '' || env.CLIENT_UNUSED != '' || env.API_UNUSED != ''
run: |
PR_NUMBER=$(jq --raw-output .pull_request.number "$GITHUB_EVENT_PATH")
ROOT_LIST=$(echo "$ROOT_UNUSED" | awk '{print "- `" $0 "`"}')
CLIENT_LIST=$(echo "$CLIENT_UNUSED" | awk '{print "- `" $0 "`"}')
API_LIST=$(echo "$API_UNUSED" | awk '{print "- `" $0 "`"}')
COMMENT_BODY=$(cat <<EOF
### 🚨 Unused NPM Packages Detected
The following **unused dependencies** were found:
$(if [[ ! -z "$ROOT_UNUSED" ]]; then echo "#### 📂 Root \`package.json\`"; echo ""; echo "$ROOT_LIST"; echo ""; fi)
$(if [[ ! -z "$CLIENT_UNUSED" ]]; then echo "#### 📂 Client \`client/package.json\`"; echo ""; echo "$CLIENT_LIST"; echo ""; fi)
$(if [[ ! -z "$API_UNUSED" ]]; then echo "#### 📂 API \`api/package.json\`"; echo ""; echo "$API_LIST"; echo ""; fi)
⚠️ **Please remove these unused dependencies to keep your project clean.**
EOF
)
gh api "repos/${{ github.repository }}/issues/${PR_NUMBER}/comments" \
-f body="$COMMENT_BODY" \
-H "Authorization: token ${{ secrets.GITHUB_TOKEN }}"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Fail workflow if unused dependencies found
if: env.ROOT_UNUSED != '' || env.CLIENT_UNUSED != '' || env.API_UNUSED != ''
run: exit 1

3
.gitignore vendored
View File

@@ -105,5 +105,4 @@ auth.json
uploads/
# owner
release/
!/client/src/@types/i18next.d.ts
release/

View File

@@ -1,19 +0,0 @@
{
"tailwindConfig": "./client/tailwind.config.mjs",
"printWidth": 100,
"tabWidth": 2,
"useTabs": false,
"semi": true,
"singleQuote": true,
"trailingComma": "all",
"arrowParens": "always",
"embeddedLanguageFormatting": "auto",
"insertPragma": false,
"proseWrap": "preserve",
"quoteProps": "as-needed",
"requirePragma": false,
"rangeStart": 0,
"endOfLine": "auto",
"jsxSingleQuote": false,
"plugins": ["prettier-plugin-tailwindcss"]
}

3
.vscode/launch.json vendored
View File

@@ -10,8 +10,7 @@
"env": {
"NODE_ENV": "production"
},
"console": "integratedTerminal",
"envFile": "${workspaceFolder}/.env"
"console": "integratedTerminal"
}
]
}

View File

@@ -1,4 +1,4 @@
# v0.7.7-rc1
# v0.7.5
# Base node image
FROM node:20-alpine AS node

View File

@@ -1,8 +1,8 @@
# Dockerfile.multi
# v0.7.7-rc1
# v0.7.5
# Base for all builds
FROM node:20-alpine AS base-min
FROM node:20-alpine AS base
WORKDIR /app
RUN apk --no-cache add curl
RUN npm config set fetch-retry-maxtimeout 600000 && \
@@ -10,13 +10,8 @@ RUN npm config set fetch-retry-maxtimeout 600000 && \
npm config set fetch-retry-mintimeout 15000
COPY package*.json ./
COPY packages/data-provider/package*.json ./packages/data-provider/
COPY packages/mcp/package*.json ./packages/mcp/
COPY client/package*.json ./client/
COPY api/package*.json ./api/
# Install all dependencies for every build
FROM base-min AS base
WORKDIR /app
RUN npm ci
# Build data-provider
@@ -24,13 +19,7 @@ FROM base AS data-provider-build
WORKDIR /app/packages/data-provider
COPY packages/data-provider ./
RUN npm run build
# Build mcp package
FROM base AS mcp-build
WORKDIR /app/packages/mcp
COPY packages/mcp ./
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
RUN npm run build
RUN npm prune --production
# Client build
FROM base AS client-build
@@ -39,18 +28,17 @@ COPY client ./
COPY --from=data-provider-build /app/packages/data-provider/dist /app/packages/data-provider/dist
ENV NODE_OPTIONS="--max-old-space-size=2048"
RUN npm run build
RUN npm prune --production
# API setup (including client dist)
FROM base-min AS api-build
FROM base AS api-build
WORKDIR /app
# Install only production deps
RUN npm ci --omit=dev
COPY api ./api
COPY config ./config
COPY --from=data-provider-build /app/packages/data-provider/dist ./packages/data-provider/dist
COPY --from=mcp-build /app/packages/mcp/dist ./packages/mcp/dist
COPY --from=client-build /app/client/dist ./client/dist
WORKDIR /app/api
RUN npm prune --production
EXPOSE 3080
ENV HOST=0.0.0.0
CMD ["node", "server/index.js"]

View File

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2025 LibreChat
Copyright (c) 2024 LibreChat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

124
README.md
View File

@@ -38,85 +38,42 @@
</a>
</p>
<p align="center">
<a href="https://www.librechat.ai/docs/translation">
<img
src="https://img.shields.io/badge/dynamic/json.svg?style=for-the-badge&color=2096F3&label=locize&query=%24.translatedPercentage&url=https://api.locize.app/badgedata/4cb2598b-ed4d-469c-9b04-2ed531a8cb45&suffix=%+translated"
alt="Translation Progress">
</a>
</p>
# 📃 Features
# ✨ Features
- 🖥️ **UI & Experience** inspired by ChatGPT with enhanced design and features
- 🤖 **AI Model Selection**:
- Anthropic (Claude), AWS Bedrock, OpenAI, Azure OpenAI, Google, Vertex AI, OpenAI Assistants API (incl. Azure)
- [Custom Endpoints](https://www.librechat.ai/docs/quick_start/custom_endpoints): Use any OpenAI-compatible API with LibreChat, no proxy required
- Compatible with [Local & Remote AI Providers](https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints):
- Ollama, groq, Cohere, Mistral AI, Apple MLX, koboldcpp, together.ai,
- OpenRouter, Perplexity, ShuttleAI, Deepseek, Qwen, and more
- 🔧 **[Code Interpreter API](https://www.librechat.ai/docs/features/code_interpreter)**:
- Secure, Sandboxed Execution in Python, Node.js (JS/TS), Go, C/C++, Java, PHP, Rust, and Fortran
- Seamless File Handling: Upload, process, and download files directly
- No Privacy Concerns: Fully isolated and secure execution
- 🔦 **Agents & Tools Integration**:
- **[LibreChat Agents](https://www.librechat.ai/docs/features/agents)**:
- No-Code Custom Assistants: Build specialized, AI-driven helpers without coding
- Flexible & Extensible: Attach tools like DALL-E-3, file search, code execution, and more
- Compatible with Custom Endpoints, OpenAI, Azure, Anthropic, AWS Bedrock, and more
- [Model Context Protocol (MCP) Support](https://modelcontextprotocol.io/clients#librechat) for Tools
- Use LibreChat Agents and OpenAI Assistants with Files, Code Interpreter, Tools, and API Actions
- 🪄 **Generative UI with Code Artifacts**:
- [Code Artifacts](https://youtu.be/GfTj7O4gmd0?si=WJbdnemZpJzBrJo3) allow creation of React, HTML, and Mermaid diagrams directly in chat
- 💾 **Presets & Context Management**:
- Create, Save, & Share Custom Presets
- Switch between AI Endpoints and Presets mid-chat
- Edit, Resubmit, and Continue Messages with Conversation branching
- [Fork Messages & Conversations](https://www.librechat.ai/docs/features/fork) for Advanced Context control
- 💬 **Multimodal & File Interactions**:
- Upload and analyze images with Claude 3, GPT-4o, o1, Llama-Vision, and Gemini 📸
- Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, AWS Bedrock, & Google 🗃️
- 🌎 **Multilingual UI**:
- English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro
- 🖥️ UI matching ChatGPT, including Dark mode, Streaming, and latest updates
- 🤖 AI model selection:
- Anthropic (Claude), AWS Bedrock, OpenAI, Azure OpenAI, BingAI, ChatGPT, Google Vertex AI, Plugins, Assistants API (including Azure Assistants)
- ✅ Compatible across both **[Remote & Local AI services](https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints):**
- groq, Ollama, Cohere, Mistral AI, Apple MLX, koboldcpp, OpenRouter, together.ai, Perplexity, ShuttleAI, and more
- 🪄 Generative UI with **[Code Artifacts](https://youtu.be/GfTj7O4gmd0?si=WJbdnemZpJzBrJo3)**
- Create React, HTML code, and Mermaid diagrams right in chat
- 💾 Create, Save, & Share Custom Presets
- 🔀 Switch between AI Endpoints and Presets, mid-chat
- 🔄 Edit, Resubmit, and Continue Messages with Conversation branching
- 🌿 Fork Messages & Conversations for Advanced Context control
- 💬 Multimodal Chat:
- Upload and analyze images with Claude 3, GPT-4 (including `gpt-4o` and `gpt-4o-mini`), and Gemini Vision 📸
- Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, & Google. 🗃️
- Advanced Agents with Files, Code Interpreter, Tools, and API Actions 🔦
- Available through the [OpenAI Assistants API](https://platform.openai.com/docs/assistants/overview) 🌤️
- Non-OpenAI Agents in Active Development 🚧
- 🌎 Multilingual UI:
- English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro,
- Русский, 日本語, Svenska, 한국어, Tiếng Việt, 繁體中文, العربية, Türkçe, Nederlands, עברית
- 🧠 **Reasoning UI**:
- Dynamic Reasoning UI for Chain-of-Thought/Reasoning AI models like DeepSeek-R1
- 🎨 **Customizable Interface**:
- Customizable Dropdown & Interface that adapts to both power users and newcomers
- 🗣️ **Speech & Audio**:
- Chat hands-free with Speech-to-Text and Text-to-Speech
- Automatically send and play Audio
- 🎨 Customizable Dropdown & Interface: Adapts to both power users and newcomers
- 📧 Verify your email to ensure secure access
- 🗣️ Chat hands-free with Speech-to-Text and Text-to-Speech magic
- Automatically send and play Audio
- Supports OpenAI, Azure OpenAI, and Elevenlabs
- 📥 **Import & Export Conversations**:
- Import Conversations from LibreChat, ChatGPT, Chatbot UI
- Export conversations as screenshots, markdown, text, json
- 🔍 **Search & Discovery**:
- Search all messages/conversations
- 👥 **Multi-User & Secure Access**:
- Multi-User, Secure Authentication with OAuth2, LDAP, & Email Login Support
- Built-in Moderation, and Token spend tools
- ⚙️ **Configuration & Deployment**:
- Configure Proxy, Reverse Proxy, Docker, & many Deployment options
- 📥 Import Conversations from LibreChat, ChatGPT, Chatbot UI
- 📤 Export conversations as screenshots, markdown, text, json
- 🔍 Search all messages/conversations
- 🔌 Plugins, including web access, image generation with DALL-E-3 and more
- 👥 Multi-User, Secure Authentication with Moderation and Token spend tools
- ⚙️ Configure Proxy, Reverse Proxy, Docker, & many Deployment options:
- Use completely local or deploy on the cloud
- 📖 **Open-Source & Community**:
- Completely Open-Source & Built in Public
- Community-driven development, support, and feedback
- 📖 Completely Open-Source & Built in Public
- 🧑‍🤝‍🧑 Community-driven development, support, and feedback
[For a thorough review of our features, see our docs here](https://docs.librechat.ai/) 📚
@@ -126,8 +83,7 @@ LibreChat brings together the future of assistant AIs with the revolutionary tec
With LibreChat, you no longer need to opt for ChatGPT Plus and can instead use free or pay-per-call APIs. We welcome contributions, cloning, and forking to enhance the capabilities of this advanced chatbot platform.
[![Watch the video](https://raw.githubusercontent.com/LibreChat-AI/librechat.ai/main/public/images/changelog/v0.7.6.gif)](https://www.youtube.com/watch?v=ilfwGQtJNlI)
[![Watch the video](https://raw.githubusercontent.com/LibreChat-AI/librechat.ai/main/public/images/changelog/v0.7.5.png)](https://www.youtube.com/watch?v=IDukQ7a2f3U)
Click on the thumbnail to open the video☝
---
@@ -179,8 +135,6 @@ Contributions, suggestions, bug reports and fixes are welcome!
For new features, components, or extensions, please open an issue and discuss before sending a PR.
If you'd like to help translate LibreChat into your language, we'd love your contribution! Improving our translations not only makes LibreChat more accessible to users around the world but also enhances the overall user experience. Please check out our [Translation Guide](https://www.librechat.ai/docs/translation).
---
## 💖 This project exists in its current state thanks to all the people who contribute
@@ -188,15 +142,3 @@ If you'd like to help translate LibreChat into your language, we'd love your con
<a href="https://github.com/danny-avila/LibreChat/graphs/contributors">
<img src="https://contrib.rocks/image?repo=danny-avila/LibreChat" />
</a>
---
## 🎉 Special Thanks
We thank [Locize](https://locize.com) for their translation management tools that support multiple languages in LibreChat.
<p align="center">
<a href="https://locize.com" target="_blank" rel="noopener noreferrer">
<img src="https://locize.com/img/locize_color.svg" alt="Locize Logo" height="50">
</a>
</p>

112
api/app/bingai.js Normal file
View File

@@ -0,0 +1,112 @@
require('dotenv').config();
const { KeyvFile } = require('keyv-file');
const { EModelEndpoint } = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const { logger } = require('~/config');
const askBing = async ({
text,
parentMessageId,
conversationId,
jailbreak,
jailbreakConversationId,
context,
systemMessage,
conversationSignature,
clientId,
invocationId,
toneStyle,
key: expiresAt,
onProgress,
userId,
}) => {
const isUserProvided = process.env.BINGAI_TOKEN === 'user_provided';
let key = null;
if (expiresAt && isUserProvided) {
checkUserKeyExpiry(expiresAt, EModelEndpoint.bingAI);
key = await getUserKey({ userId, name: 'bingAI' });
}
const { BingAIClient } = await import('nodejs-gpt');
const store = {
store: new KeyvFile({ filename: './data/cache.json' }),
};
const bingAIClient = new BingAIClient({
// "_U" cookie from bing.com
// userToken:
// isUserProvided ? key : process.env.BINGAI_TOKEN ?? null,
// If the above doesn't work, provide all your cookies as a string instead
cookies: isUserProvided ? key : process.env.BINGAI_TOKEN ?? null,
debug: false,
cache: store,
host: process.env.BINGAI_HOST || null,
proxy: process.env.PROXY || null,
});
let options = {};
if (jailbreakConversationId == 'false') {
jailbreakConversationId = false;
}
if (jailbreak) {
options = {
jailbreakConversationId: jailbreakConversationId || jailbreak,
context,
systemMessage,
parentMessageId,
toneStyle,
onProgress,
clientOptions: {
features: {
genImage: {
server: {
enable: true,
type: 'markdown_list',
},
},
},
},
};
} else {
options = {
conversationId,
context,
systemMessage,
parentMessageId,
toneStyle,
onProgress,
clientOptions: {
features: {
genImage: {
server: {
enable: true,
type: 'markdown_list',
},
},
},
},
};
// don't give those parameters for new conversation
// for new conversation, conversationSignature always is null
if (conversationSignature) {
options.encryptedConversationSignature = conversationSignature;
options.clientId = clientId;
options.invocationId = invocationId;
}
}
logger.debug('bing options', options);
const res = await bingAIClient.sendMessage(text, options);
return res;
// for reference:
// https://github.com/waylaidwanderer/node-chatgpt-api/blob/main/demos/use-bing-client.js
};
module.exports = { askBing };

View File

@@ -0,0 +1,57 @@
require('dotenv').config();
const { KeyvFile } = require('keyv-file');
const { Constants, EModelEndpoint } = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('../server/services/UserService');
const browserClient = async ({
text,
parentMessageId,
conversationId,
model,
key: expiresAt,
onProgress,
onEventMessage,
abortController,
userId,
}) => {
const isUserProvided = process.env.CHATGPT_TOKEN === 'user_provided';
let key = null;
if (expiresAt && isUserProvided) {
checkUserKeyExpiry(expiresAt, EModelEndpoint.chatGPTBrowser);
key = await getUserKey({ userId, name: 'chatGPTBrowser' });
}
const { ChatGPTBrowserClient } = await import('nodejs-gpt');
const store = {
store: new KeyvFile({ filename: './data/cache.json' }),
};
const clientOptions = {
// Warning: This will expose your access token to a third party. Consider the risks before using this.
reverseProxyUrl:
process.env.CHATGPT_REVERSE_PROXY ?? 'https://ai.fakeopen.com/api/conversation',
// Access token from https://chat.openai.com/api/auth/session
accessToken: isUserProvided ? key : process.env.CHATGPT_TOKEN ?? null,
model: model,
debug: false,
proxy: process.env.PROXY ?? null,
user: userId,
};
const client = new ChatGPTBrowserClient(clientOptions, store);
let options = { onProgress, onEventMessage, abortController };
if (!!parentMessageId && !!conversationId) {
options = { ...options, parentMessageId, conversationId };
}
if (parentMessageId === Constants.NO_PARENT) {
delete options.conversationId;
}
const res = await client.sendMessage(text, options);
return res;
};
module.exports = { browserClient };

View File

@@ -1,5 +1,6 @@
const Anthropic = require('@anthropic-ai/sdk');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
Constants,
EModelEndpoint,
@@ -7,7 +8,7 @@ const {
getResponseSender,
validateVisionModel,
} = require('librechat-data-provider');
const { SplitStreamHandler: _Handler, GraphEvents } = require('@librechat/agents');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const {
truncateText,
formatMessage,
@@ -16,30 +17,16 @@ const {
parseParamFromPrompt,
createContextHandlers,
} = require('./prompts');
const {
getClaudeHeaders,
configureReasoning,
checkPromptCacheSupport,
} = require('~/server/services/Endpoints/anthropic/helpers');
const { getModelMaxTokens, getModelMaxOutputTokens, matchModelName } = require('~/utils');
const { spendTokens, spendStructuredTokens } = require('~/models/spendTokens');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const Tokenizer = require('~/server/services/Tokenizer');
const { logger, sendEvent } = require('~/config');
const { sleep } = require('~/server/utils');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
const HUMAN_PROMPT = '\n\nHuman:';
const AI_PROMPT = '\n\nAssistant:';
class SplitStreamHandler extends _Handler {
getDeltaContent(chunk) {
return (chunk?.delta?.text ?? chunk?.completion) || '';
}
getReasoningDelta(chunk) {
return chunk?.delta?.thinking || '';
}
}
const tokenizersCache = {};
/** Helper function to introduce a delay before retrying */
function delayBeforeRetry(attempts, baseDelay = 1000) {
@@ -83,8 +70,6 @@ class AnthropicClient extends BaseClient {
/** The key for the usage object's output tokens
* @type {string} */
this.outputTokensKey = 'output_tokens';
/** @type {SplitStreamHandler | undefined} */
this.streamHandler;
}
setOptions(options) {
@@ -114,10 +99,9 @@ class AnthropicClient extends BaseClient {
const modelMatch = matchModelName(this.modelOptions.model, EModelEndpoint.anthropic);
this.isClaude3 = modelMatch.includes('claude-3');
this.isLegacyOutput = !(
/claude-3[-.]5-sonnet/.test(modelMatch) || /claude-3[-.]7/.test(modelMatch)
);
this.supportsCacheControl = this.options.promptCache && checkPromptCacheSupport(modelMatch);
this.isLegacyOutput = !modelMatch.includes('claude-3-5-sonnet');
this.supportsCacheControl =
this.options.promptCache && this.checkPromptCacheSupport(modelMatch);
if (
this.isLegacyOutput &&
@@ -143,7 +127,7 @@ class AnthropicClient extends BaseClient {
this.options.endpointType ?? this.options.endpoint,
this.options.endpointTokenConfig,
) ??
anthropicSettings.maxOutputTokens.reset(this.modelOptions.model);
1500;
this.maxPromptTokens =
this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;
@@ -165,6 +149,7 @@ class AnthropicClient extends BaseClient {
this.startToken = '||>';
this.endToken = '';
this.gptEncoder = this.constructor.getTokenizer('cl100k_base');
return this;
}
@@ -189,9 +174,18 @@ class AnthropicClient extends BaseClient {
options.baseURL = this.options.reverseProxyUrl;
}
const headers = getClaudeHeaders(requestOptions?.model, this.supportsCacheControl);
if (headers) {
options.defaultHeaders = headers;
if (
this.supportsCacheControl &&
requestOptions?.model &&
requestOptions.model.includes('claude-3-5-sonnet')
) {
options.defaultHeaders = {
'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15,prompt-caching-2024-07-31',
};
} else if (this.supportsCacheControl) {
options.defaultHeaders = {
'anthropic-beta': 'prompt-caching-2024-07-31',
};
}
return new Anthropic(options);
@@ -425,7 +419,7 @@ class AnthropicClient extends BaseClient {
}
let { context: messagesInWindow, remainingContextTokens } =
await this.getMessagesWithinTokenLimit({ messages: formattedMessages });
await this.getMessagesWithinTokenLimit(formattedMessages);
const tokenCountMap = orderedMessages
.slice(orderedMessages.length - messagesInWindow.length)
@@ -677,38 +671,29 @@ class AnthropicClient extends BaseClient {
* @returns {Promise<Anthropic.default.Message | Anthropic.default.Completion>} The response from the Anthropic client.
*/
async createResponse(client, options, useMessages) {
return (useMessages ?? this.useMessages)
return useMessages ?? this.useMessages
? await client.messages.create(options)
: await client.completions.create(options);
}
getMessageMapMethod() {
/**
* @param {TMessage} msg
*/
return (msg) => {
if (msg.text != null && msg.text && msg.text.startsWith(':::thinking')) {
msg.text = msg.text.replace(/:::thinking.*?:::/gs, '').trim();
}
return msg;
};
}
/**
* @param {string[]} [intermediateReply]
* @returns {string}
* @param {string} modelName
* @returns {boolean}
*/
getStreamText(intermediateReply) {
if (!this.streamHandler) {
return intermediateReply?.join('') ?? '';
checkPromptCacheSupport(modelName) {
const modelMatch = matchModelName(modelName, EModelEndpoint.anthropic);
if (modelMatch.includes('claude-3-5-sonnet-latest')) {
return false;
}
const reasoningText = this.streamHandler.reasoningTokens.join('');
const reasoningBlock = reasoningText.length > 0 ? `:::thinking\n${reasoningText}\n:::\n` : '';
return `${reasoningBlock}${this.streamHandler.tokens.join('')}`;
if (
modelMatch === 'claude-3-5-sonnet' ||
modelMatch === 'claude-3-5-haiku' ||
modelMatch === 'claude-3-haiku' ||
modelMatch === 'claude-3-opus'
) {
return true;
}
return false;
}
async sendCompletion(payload, { onProgress, abortController }) {
@@ -728,6 +713,7 @@ class AnthropicClient extends BaseClient {
user_id: this.user,
};
let text = '';
const {
stream,
model,
@@ -738,37 +724,24 @@ class AnthropicClient extends BaseClient {
topK: top_k,
} = this.modelOptions;
let requestOptions = {
const requestOptions = {
model,
stream: stream || true,
stop_sequences,
temperature,
metadata,
top_p,
top_k,
};
if (!/claude-3[-.]7/.test(model)) {
if (top_p !== undefined) {
requestOptions.top_p = top_p;
}
if (top_k !== undefined) {
requestOptions.top_k = top_k;
}
}
if (this.useMessages) {
requestOptions.messages = payload;
requestOptions.max_tokens =
maxOutputTokens || anthropicSettings.maxOutputTokens.reset(requestOptions.model);
requestOptions.max_tokens = maxOutputTokens || legacy.maxOutputTokens.default;
} else {
requestOptions.prompt = payload;
requestOptions.max_tokens_to_sample = maxOutputTokens || legacy.maxOutputTokens.default;
requestOptions.max_tokens_to_sample = maxOutputTokens || 1500;
}
requestOptions = configureReasoning(requestOptions, {
thinking: this.options.thinking,
thinkingBudget: this.options.thinkingBudget,
});
if (this.systemMessage && this.supportsCacheControl === true) {
requestOptions.system = [
{
@@ -786,17 +759,13 @@ class AnthropicClient extends BaseClient {
}
logger.debug('[AnthropicClient]', { ...requestOptions });
this.streamHandler = new SplitStreamHandler({
accumulate: true,
runId: this.responseMessageId,
handlers: {
[GraphEvents.ON_RUN_STEP]: (event) => sendEvent(this.options.res, event),
[GraphEvents.ON_MESSAGE_DELTA]: (event) => sendEvent(this.options.res, event),
[GraphEvents.ON_REASONING_DELTA]: (event) => sendEvent(this.options.res, event),
},
});
let intermediateReply = this.streamHandler.tokens;
const handleChunk = (currentChunk) => {
if (currentChunk) {
text += currentChunk;
onProgress(currentChunk);
}
};
const maxRetries = 3;
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
@@ -817,15 +786,22 @@ class AnthropicClient extends BaseClient {
});
for await (const completion of response) {
// Handle each completion as before
const type = completion?.type ?? '';
if (tokenEventTypes.has(type)) {
logger.debug(`[AnthropicClient] ${type}`, completion);
this[type] = completion;
}
this.streamHandler.handle(completion);
if (completion?.delta?.text) {
handleChunk(completion.delta.text);
} else if (completion.completion) {
handleChunk(completion.completion);
}
await sleep(streamRate);
}
// Successful processing, exit loop
break;
} catch (error) {
attempts += 1;
@@ -835,10 +811,6 @@ class AnthropicClient extends BaseClient {
if (attempts < maxRetries) {
await delayBeforeRetry(attempts, 350);
} else if (this.streamHandler && this.streamHandler.reasoningTokens.length) {
return this.getStreamText();
} else if (intermediateReply.length > 0) {
return this.getStreamText(intermediateReply);
} else {
throw new Error(`Operation failed after ${maxRetries} attempts: ${error.message}`);
}
@@ -854,7 +826,8 @@ class AnthropicClient extends BaseClient {
}
await processResponse.bind(this)();
return this.getStreamText(intermediateReply);
return text.trim();
}
getSaveOptions() {
@@ -864,8 +837,6 @@ class AnthropicClient extends BaseClient {
promptPrefix: this.options.promptPrefix,
modelLabel: this.options.modelLabel,
promptCache: this.options.promptCache,
thinking: this.options.thinking,
thinkingBudget: this.options.thinkingBudget,
resendFiles: this.options.resendFiles,
iconURL: this.options.iconURL,
greeting: this.options.greeting,
@@ -878,18 +849,22 @@ class AnthropicClient extends BaseClient {
logger.debug('AnthropicClient doesn\'t use getBuildMessagesOptions');
}
getEncoding() {
return 'cl100k_base';
static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {
if (tokenizersCache[encoding]) {
return tokenizersCache[encoding];
}
let tokenizer;
if (isModelName) {
tokenizer = encodingForModel(encoding, extendSpecialTokens);
} else {
tokenizer = getEncoding(encoding, extendSpecialTokens);
}
tokenizersCache[encoding] = tokenizer;
return tokenizer;
}
/**
* Returns the token count of a given text. It also checks and resets the tokenizers if necessary.
* @param {string} text - The text to get the token count for.
* @returns {number} The token count of the given text.
*/
getTokenCount(text) {
const encoding = this.getEncoding();
return Tokenizer.getTokenCount(text, encoding);
return this.gptEncoder.encode(text, 'all').length;
}
/**

View File

@@ -4,16 +4,16 @@ const {
supportsBalanceCheck,
isAgentsEndpoint,
isParamEndpoint,
EModelEndpoint,
excludedKeys,
ErrorTypes,
Constants,
CacheKeys,
Time,
} = require('librechat-data-provider');
const { getMessages, saveMessage, updateMessage, saveConvo, getConvo } = require('~/models');
const { getMessages, saveMessage, updateMessage, saveConvo } = require('~/models');
const { addSpaceIfNeeded, isEnabled } = require('~/server/utils');
const { truncateToolCallOutputs } = require('./prompts');
const checkBalance = require('~/models/checkBalance');
const { getFiles } = require('~/models/File');
const { getLogStores } = require('~/cache');
const TextStream = require('./TextStream');
const { logger } = require('~/config');
@@ -50,20 +50,6 @@ class BaseClient {
/** The key for the usage object's output tokens
* @type {string} */
this.outputTokensKey = 'completion_tokens';
/** @type {Set<string>} */
this.savedMessageIds = new Set();
/**
* Flag to determine if the client re-submitted the latest assistant message.
* @type {boolean | undefined} */
this.continued;
/**
* Flag to determine if the client has already fetched the conversation while saving new messages.
* @type {boolean | undefined} */
this.fetchedConvo;
/** @type {TMessage[]} */
this.currentMessages = [];
/** @type {import('librechat-data-provider').VisionModes | undefined} */
this.visionMode;
}
setOptions() {
@@ -98,7 +84,7 @@ class BaseClient {
return this.options.agent.id;
}
return this.modelOptions?.model ?? this.model;
return this.modelOptions.model;
}
/**
@@ -107,7 +93,7 @@ class BaseClient {
* @returns {number}
*/
getTokenCountForResponse(responseMessage) {
logger.debug('[BaseClient] `recordTokenUsage` not implemented.', responseMessage);
logger.debug('`[BaseClient] recordTokenUsage` not implemented.', responseMessage);
}
/**
@@ -118,7 +104,7 @@ class BaseClient {
* @returns {Promise<void>}
*/
async recordTokenUsage({ promptTokens, completionTokens }) {
logger.debug('[BaseClient] `recordTokenUsage` not implemented.', {
logger.debug('`[BaseClient] recordTokenUsage` not implemented.', {
promptTokens,
completionTokens,
});
@@ -274,24 +260,17 @@ class BaseClient {
/**
* Adds instructions to the messages array. If the instructions object is empty or undefined,
* the original messages array is returned. Otherwise, the instructions are added to the messages
* array either at the beginning (default) or preserving the last message at the end.
* array, preserving the last message at the end.
*
* @param {Array} messages - An array of messages.
* @param {Object} instructions - An object containing instructions to be added to the messages.
* @param {boolean} [beforeLast=false] - If true, adds instructions before the last message; if false, adds at the beginning.
* @returns {Array} An array containing messages and instructions, or the original messages if instructions are empty.
*/
addInstructions(messages, instructions, beforeLast = false) {
addInstructions(messages, instructions) {
const payload = [];
if (!instructions || Object.keys(instructions).length === 0) {
return messages;
}
if (!beforeLast) {
return [instructions, ...messages];
}
// Legacy behavior: add instructions before the last message
const payload = [];
if (messages.length > 1) {
payload.push(...messages.slice(0, -1));
}
@@ -306,9 +285,6 @@ class BaseClient {
}
async handleTokenCountMap(tokenCountMap) {
if (this.clientName === EModelEndpoint.agents) {
return;
}
if (this.currentMessages.length === 0) {
return;
}
@@ -357,38 +333,25 @@ class BaseClient {
* If the token limit would be exceeded by adding a message, that message is not added to the context and remains in the original array.
* The method uses `push` and `pop` operations for efficient array manipulation, and reverses the context array at the end to maintain the original order of the messages.
*
* @param {Object} params
* @param {TMessage[]} params.messages - An array of messages, each with a `tokenCount` property. The messages should be ordered from oldest to newest.
* @param {number} [params.maxContextTokens] - The max number of tokens allowed in the context. If not provided, defaults to `this.maxContextTokens`.
* @param {{ role: 'system', content: text, tokenCount: number }} [params.instructions] - Instructions already added to the context at index 0.
* @returns {Promise<{
* context: TMessage[],
* remainingContextTokens: number,
* messagesToRefine: TMessage[],
* summaryIndex: number,
* }>} An object with four properties: `context`, `summaryIndex`, `remainingContextTokens`, and `messagesToRefine`.
* @param {Array} _messages - An array of messages, each with a `tokenCount` property. The messages should be ordered from oldest to newest.
* @param {number} [maxContextTokens] - The max number of tokens allowed in the context. If not provided, defaults to `this.maxContextTokens`.
* @returns {Object} An object with four properties: `context`, `summaryIndex`, `remainingContextTokens`, and `messagesToRefine`.
* `context` is an array of messages that fit within the token limit.
* `summaryIndex` is the index of the first message in the `messagesToRefine` array.
* `remainingContextTokens` is the number of tokens remaining within the limit after adding the messages to the context.
* `messagesToRefine` is an array of messages that were not added to the context because they would have exceeded the token limit.
*/
async getMessagesWithinTokenLimit({ messages: _messages, maxContextTokens, instructions }) {
async getMessagesWithinTokenLimit(_messages, maxContextTokens) {
// Every reply is primed with <|start|>assistant<|message|>, so we
// start with 3 tokens for the label after all messages have been counted.
let summaryIndex = -1;
let currentTokenCount = 3;
const instructionsTokenCount = instructions?.tokenCount ?? 0;
let remainingContextTokens =
(maxContextTokens ?? this.maxContextTokens) - instructionsTokenCount;
let summaryIndex = -1;
let remainingContextTokens = maxContextTokens ?? this.maxContextTokens;
const messages = [..._messages];
const context = [];
if (currentTokenCount < remainingContextTokens) {
while (messages.length > 0 && currentTokenCount < remainingContextTokens) {
if (messages.length === 1 && instructions) {
break;
}
const poppedMessage = messages.pop();
const { tokenCount } = poppedMessage;
@@ -402,11 +365,6 @@ class BaseClient {
}
}
if (instructions) {
context.push(_messages[0]);
messages.shift();
}
const prunedMemory = messages;
summaryIndex = prunedMemory.length - 1;
remainingContextTokens -= currentTokenCount;
@@ -431,38 +389,12 @@ class BaseClient {
if (instructions) {
({ tokenCount, ..._instructions } = instructions);
}
_instructions && logger.debug('[BaseClient] instructions tokenCount: ' + tokenCount);
if (tokenCount && tokenCount > this.maxContextTokens) {
const info = `${tokenCount} / ${this.maxContextTokens}`;
const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
logger.warn(`Instructions token count exceeds max token count (${info}).`);
throw new Error(errorMessage);
}
if (this.clientName === EModelEndpoint.agents) {
const { dbMessages, editedIndices } = truncateToolCallOutputs(
orderedMessages,
this.maxContextTokens,
this.getTokenCountForMessage.bind(this),
);
if (editedIndices.length > 0) {
logger.debug('[BaseClient] Truncated tool call outputs:', editedIndices);
for (const index of editedIndices) {
formattedMessages[index].content = dbMessages[index].content;
}
orderedMessages = dbMessages;
}
}
let payload = this.addInstructions(formattedMessages, _instructions);
let orderedWithInstructions = this.addInstructions(orderedMessages, instructions);
let { context, remainingContextTokens, messagesToRefine, summaryIndex } =
await this.getMessagesWithinTokenLimit({
messages: orderedWithInstructions,
instructions,
});
await this.getMessagesWithinTokenLimit(orderedWithInstructions);
logger.debug('[BaseClient] Context Count (1/2)', {
remainingContextTokens,
@@ -474,9 +406,7 @@ class BaseClient {
let { shouldSummarize } = this;
// Calculate the difference in length to determine how many messages were discarded if any
let payload;
let { length } = formattedMessages;
length += instructions != null ? 1 : 0;
const { length } = payload;
const diff = length - context.length;
const firstMessage = orderedWithInstructions[0];
const usePrevSummary =
@@ -486,31 +416,18 @@ class BaseClient {
this.previous_summary.messageId === firstMessage.messageId;
if (diff > 0) {
payload = formattedMessages.slice(diff);
payload = payload.slice(diff);
logger.debug(
`[BaseClient] Difference between original payload (${length}) and context (${context.length}): ${diff}`,
);
}
payload = this.addInstructions(payload ?? formattedMessages, _instructions);
const latestMessage = orderedWithInstructions[orderedWithInstructions.length - 1];
if (payload.length === 0 && !shouldSummarize && latestMessage) {
const info = `${latestMessage.tokenCount} / ${this.maxContextTokens}`;
const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
logger.warn(`Prompt token count exceeds max token count (${info}).`);
throw new Error(errorMessage);
} else if (
_instructions &&
payload.length === 1 &&
payload[0].content === _instructions.content
) {
const info = `${tokenCount + 3} / ${this.maxContextTokens}`;
const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
logger.warn(
`Including instructions, the prompt token count exceeds remaining max token count (${info}).`,
);
throw new Error(errorMessage);
}
if (usePrevSummary) {
@@ -591,7 +508,7 @@ class BaseClient {
conversationId,
parentMessageId: userMessage.messageId,
isCreatedByUser: false,
model: this.modelOptions?.model ?? this.model,
model: this.modelOptions.model,
sender: this.sender,
text: generation,
};
@@ -599,7 +516,6 @@ class BaseClient {
} else {
latestMessage.text = generation;
}
this.continued = true;
} else {
this.currentMessages.push(userMessage);
}
@@ -629,7 +545,6 @@ class BaseClient {
if (!isEdited && !this.skipSaveUserMessage) {
this.userMessagePromise = this.saveMessageToDatabase(userMessage, saveOptions, user);
this.savedMessageIds.add(userMessage.messageId);
if (typeof opts?.getReqData === 'function') {
opts.getReqData({
userMessagePromise: this.userMessagePromise,
@@ -648,8 +563,8 @@ class BaseClient {
user: this.user,
tokenType: 'prompt',
amount: promptTokens,
model: this.modelOptions.model,
endpoint: this.options.endpoint,
model: this.modelOptions?.model ?? this.model,
endpointTokenConfig: this.options.endpointTokenConfig,
},
});
@@ -659,7 +574,6 @@ class BaseClient {
const completion = await this.sendCompletion(payload, opts);
this.abortController.requestCompleted = true;
/** @type {TMessage} */
const responseMessage = {
messageId: responseMessageId,
conversationId,
@@ -707,7 +621,7 @@ class BaseClient {
await this.updateUserMessageTokenCount({ usage, tokenCountMap, userMessage, opts });
} else {
responseMessage.tokenCount = this.getTokenCountForResponse(responseMessage);
completionTokens = responseMessage.tokenCount;
completionTokens = this.getTokenCount(completion);
}
await this.recordTokenUsage({ promptTokens, completionTokens, usage });
@@ -721,16 +635,16 @@ class BaseClient {
responseMessage.attachments = (await Promise.all(this.artifactPromises)).filter((a) => a);
}
if (this.options.attachments) {
try {
saveOptions.files = this.options.attachments.map((attachments) => attachments.file_id);
} catch (error) {
logger.error('[BaseClient] Error mapping attachments for conversation', error);
}
}
this.responsePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user);
this.savedMessageIds.add(responseMessage.messageId);
const messageCache = getLogStores(CacheKeys.MESSAGES);
messageCache.set(
responseMessageId,
{
text: responseMessage.text,
complete: true,
},
Time.FIVE_MINUTES,
);
delete responseMessage.tokenCount;
return responseMessage;
}
@@ -868,39 +782,16 @@ class BaseClient {
return { message: savedMessage };
}
const fieldsToKeep = {
conversationId: message.conversationId,
endpoint: this.options.endpoint,
endpointType: this.options.endpointType,
...endpointOptions,
};
const existingConvo =
this.fetchedConvo === true
? null
: await getConvo(this.options.req?.user?.id, message.conversationId);
const unsetFields = {};
if (existingConvo != null) {
this.fetchedConvo = true;
for (const key in existingConvo) {
if (!key) {
continue;
}
if (excludedKeys.has(key)) {
continue;
}
if (endpointOptions?.[key] === undefined) {
unsetFields[key] = 1;
}
}
}
const conversation = await saveConvo(this.options.req, fieldsToKeep, {
context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo',
unsetFields,
});
const conversation = await saveConvo(
this.options.req,
{
conversationId: message.conversationId,
endpoint: this.options.endpoint,
endpointType: this.options.endpointType,
...endpointOptions,
},
{ context: 'api/app/clients/BaseClient.js - saveMessageToDatabase #saveConvo' },
);
return { message: savedMessage, conversation };
}
@@ -1011,9 +902,8 @@ class BaseClient {
// Note: gpt-3.5-turbo and gpt-4 may update over time. Use default for these as well as for unknown models
let tokensPerMessage = 3;
let tokensPerName = 1;
const model = this.modelOptions?.model ?? this.model;
if (model === 'gpt-3.5-turbo-0301') {
if (this.modelOptions.model === 'gpt-3.5-turbo-0301') {
tokensPerMessage = 4;
tokensPerName = -1;
}
@@ -1025,24 +915,6 @@ class BaseClient {
continue;
}
if (item.type === 'tool_call' && item.tool_call != null) {
const toolName = item.tool_call?.name || '';
if (toolName != null && toolName && typeof toolName === 'string') {
numTokens += this.getTokenCount(toolName);
}
const args = item.tool_call?.args || '';
if (args != null && args && typeof args === 'string') {
numTokens += this.getTokenCount(args);
}
const output = item.tool_call?.output || '';
if (output != null && output && typeof output === 'string') {
numTokens += this.getTokenCount(output);
}
continue;
}
const nestedValue = item[item.type];
if (!nestedValue) {
@@ -1089,15 +961,6 @@ class BaseClient {
return _messages;
}
const seen = new Set();
const attachmentsProcessed =
this.options.attachments && !(this.options.attachments instanceof Promise);
if (attachmentsProcessed) {
for (const attachment of this.options.attachments) {
seen.add(attachment.file_id);
}
}
/**
*
* @param {TMessage} message
@@ -1108,24 +971,12 @@ class BaseClient {
this.message_file_map = {};
}
const fileIds = [];
for (const file of message.files) {
if (seen.has(file.file_id)) {
continue;
}
fileIds.push(file.file_id);
seen.add(file.file_id);
}
if (fileIds.length === 0) {
return message;
}
const fileIds = message.files.map((file) => file.file_id);
const files = await getFiles({
file_id: { $in: fileIds },
});
await this.addImageURLs(message, files, this.visionMode);
await this.addImageURLs(message, files);
this.message_file_map[message.messageId] = files;
return message;

View File

@@ -13,6 +13,7 @@ const {
const { extractBaseURL, constructAzureURL, genAzureChatCompletion } = require('~/utils');
const { createContextHandlers } = require('./prompts');
const { createCoherePayload } = require('./llm');
const { Agent, ProxyAgent } = require('undici');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
@@ -185,6 +186,10 @@ class ChatGPTClient extends BaseClient {
headers: {
'Content-Type': 'application/json',
},
dispatcher: new Agent({
bodyTimeout: 0,
headersTimeout: 0,
}),
};
if (this.isVisionModel) {
@@ -222,16 +227,6 @@ class ChatGPTClient extends BaseClient {
this.azure = !serverless && azureOptions;
this.azureEndpoint =
!serverless && genAzureChatCompletion(this.azure, modelOptions.model, this);
if (serverless === true) {
this.options.defaultQuery = azureOptions.azureOpenAIApiVersion
? { 'api-version': azureOptions.azureOpenAIApiVersion }
: undefined;
this.options.headers['api-key'] = this.apiKey;
}
}
if (this.options.defaultQuery) {
opts.defaultQuery = this.options.defaultQuery;
}
if (this.options.headers) {
@@ -270,6 +265,10 @@ class ChatGPTClient extends BaseClient {
opts.headers['X-Title'] = 'LibreChat';
}
if (this.options.proxy) {
opts.dispatcher = new ProxyAgent(this.options.proxy);
}
/* hacky fixes for Mistral AI API:
- Re-orders system message to the top of the messages payload, as not allowed anywhere else
- If there is only one message and it's a system message, change the role to user

View File

@@ -1,25 +1,22 @@
const { google } = require('googleapis');
const { concat } = require('@langchain/core/utils/stream');
const { Agent, ProxyAgent } = require('undici');
const { ChatVertexAI } = require('@langchain/google-vertexai');
const { GoogleVertexAI } = require('@langchain/google-vertexai');
const { ChatGoogleVertexAI } = require('@langchain/google-vertexai');
const { ChatGoogleGenerativeAI } = require('@langchain/google-genai');
const { GoogleGenerativeAI: GenAI } = require('@google/generative-ai');
const { HumanMessage, SystemMessage } = require('@langchain/core/messages');
const { AIMessage, HumanMessage, SystemMessage } = require('@langchain/core/messages');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
googleGenConfigSchema,
validateVisionModel,
getResponseSender,
endpointSettings,
EModelEndpoint,
ContentTypes,
VisionModes,
ErrorTypes,
Constants,
AuthKeys,
} = require('librechat-data-provider');
const { getSafetySettings } = require('~/server/services/Endpoints/google/llm');
const { encodeAndFormat } = require('~/server/services/Files/images');
const Tokenizer = require('~/server/services/Tokenizer');
const { spendTokens } = require('~/models/spendTokens');
const { getModelMaxTokens } = require('~/utils');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');
@@ -33,10 +30,11 @@ const BaseClient = require('./BaseClient');
const loc = process.env.GOOGLE_LOC || 'us-central1';
const publisher = 'google';
const endpointPrefix = `${loc}-aiplatform.googleapis.com`;
const endpointPrefix = `https://${loc}-aiplatform.googleapis.com`;
// const apiEndpoint = loc + '-aiplatform.googleapis.com';
const tokenizersCache = {};
const settings = endpointSettings[EModelEndpoint.google];
const EXCLUDED_GENAI_MODELS = /gemini-(?:1\.0|1-0|pro)/;
class GoogleClient extends BaseClient {
constructor(credentials, options = {}) {
@@ -51,30 +49,14 @@ class GoogleClient extends BaseClient {
const serviceKey = creds[AuthKeys.GOOGLE_SERVICE_KEY] ?? {};
this.serviceKey =
serviceKey && typeof serviceKey === 'string' ? JSON.parse(serviceKey) : (serviceKey ?? {});
/** @type {string | null | undefined} */
this.project_id = this.serviceKey.project_id;
serviceKey && typeof serviceKey === 'string' ? JSON.parse(serviceKey) : serviceKey ?? {};
this.client_email = this.serviceKey.client_email;
this.private_key = this.serviceKey.private_key;
this.project_id = this.serviceKey.project_id;
this.access_token = null;
this.apiKey = creds[AuthKeys.GOOGLE_API_KEY];
this.reverseProxyUrl = options.reverseProxyUrl;
this.authHeader = options.authHeader;
/** @type {UsageMetadata | undefined} */
this.usage;
/** The key for the usage object's input tokens
* @type {string} */
this.inputTokensKey = 'input_tokens';
/** The key for the usage object's output tokens
* @type {string} */
this.outputTokensKey = 'output_tokens';
this.visionMode = VisionModes.generative;
/** @type {string} */
this.systemMessage;
if (options.skipSetOptions) {
return;
}
@@ -83,7 +65,7 @@ class GoogleClient extends BaseClient {
/* Google specific methods */
constructUrl() {
return `https://${endpointPrefix}/v1/projects/${this.project_id}/locations/${loc}/publishers/${publisher}/models/${this.modelOptions.model}:serverStreamingPredict`;
return `${endpointPrefix}/v1/projects/${this.project_id}/locations/${loc}/publishers/${publisher}/models/${this.modelOptions.model}:serverStreamingPredict`;
}
async getClient() {
@@ -134,13 +116,22 @@ class GoogleClient extends BaseClient {
this.options = options;
}
this.options.examples = (this.options.examples ?? [])
.filter((ex) => ex)
.filter((obj) => obj.input.content !== '' && obj.output.content !== '');
this.modelOptions = this.options.modelOptions || {};
this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));
/** @type {boolean} Whether using a "GenerativeAI" Model */
this.isGenerativeModel =
this.modelOptions.model.includes('gemini') || this.modelOptions.model.includes('learnlm');
this.isGenerativeModel = this.modelOptions.model.includes('gemini');
const { isGenerativeModel } = this;
this.isChatModel = !isGenerativeModel && this.modelOptions.model.includes('chat');
const { isChatModel } = this;
this.isTextModel =
!isGenerativeModel && !isChatModel && /code|text/.test(this.modelOptions.model);
const { isTextModel } = this;
this.maxContextTokens =
this.options.maxContextTokens ??
@@ -176,18 +167,50 @@ class GoogleClient extends BaseClient {
this.userLabel = this.options.userLabel || 'User';
this.modelLabel = this.options.modelLabel || 'Assistant';
if (isChatModel || isGenerativeModel) {
// Use these faux tokens to help the AI understand the context since we are building the chat log ourselves.
// Trying to use "<|im_start|>" causes the AI to still generate "<" or "<|" at the end sometimes for some reason,
// without tripping the stop sequences, so I'm using "||>" instead.
this.startToken = '||>';
this.endToken = '';
this.gptEncoder = this.constructor.getTokenizer('cl100k_base');
} else if (isTextModel) {
this.startToken = '||>';
this.endToken = '';
this.gptEncoder = this.constructor.getTokenizer('text-davinci-003', true, {
'<|im_start|>': 100264,
'<|im_end|>': 100265,
});
} else {
// Previously I was trying to use "<|endoftext|>" but there seems to be some bug with OpenAI's token counting
// system that causes only the first "<|endoftext|>" to be counted as 1 token, and the rest are not treated
// as a single token. So we're using this instead.
this.startToken = '||>';
this.endToken = '';
try {
this.gptEncoder = this.constructor.getTokenizer(this.modelOptions.model, true);
} catch {
this.gptEncoder = this.constructor.getTokenizer('text-davinci-003', true);
}
}
if (!this.modelOptions.stop) {
const stopTokens = [this.startToken];
if (this.endToken && this.endToken !== this.startToken) {
stopTokens.push(this.endToken);
}
stopTokens.push(`\n${this.userLabel}:`);
stopTokens.push('<|diff_marker|>');
// I chose not to do one for `modelLabel` because I've never seen it happen
this.modelOptions.stop = stopTokens;
}
if (this.options.reverseProxyUrl) {
this.completionsUrl = this.options.reverseProxyUrl;
} else {
this.completionsUrl = this.constructUrl();
}
let promptPrefix = (this.options.promptPrefix ?? '').trim();
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
}
this.systemMessage = promptPrefix;
this.initializeClient();
return this;
}
@@ -219,29 +242,10 @@ class GoogleClient extends BaseClient {
}
formatMessages() {
return ((message) => {
const msg = {
author: message?.author ?? (message.isCreatedByUser ? this.userLabel : this.modelLabel),
content: message?.content ?? message.text,
};
if (!message.image_urls?.length) {
return msg;
}
msg.content = (
!Array.isArray(msg.content)
? [
{
type: ContentTypes.TEXT,
[ContentTypes.TEXT]: msg.content,
},
]
: msg.content
).concat(message.image_urls);
return msg;
}).bind(this);
return ((message) => ({
author: message?.author ?? (message.isCreatedByUser ? this.userLabel : this.modelLabel),
content: message?.content ?? message.text,
})).bind(this);
}
/**
@@ -316,7 +320,7 @@ class GoogleClient extends BaseClient {
}
this.augmentedPrompt = await this.contextHandlers.createContext();
this.systemMessage = this.augmentedPrompt + this.systemMessage;
this.options.promptPrefix = this.augmentedPrompt + this.options.promptPrefix;
}
}
@@ -339,6 +343,7 @@ class GoogleClient extends BaseClient {
messages: [new HumanMessage(formatMessage({ message: latestMessage }))],
},
],
parameters: this.modelOptions,
};
return { prompt: payload };
}
@@ -354,58 +359,23 @@ class GoogleClient extends BaseClient {
return { prompt: formattedMessages };
}
/**
* @param {TMessage[]} [messages=[]]
* @param {string} [parentMessageId]
*/
async buildMessages(_messages = [], parentMessageId) {
async buildMessages(messages = [], parentMessageId) {
if (!this.isGenerativeModel && !this.project_id) {
throw new Error('[GoogleClient] PaLM 2 and Codey models are no longer supported.');
throw new Error(
'[GoogleClient] a Service Account JSON Key is required for PaLM 2 and Codey models (Vertex AI)',
);
}
if (this.systemMessage) {
const instructionsTokenCount = this.getTokenCount(this.systemMessage);
this.maxContextTokens = this.maxContextTokens - instructionsTokenCount;
if (this.maxContextTokens < 0) {
const info = `${instructionsTokenCount} / ${this.maxContextTokens}`;
const errorMessage = `{ "type": "${ErrorTypes.INPUT_LENGTH}", "info": "${info}" }`;
logger.warn(`Instructions token count exceeds max context (${info}).`);
throw new Error(errorMessage);
}
}
for (let i = 0; i < _messages.length; i++) {
const message = _messages[i];
if (!message.tokenCount) {
_messages[i].tokenCount = this.getTokenCountForMessage({
role: message.isCreatedByUser ? 'user' : 'assistant',
content: message.content ?? message.text,
});
}
}
const {
payload: messages,
tokenCountMap,
promptTokens,
} = await this.handleContextStrategy({
orderedMessages: _messages,
formattedMessages: _messages,
});
if (!this.project_id && !EXCLUDED_GENAI_MODELS.test(this.modelOptions.model)) {
const result = await this.buildGenerativeMessages(messages);
result.tokenCountMap = tokenCountMap;
result.promptTokens = promptTokens;
return result;
if (!this.project_id && this.modelOptions.model.includes('1.5')) {
return await this.buildGenerativeMessages(messages);
}
if (this.options.attachments && this.isGenerativeModel) {
const result = this.buildVisionMessages(messages, parentMessageId);
result.tokenCountMap = tokenCountMap;
result.promptTokens = promptTokens;
return result;
return this.buildVisionMessages(messages, parentMessageId);
}
if (this.isTextModel) {
return this.buildMessagesPrompt(messages, parentMessageId);
}
let payload = {
@@ -417,14 +387,25 @@ class GoogleClient extends BaseClient {
.map((message) => formatMessage({ message, langChain: true })),
},
],
parameters: this.modelOptions,
};
if (this.systemMessage) {
payload.instances[0].context = this.systemMessage;
let promptPrefix = (this.options.promptPrefix ?? '').trim();
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
}
if (promptPrefix) {
payload.instances[0].context = promptPrefix;
}
if (this.options.examples.length > 0) {
payload.instances[0].examples = this.options.examples;
}
logger.debug('[GoogleClient] buildMessages', payload);
return { prompt: payload, tokenCountMap, promptTokens };
return { prompt: payload };
}
async buildMessagesPrompt(messages, parentMessageId) {
@@ -438,7 +419,10 @@ class GoogleClient extends BaseClient {
parentMessageId,
});
const formattedMessages = orderedMessages.map(this.formatMessages());
const formattedMessages = orderedMessages.map((message) => ({
author: message.isCreatedByUser ? this.userLabel : this.modelLabel,
content: message?.content ?? message.text,
}));
let lastAuthor = '';
let groupedMessages = [];
@@ -466,7 +450,17 @@ class GoogleClient extends BaseClient {
identityPrefix = `${identityPrefix}\nYou are ${this.options.modelLabel}`;
}
let promptPrefix = (this.systemMessage ?? '').trim();
let promptPrefix = (this.options.promptPrefix ?? '').trim();
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
}
if (promptPrefix) {
// If the prompt prefix doesn't end with the end token, add it.
if (!promptPrefix.endsWith(`${this.endToken}`)) {
promptPrefix = `${promptPrefix.trim()}${this.endToken}\n\n`;
}
promptPrefix = `\nContext:\n${promptPrefix}`;
}
if (identityPrefix) {
promptPrefix = `${identityPrefix}${promptPrefix}`;
@@ -503,7 +497,7 @@ class GoogleClient extends BaseClient {
isCreatedByUser || !isEdited
? `\n\n${message.author}:`
: `${promptPrefix}\n\n${message.author}:`;
const messageString = `${messagePrefix}\n${message.content}\n`;
const messageString = `${messagePrefix}\n${message.content}${this.endToken}\n`;
let newPromptBody = `${messageString}${promptBody}`;
context.unshift(message);
@@ -569,48 +563,71 @@ class GoogleClient extends BaseClient {
return { prompt, context };
}
async _getCompletion(payload, abortController = null) {
if (!abortController) {
abortController = new AbortController();
}
const { debug } = this.options;
const url = this.completionsUrl;
if (debug) {
logger.debug('GoogleClient _getCompletion', { url, payload });
}
const opts = {
method: 'POST',
agent: new Agent({
bodyTimeout: 0,
headersTimeout: 0,
}),
signal: abortController.signal,
};
if (this.options.proxy) {
opts.agent = new ProxyAgent(this.options.proxy);
}
const client = await this.getClient();
const res = await client.request({ url, method: 'POST', data: payload });
logger.debug('GoogleClient _getCompletion', { res });
return res.data;
}
createLLM(clientOptions) {
const model = clientOptions.modelName ?? clientOptions.model;
clientOptions.location = loc;
clientOptions.endpoint = endpointPrefix;
let requestOptions = null;
if (this.reverseProxyUrl) {
requestOptions = {
baseUrl: this.reverseProxyUrl,
};
if (this.authHeader) {
requestOptions.customHeaders = {
Authorization: `Bearer ${this.apiKey}`,
};
}
}
if (this.project_id != null) {
clientOptions.endpoint = `${loc}-aiplatform.googleapis.com`;
if (this.project_id && this.isTextModel) {
logger.debug('Creating Google VertexAI client');
return new GoogleVertexAI(clientOptions);
} else if (this.project_id && this.isChatModel) {
logger.debug('Creating Chat Google VertexAI client');
return new ChatGoogleVertexAI(clientOptions);
} else if (this.project_id) {
logger.debug('Creating VertexAI client');
this.visionMode = undefined;
clientOptions.streaming = true;
const client = new ChatVertexAI(clientOptions);
client.temperature = clientOptions.temperature;
client.topP = clientOptions.topP;
client.topK = clientOptions.topK;
client.topLogprobs = clientOptions.topLogprobs;
client.frequencyPenalty = clientOptions.frequencyPenalty;
client.presencePenalty = clientOptions.presencePenalty;
client.maxOutputTokens = clientOptions.maxOutputTokens;
return client;
} else if (!EXCLUDED_GENAI_MODELS.test(model)) {
return new ChatVertexAI(clientOptions);
} else if (model.includes('1.5')) {
logger.debug('Creating GenAI client');
return new GenAI(this.apiKey).getGenerativeModel({ model }, requestOptions);
return new GenAI(this.apiKey).getGenerativeModel(
{
...clientOptions,
model,
},
{ apiVersion: 'v1beta' },
);
}
logger.debug('Creating Chat Google Generative AI client');
return new ChatGoogleGenerativeAI({ ...clientOptions, apiKey: this.apiKey });
}
initializeClient() {
let clientOptions = { ...this.modelOptions };
async getCompletion(_payload, options = {}) {
const { parameters, instances } = _payload;
const { onProgress, abortController } = options;
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
const { messages: _messages, context, examples: _examples } = instances?.[0] ?? {};
let examples;
let clientOptions = { ...parameters, maxRetries: 2 };
if (this.project_id) {
clientOptions['authOptions'] = {
@@ -621,248 +638,185 @@ class GoogleClient extends BaseClient {
};
}
if (!parameters) {
clientOptions = { ...clientOptions, ...this.modelOptions };
}
if (this.isGenerativeModel && !this.project_id) {
clientOptions.modelName = clientOptions.model;
delete clientOptions.model;
}
this.client = this.createLLM(clientOptions);
return this.client;
}
if (_examples && _examples.length) {
examples = _examples
.map((ex) => {
const { input, output } = ex;
if (!input || !output) {
return undefined;
}
return {
input: new HumanMessage(input.content),
output: new AIMessage(output.content),
};
})
.filter((ex) => ex);
async getCompletion(_payload, options = {}) {
const { onProgress, abortController } = options;
const safetySettings = getSafetySettings(this.modelOptions.model);
const streamRate = this.options.streamRate ?? Constants.DEFAULT_STREAM_RATE;
const modelName = this.modelOptions.modelName ?? this.modelOptions.model ?? '';
clientOptions.examples = examples;
}
const model = this.createLLM(clientOptions);
let reply = '';
/** @type {Error} */
let error;
try {
if (!EXCLUDED_GENAI_MODELS.test(modelName) && !this.project_id) {
/** @type {GenerativeModel} */
const client = this.client;
/** @type {GenerateContentRequest} */
const requestOptions = {
safetySettings,
contents: _payload,
generationConfig: googleGenConfigSchema.parse(this.modelOptions),
const messages = this.isTextModel ? _payload.trim() : _messages;
if (!this.isVisionModel && context && messages?.length > 0) {
messages.unshift(new SystemMessage(context));
}
const modelName = clientOptions.modelName ?? clientOptions.model ?? '';
if (modelName?.includes('1.5') && !this.project_id) {
const client = model;
const requestOptions = {
contents: _payload,
};
let promptPrefix = (this.options.promptPrefix ?? '').trim();
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
}
if (this.options?.promptPrefix?.length) {
requestOptions.systemInstruction = {
parts: [
{
text: promptPrefix,
},
],
};
const promptPrefix = (this.systemMessage ?? '').trim();
if (promptPrefix.length) {
requestOptions.systemInstruction = {
parts: [
{
text: promptPrefix,
},
],
};
}
const delay = modelName.includes('flash') ? 8 : 15;
/** @type {GenAIUsageMetadata} */
let usageMetadata;
abortController.signal.addEventListener(
'abort',
() => {
logger.warn('[GoogleClient] Request was aborted', abortController.signal.reason);
},
{ once: true },
);
const result = await client.generateContentStream(requestOptions, {
signal: abortController.signal,
});
for await (const chunk of result.stream) {
usageMetadata = !usageMetadata
? chunk?.usageMetadata
: Object.assign(usageMetadata, chunk?.usageMetadata);
const chunkText = chunk.text();
await this.generateTextStream(chunkText, onProgress, {
delay,
});
reply += chunkText;
await sleep(streamRate);
}
if (usageMetadata) {
this.usage = {
input_tokens: usageMetadata.promptTokenCount,
output_tokens: usageMetadata.candidatesTokenCount,
};
}
return reply;
}
const { instances } = _payload;
const { messages: messages, context } = instances?.[0] ?? {};
requestOptions.safetySettings = _payload.safetySettings;
if (!this.isVisionModel && context && messages?.length > 0) {
messages.unshift(new SystemMessage(context));
}
/** @type {import('@langchain/core/messages').AIMessageChunk['usage_metadata']} */
let usageMetadata;
/** @type {ChatVertexAI} */
const client = this.client;
const stream = await client.stream(messages, {
signal: abortController.signal,
streamUsage: true,
safetySettings,
});
let delay = this.options.streamRate || 8;
if (!this.options.streamRate) {
if (this.isGenerativeModel) {
delay = 15;
}
if (modelName.includes('flash')) {
delay = 5;
}
}
for await (const chunk of stream) {
if (chunk?.usage_metadata) {
const metadata = chunk.usage_metadata;
for (const key in metadata) {
if (Number.isNaN(metadata[key])) {
delete metadata[key];
}
}
usageMetadata = !usageMetadata ? metadata : concat(usageMetadata, metadata);
}
const chunkText = chunk?.content ?? '';
const delay = modelName.includes('flash') ? 8 : 14;
const result = await client.generateContentStream(requestOptions);
for await (const chunk of result.stream) {
const chunkText = chunk.text();
await this.generateTextStream(chunkText, onProgress, {
delay,
});
reply += chunkText;
await sleep(streamRate);
}
if (usageMetadata) {
this.usage = usageMetadata;
}
} catch (e) {
error = e;
logger.error('[GoogleClient] There was an issue generating the completion', e);
return reply;
}
if (error != null && reply === '') {
const errorMessage = `{ "type": "${ErrorTypes.GoogleError}", "info": "${
error.message ?? 'The Google provider failed to generate content, please contact the Admin.'
}" }`;
throw new Error(errorMessage);
const stream = await model.stream(messages, {
signal: abortController.signal,
timeout: 7000,
safetySettings: _payload.safetySettings,
});
let delay = this.options.streamRate || 8;
if (!this.options.streamRate) {
if (this.isGenerativeModel) {
delay = 12;
}
if (modelName.includes('flash')) {
delay = 5;
}
}
for await (const chunk of stream) {
const chunkText = chunk?.content ?? chunk;
await this.generateTextStream(chunkText, onProgress, {
delay,
});
reply += chunkText;
}
return reply;
}
/**
* Get stream usage as returned by this client's API response.
* @returns {UsageMetadata} The stream usage object.
*/
getStreamUsage() {
return this.usage;
}
/**
* Calculates the correct token count for the current user message based on the token count map and API usage.
* Edge case: If the calculation results in a negative value, it returns the original estimate.
* If revisiting a conversation with a chat history entirely composed of token estimates,
* the cumulative token count going forward should become more accurate as the conversation progresses.
* @param {Object} params - The parameters for the calculation.
* @param {Record<string, number>} params.tokenCountMap - A map of message IDs to their token counts.
* @param {string} params.currentMessageId - The ID of the current message to calculate.
* @param {UsageMetadata} params.usage - The usage object returned by the API.
* @returns {number} The correct token count for the current user message.
*/
calculateCurrentTokenCount({ tokenCountMap, currentMessageId, usage }) {
const originalEstimate = tokenCountMap[currentMessageId] || 0;
if (!usage || typeof usage.input_tokens !== 'number') {
return originalEstimate;
}
tokenCountMap[currentMessageId] = 0;
const totalTokensFromMap = Object.values(tokenCountMap).reduce((sum, count) => {
const numCount = Number(count);
return sum + (isNaN(numCount) ? 0 : numCount);
}, 0);
const totalInputTokens = usage.input_tokens ?? 0;
const currentMessageTokens = totalInputTokens - totalTokensFromMap;
return currentMessageTokens > 0 ? currentMessageTokens : originalEstimate;
}
/**
* @param {object} params
* @param {number} params.promptTokens
* @param {number} params.completionTokens
* @param {UsageMetadata} [params.usage]
* @param {string} [params.model]
* @param {string} [params.context='message']
* @returns {Promise<void>}
*/
async recordTokenUsage({ promptTokens, completionTokens, model, context = 'message' }) {
await spendTokens(
{
context,
user: this.user ?? this.options.req?.user?.id,
conversationId: this.conversationId,
model: model ?? this.modelOptions.model,
endpointTokenConfig: this.options.endpointTokenConfig,
},
{ promptTokens, completionTokens },
);
}
/**
* Stripped-down logic for generating a title. This uses the non-streaming APIs, since the user does not see titles streaming
*/
async titleChatCompletion(_payload, options = {}) {
let reply = '';
const { abortController } = options;
const { parameters, instances } = _payload;
const { messages: _messages, examples: _examples } = instances?.[0] ?? {};
const model = this.modelOptions.modelName ?? this.modelOptions.model ?? '';
const safetySettings = getSafetySettings(model);
if (!EXCLUDED_GENAI_MODELS.test(model) && !this.project_id) {
logger.debug('Identified titling model as GenAI version');
let clientOptions = { ...parameters, maxRetries: 2 };
logger.debug('Initialized title client options');
if (this.project_id) {
clientOptions['authOptions'] = {
credentials: {
...this.serviceKey,
},
projectId: this.project_id,
};
}
if (!parameters) {
clientOptions = { ...clientOptions, ...this.modelOptions };
}
if (this.isGenerativeModel && !this.project_id) {
clientOptions.modelName = clientOptions.model;
delete clientOptions.model;
}
const model = this.createLLM(clientOptions);
let reply = '';
const messages = this.isTextModel ? _payload.trim() : _messages;
const modelName = clientOptions.modelName ?? clientOptions.model ?? '';
if (modelName?.includes('1.5') && !this.project_id) {
logger.debug('Identified titling model as 1.5 version');
/** @type {GenerativeModel} */
const client = this.client;
const client = model;
const requestOptions = {
contents: _payload,
safetySettings,
generationConfig: {
temperature: 0.5,
},
};
const result = await client.generateContent(requestOptions);
reply = result.response?.text();
return reply;
} else {
const { instances } = _payload;
const { messages } = instances?.[0] ?? {};
const titleResponse = await this.client.invoke(messages, {
signal: abortController.signal,
timeout: 7000,
safetySettings,
});
if (titleResponse.usage_metadata) {
await this.recordTokenUsage({
model,
promptTokens: titleResponse.usage_metadata.input_tokens,
completionTokens: titleResponse.usage_metadata.output_tokens,
context: 'title',
});
let promptPrefix = (this.options.promptPrefix ?? '').trim();
if (typeof this.options.artifactsPrompt === 'string' && this.options.artifactsPrompt) {
promptPrefix = `${promptPrefix ?? ''}\n${this.options.artifactsPrompt}`.trim();
}
if (this.options?.promptPrefix?.length) {
requestOptions.systemInstruction = {
parts: [
{
text: promptPrefix,
},
],
};
}
const safetySettings = _payload.safetySettings;
requestOptions.safetySettings = safetySettings;
const result = await client.generateContent(requestOptions);
reply = result.response?.text();
return reply;
} else {
logger.debug('Beginning titling');
const safetySettings = _payload.safetySettings;
const titleResponse = await model.invoke(messages, {
signal: abortController.signal,
timeout: 7000,
safetySettings: safetySettings,
});
reply = titleResponse.content;
// TODO: RECORD TOKEN USAGE
return reply;
}
}
@@ -886,8 +840,15 @@ class GoogleClient extends BaseClient {
},
]);
if (this.isVisionModel) {
logger.warn(
`Current vision model does not support titling without an attachment; falling back to default model ${settings.model.default}`,
);
payload.parameters = { ...payload.parameters, model: settings.model.default };
}
try {
this.initializeClient();
title = await this.titleChatCompletion(payload, {
abortController: new AbortController(),
onProgress: () => {},
@@ -901,10 +862,8 @@ class GoogleClient extends BaseClient {
getSaveOptions() {
return {
endpointType: null,
artifacts: this.options.artifacts,
promptPrefix: this.options.promptPrefix,
maxContextTokens: this.options.maxContextTokens,
modelLabel: this.options.modelLabel,
iconURL: this.options.iconURL,
greeting: this.options.greeting,
@@ -918,39 +877,53 @@ class GoogleClient extends BaseClient {
}
async sendCompletion(payload, opts = {}) {
payload.safetySettings = this.getSafetySettings();
let reply = '';
reply = await this.getCompletion(payload, opts);
return reply.trim();
}
getEncoding() {
return 'cl100k_base';
}
async getVertexTokenCount(text) {
/** @type {ChatVertexAI} */
const client = this.client ?? this.initializeClient();
const connection = client.connection;
const gAuthClient = connection.client;
const tokenEndpoint = `https://${connection._endpoint}/${connection.apiVersion}/projects/${this.project_id}/locations/${connection._location}/publishers/google/models/${connection.model}/:countTokens`;
const result = await gAuthClient.request({
url: tokenEndpoint,
method: 'POST',
data: {
contents: [{ role: 'user', parts: [{ text }] }],
getSafetySettings() {
return [
{
category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
threshold:
process.env.GOOGLE_SAFETY_SEXUALLY_EXPLICIT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
});
return result;
{
category: 'HARM_CATEGORY_HATE_SPEECH',
threshold: process.env.GOOGLE_SAFETY_HATE_SPEECH || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
{
category: 'HARM_CATEGORY_HARASSMENT',
threshold: process.env.GOOGLE_SAFETY_HARASSMENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
{
category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
threshold:
process.env.GOOGLE_SAFETY_DANGEROUS_CONTENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
},
];
}
/* TO-DO: Handle tokens with Google tokenization NOTE: these are required */
static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {
if (tokenizersCache[encoding]) {
return tokenizersCache[encoding];
}
let tokenizer;
if (isModelName) {
tokenizer = encodingForModel(encoding, extendSpecialTokens);
} else {
tokenizer = getEncoding(encoding, extendSpecialTokens);
}
tokenizersCache[encoding] = tokenizer;
return tokenizer;
}
/**
* Returns the token count of a given text. It also checks and resets the tokenizers if necessary.
* @param {string} text - The text to get the token count for.
* @returns {number} The token count of the given text.
*/
getTokenCount(text) {
const encoding = this.getEncoding();
return Tokenizer.getTokenCount(text, encoding);
return this.gptEncoder.encode(text, 'all').length;
}
}

View File

@@ -2,7 +2,7 @@ const { z } = require('zod');
const axios = require('axios');
const { Ollama } = require('ollama');
const { Constants } = require('librechat-data-provider');
const { deriveBaseURL, logAxiosError } = require('~/utils');
const { deriveBaseURL } = require('~/utils');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');
@@ -68,7 +68,7 @@ class OllamaClient {
} catch (error) {
const logMessage =
'Failed to fetch models from Ollama API. If you are not using Ollama directly, and instead, through some aggregator or reverse proxy that handles fetching via OpenAI spec, ensure the name of the endpoint doesn\'t start with `ollama` (case-insensitive).';
logAxiosError({ message: logMessage, error });
logger.error(logMessage, error);
return [];
}
}

View File

@@ -1,13 +1,11 @@
const OpenAI = require('openai');
const { OllamaClient } = require('./OllamaClient');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { SplitStreamHandler, GraphEvents } = require('@librechat/agents');
const {
Constants,
ImageDetail,
EModelEndpoint,
resolveHeaders,
KnownEndpoints,
openAISettings,
ImageDetailCost,
CohereConstants,
@@ -15,6 +13,7 @@ const {
validateVisionModel,
mapModelToAzureConfig,
} = require('librechat-data-provider');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
extractBaseURL,
constructAzureURL,
@@ -30,17 +29,21 @@ const {
createContextHandlers,
} = require('./prompts');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { addSpaceIfNeeded, isEnabled, sleep } = require('~/server/utils');
const Tokenizer = require('~/server/services/Tokenizer');
const { spendTokens } = require('~/models/spendTokens');
const { isEnabled, sleep } = require('~/server/utils');
const { handleOpenAIErrors } = require('./tools/util');
const { createLLM, RunManager } = require('./llm');
const { logger, sendEvent } = require('~/config');
const ChatGPTClient = require('./ChatGPTClient');
const { summaryBuffer } = require('./memory');
const { runTitleChain } = require('./chains');
const { tokenSplit } = require('./document');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
// Cache to store Tiktoken instances
const tokenizersCache = {};
// Counter for keeping track of the number of tokenizer calls
let tokenizerCallsCount = 0;
class OpenAIClient extends BaseClient {
constructor(apiKey, options = {}) {
@@ -66,9 +69,7 @@ class OpenAIClient extends BaseClient {
/** @type {OpenAIUsageMetadata | undefined} */
this.usage;
/** @type {boolean|undefined} */
this.isOmni;
/** @type {SplitStreamHandler | undefined} */
this.streamHandler;
this.isO1Model;
}
// TODO: PluginsClient calls this 3x, unneeded
@@ -106,13 +107,21 @@ class OpenAIClient extends BaseClient {
this.checkVisionRequest(this.options.attachments);
}
const omniPattern = /\b(o1|o3)\b/i;
this.isOmni = omniPattern.test(this.modelOptions.model);
this.isO1Model = /\bo1\b/i.test(this.modelOptions.model);
const { OPENROUTER_API_KEY, OPENAI_FORCE_PROMPT } = process.env ?? {};
if (OPENROUTER_API_KEY && !this.azure) {
this.apiKey = OPENROUTER_API_KEY;
this.useOpenRouter = true;
}
const { OPENAI_FORCE_PROMPT } = process.env ?? {};
const { reverseProxyUrl: reverseProxy } = this.options;
if (!this.useOpenRouter && reverseProxy && reverseProxy.includes(KnownEndpoints.openrouter)) {
if (
!this.useOpenRouter &&
reverseProxy &&
reverseProxy.includes('https://openrouter.ai/api/v1')
) {
this.useOpenRouter = true;
}
@@ -138,7 +147,7 @@ class OpenAIClient extends BaseClient {
const { model } = this.modelOptions;
this.isChatCompletion =
omniPattern.test(model) || model.includes('gpt') || this.useOpenRouter || !!reverseProxy;
/\bo1\b/i.test(model) || model.includes('gpt') || this.useOpenRouter || !!reverseProxy;
this.isChatGptModel = this.isChatCompletion;
if (
model.includes('text-davinci') ||
@@ -297,8 +306,75 @@ class OpenAIClient extends BaseClient {
}
}
getEncoding() {
return this.model?.includes('gpt-4o') ? 'o200k_base' : 'cl100k_base';
// Selects an appropriate tokenizer based on the current configuration of the client instance.
// It takes into account factors such as whether it's a chat completion, an unofficial chat GPT model, etc.
selectTokenizer() {
let tokenizer;
this.encoding = 'text-davinci-003';
if (this.isChatCompletion) {
this.encoding = this.modelOptions.model.includes('gpt-4o') ? 'o200k_base' : 'cl100k_base';
tokenizer = this.constructor.getTokenizer(this.encoding);
} else if (this.isUnofficialChatGptModel) {
const extendSpecialTokens = {
'<|im_start|>': 100264,
'<|im_end|>': 100265,
};
tokenizer = this.constructor.getTokenizer(this.encoding, true, extendSpecialTokens);
} else {
try {
const { model } = this.modelOptions;
this.encoding = model.includes('instruct') ? 'text-davinci-003' : model;
tokenizer = this.constructor.getTokenizer(this.encoding, true);
} catch {
tokenizer = this.constructor.getTokenizer('text-davinci-003', true);
}
}
return tokenizer;
}
// Retrieves a tokenizer either from the cache or creates a new one if one doesn't exist in the cache.
// If a tokenizer is being created, it's also added to the cache.
static getTokenizer(encoding, isModelName = false, extendSpecialTokens = {}) {
let tokenizer;
if (tokenizersCache[encoding]) {
tokenizer = tokenizersCache[encoding];
} else {
if (isModelName) {
tokenizer = encodingForModel(encoding, extendSpecialTokens);
} else {
tokenizer = getEncoding(encoding, extendSpecialTokens);
}
tokenizersCache[encoding] = tokenizer;
}
return tokenizer;
}
// Frees all encoders in the cache and resets the count.
static freeAndResetAllEncoders() {
try {
Object.keys(tokenizersCache).forEach((key) => {
if (tokenizersCache[key]) {
tokenizersCache[key].free();
delete tokenizersCache[key];
}
});
// Reset count
tokenizerCallsCount = 1;
} catch (error) {
logger.error('[OpenAIClient] Free and reset encoders error', error);
}
}
// Checks if the cache of tokenizers has reached a certain size. If it has, it frees and resets all tokenizers.
resetTokenizersIfNecessary() {
if (tokenizerCallsCount >= 25) {
if (this.options.debug) {
logger.debug('[OpenAIClient] freeAndResetAllEncoders: reached 25 encodings, resetting...');
}
this.constructor.freeAndResetAllEncoders();
}
tokenizerCallsCount++;
}
/**
@@ -307,8 +383,15 @@ class OpenAIClient extends BaseClient {
* @returns {number} The token count of the given text.
*/
getTokenCount(text) {
const encoding = this.getEncoding();
return Tokenizer.getTokenCount(text, encoding);
this.resetTokenizersIfNecessary();
try {
const tokenizer = this.selectTokenizer();
return tokenizer.encode(text, 'all').length;
} catch (error) {
this.constructor.freeAndResetAllEncoders();
const tokenizer = this.selectTokenizer();
return tokenizer.encode(text, 'all').length;
}
}
/**
@@ -340,7 +423,6 @@ class OpenAIClient extends BaseClient {
promptPrefix: this.options.promptPrefix,
resendFiles: this.options.resendFiles,
imageDetail: this.options.imageDetail,
modelLabel: this.options.modelLabel,
iconURL: this.options.iconURL,
greeting: this.options.greeting,
spec: this.options.spec,
@@ -467,7 +549,7 @@ class OpenAIClient extends BaseClient {
promptPrefix = this.augmentedPrompt + promptPrefix;
}
if (promptPrefix && this.isOmni !== true) {
if (promptPrefix && this.isO1Model !== true) {
promptPrefix = `Instructions:\n${promptPrefix.trim()}`;
instructions = {
role: 'system',
@@ -495,11 +577,12 @@ class OpenAIClient extends BaseClient {
};
/** EXPERIMENTAL */
if (promptPrefix && this.isOmni === true) {
if (promptPrefix && this.isO1Model === true) {
const lastUserMessageIndex = payload.findLastIndex((message) => message.role === 'user');
if (lastUserMessageIndex !== -1) {
payload[lastUserMessageIndex].content =
`${promptPrefix}\n${payload[lastUserMessageIndex].content}`;
payload[
lastUserMessageIndex
].content = `${promptPrefix}\n${payload[lastUserMessageIndex].content}`;
}
}
@@ -605,9 +688,11 @@ class OpenAIClient extends BaseClient {
}
initializeLLM({
model = 'gpt-4o-mini',
model = 'gpt-3.5-turbo',
modelName,
temperature = 0.2,
presence_penalty = 0,
frequency_penalty = 0,
max_tokens,
streaming,
context,
@@ -618,6 +703,8 @@ class OpenAIClient extends BaseClient {
const modelOptions = {
modelName: modelName ?? model,
temperature,
presence_penalty,
frequency_penalty,
user: this.user,
};
@@ -706,7 +793,7 @@ class OpenAIClient extends BaseClient {
const { OPENAI_TITLE_MODEL } = process.env ?? {};
let model = this.options.titleModel ?? OPENAI_TITLE_MODEL ?? 'gpt-4o-mini';
let model = this.options.titleModel ?? OPENAI_TITLE_MODEL ?? 'gpt-3.5-turbo';
if (model === Constants.CURRENT_MODEL) {
model = this.modelOptions.model;
}
@@ -751,12 +838,6 @@ class OpenAIClient extends BaseClient {
this.options.dropParams = azureConfig.groupMap[groupName].dropParams;
this.options.forcePrompt = azureConfig.groupMap[groupName].forcePrompt;
this.azure = !serverless && azureOptions;
if (serverless === true) {
this.options.defaultQuery = azureOptions.azureOpenAIApiVersion
? { 'api-version': azureOptions.azureOpenAIApiVersion }
: undefined;
this.options.headers['api-key'] = this.apiKey;
}
}
const titleChatCompletion = async () => {
@@ -788,11 +869,7 @@ ${convo}
}
title = (
await this.sendPayload(instructionsPayload, {
modelOptions,
useChatCompletion,
context: 'title',
})
await this.sendPayload(instructionsPayload, { modelOptions, useChatCompletion })
).replaceAll('"', '');
const completionTokens = this.getTokenCount(title);
@@ -899,7 +976,7 @@ ${convo}
let prompt;
// TODO: remove the gpt fallback and make it specific to endpoint
const { OPENAI_SUMMARY_MODEL = 'gpt-4o-mini' } = process.env ?? {};
const { OPENAI_SUMMARY_MODEL = 'gpt-3.5-turbo' } = process.env ?? {};
let model = this.options.summaryModel ?? OPENAI_SUMMARY_MODEL;
if (model === Constants.CURRENT_MODEL) {
model = this.modelOptions.model;
@@ -925,10 +1002,7 @@ ${convo}
);
if (excessTokenCount > maxContextTokens) {
({ context } = await this.getMessagesWithinTokenLimit({
messages: context,
maxContextTokens,
}));
({ context } = await this.getMessagesWithinTokenLimit(context, maxContextTokens));
}
if (context.length === 0) {
@@ -1058,58 +1132,10 @@ ${convo}
});
}
/**
*
* @param {string[]} [intermediateReply]
* @returns {string}
*/
getStreamText(intermediateReply) {
if (!this.streamHandler) {
return intermediateReply?.join('') ?? '';
}
let thinkMatch;
let remainingText;
let reasoningText = '';
if (this.streamHandler.reasoningTokens.length > 0) {
reasoningText = this.streamHandler.reasoningTokens.join('');
thinkMatch = reasoningText.match(/<think>([\s\S]*?)<\/think>/)?.[1]?.trim();
if (thinkMatch != null && thinkMatch) {
const reasoningTokens = `:::thinking\n${thinkMatch}\n:::\n`;
remainingText = reasoningText.split(/<\/think>/)?.[1]?.trim() || '';
return `${reasoningTokens}${remainingText}${this.streamHandler.tokens.join('')}`;
} else if (thinkMatch === '') {
remainingText = reasoningText.split(/<\/think>/)?.[1]?.trim() || '';
return `${remainingText}${this.streamHandler.tokens.join('')}`;
}
}
const reasoningTokens =
reasoningText.length > 0
? `:::thinking\n${reasoningText.replace('<think>', '').replace('</think>', '').trim()}\n:::\n`
: '';
return `${reasoningTokens}${this.streamHandler.tokens.join('')}`;
}
getMessageMapMethod() {
/**
* @param {TMessage} msg
*/
return (msg) => {
if (msg.text != null && msg.text && msg.text.startsWith(':::thinking')) {
msg.text = msg.text.replace(/:::thinking.*?:::/gs, '').trim();
}
return msg;
};
}
async chatCompletion({ payload, onProgress, abortController = null }) {
let error = null;
let intermediateReply = [];
const errorCallback = (err) => (error = err);
const intermediateReply = [];
try {
if (!abortController) {
abortController = new AbortController();
@@ -1143,10 +1169,6 @@ ${convo}
opts.defaultHeaders = { ...opts.defaultHeaders, ...this.options.headers };
}
if (this.options.defaultQuery) {
opts.defaultQuery = this.options.defaultQuery;
}
if (this.options.proxy) {
opts.httpAgent = new HttpsProxyAgent(this.options.proxy);
}
@@ -1185,12 +1207,6 @@ ${convo}
this.azure = !serverless && azureOptions;
this.azureEndpoint =
!serverless && genAzureChatCompletion(this.azure, modelOptions.model, this);
if (serverless === true) {
this.options.defaultQuery = azureOptions.azureOpenAIApiVersion
? { 'api-version': azureOptions.azureOpenAIApiVersion }
: undefined;
this.options.headers['api-key'] = this.apiKey;
}
}
if (this.azure || this.options.azure) {
@@ -1213,7 +1229,7 @@ ${convo}
opts.defaultHeaders = { ...opts.defaultHeaders, 'api-key': this.apiKey };
}
if (this.isOmni === true && modelOptions.max_tokens != null) {
if (this.isO1Model === true && modelOptions.max_tokens != null) {
modelOptions.max_completion_tokens = modelOptions.max_tokens;
delete modelOptions.max_tokens;
}
@@ -1292,54 +1308,15 @@ ${convo}
/** @type {(value: void | PromiseLike<void>) => void} */
let streamResolve;
if (
this.isOmni === true &&
(this.azure || /o1(?!-(?:mini|preview)).*$/.test(modelOptions.model)) &&
!/o3-.*$/.test(this.modelOptions.model) &&
modelOptions.stream
) {
delete modelOptions.stream;
delete modelOptions.stop;
} else if (!this.isOmni && modelOptions.reasoning_effort != null) {
delete modelOptions.reasoning_effort;
}
let reasoningKey = 'reasoning_content';
if (this.useOpenRouter) {
modelOptions.include_reasoning = true;
reasoningKey = 'reasoning';
}
this.streamHandler = new SplitStreamHandler({
reasoningKey,
accumulate: true,
runId: this.responseMessageId,
handlers: {
[GraphEvents.ON_RUN_STEP]: (event) => sendEvent(this.options.res, event),
[GraphEvents.ON_MESSAGE_DELTA]: (event) => sendEvent(this.options.res, event),
[GraphEvents.ON_REASONING_DELTA]: (event) => sendEvent(this.options.res, event),
},
});
intermediateReply = this.streamHandler.tokens;
if (modelOptions.stream) {
streamPromise = new Promise((resolve) => {
streamResolve = resolve;
});
/** @type {OpenAI.OpenAI.CompletionCreateParamsStreaming} */
const params = {
...modelOptions,
stream: true,
};
if (
this.options.endpoint === EModelEndpoint.openAI ||
this.options.endpoint === EModelEndpoint.azureOpenAI
) {
params.stream_options = { include_usage: true };
}
const stream = await openai.beta.chat.completions
.stream(params)
.stream({
...modelOptions,
stream: true,
})
.on('abort', () => {
/* Do nothing here */
})
@@ -1357,44 +1334,20 @@ ${convo}
}
if (typeof finalMessage.content !== 'string' || finalMessage.content.trim() === '') {
finalChatCompletion.choices[0].message.content = this.streamHandler.tokens.join('');
finalChatCompletion.choices[0].message.content = intermediateReply.join('');
}
})
.on('finalMessage', (message) => {
if (message?.role !== 'assistant') {
stream.messages.push({
role: 'assistant',
content: this.streamHandler.tokens.join(''),
});
stream.messages.push({ role: 'assistant', content: intermediateReply.join('') });
UnexpectedRoleError = true;
}
});
if (this.continued === true) {
const latestText = addSpaceIfNeeded(
this.currentMessages[this.currentMessages.length - 1]?.text ?? '',
);
this.streamHandler.handle({
choices: [
{
delta: {
content: latestText,
},
},
],
});
}
for await (const chunk of stream) {
// Add finish_reason: null if missing in any choice
if (chunk.choices) {
chunk.choices.forEach((choice) => {
if (!('finish_reason' in choice)) {
choice.finish_reason = null;
}
});
}
this.streamHandler.handle(chunk);
const token = chunk.choices[0]?.delta?.content || '';
intermediateReply.push(token);
onProgress(token);
if (abortController.signal.aborted) {
stream.controller.abort();
break;
@@ -1437,7 +1390,7 @@ ${convo}
if (!Array.isArray(choices) || choices.length === 0) {
logger.warn('[OpenAIClient] Chat completion response has no choices');
return this.streamHandler.tokens.join('');
return intermediateReply.join('');
}
const { message, finish_reason } = choices[0] ?? {};
@@ -1447,11 +1400,11 @@ ${convo}
if (!message) {
logger.warn('[OpenAIClient] Message is undefined in chatCompletion response');
return this.streamHandler.tokens.join('');
return intermediateReply.join('');
}
if (typeof message.content !== 'string' || message.content.trim() === '') {
const reply = this.streamHandler.tokens.join('');
const reply = intermediateReply.join('');
logger.debug(
'[OpenAIClient] chatCompletion: using intermediateReply due to empty message.content',
{ intermediateReply: reply },
@@ -1459,27 +1412,13 @@ ${convo}
return reply;
}
if (
this.streamHandler.reasoningTokens.length > 0 &&
this.options.context !== 'title' &&
!message.content.startsWith('<think>')
) {
return this.getStreamText();
} else if (
this.streamHandler.reasoningTokens.length > 0 &&
this.options.context !== 'title' &&
message.content.startsWith('<think>')
) {
return this.getStreamText();
}
return message.content;
} catch (err) {
if (
err?.message?.includes('abort') ||
(err instanceof OpenAI.APIError && err?.message?.includes('abort'))
) {
return this.getStreamText(intermediateReply);
return intermediateReply.join('');
}
if (
err?.message?.includes(
@@ -1494,18 +1433,10 @@ ${convo}
(err instanceof OpenAI.OpenAIError && err?.message?.includes('missing finish_reason'))
) {
logger.error('[OpenAIClient] Known OpenAI error:', err);
if (this.streamHandler && this.streamHandler.reasoningTokens.length) {
return this.getStreamText();
} else if (intermediateReply.length > 0) {
return this.getStreamText(intermediateReply);
} else {
throw err;
}
return intermediateReply.join('');
} else if (err instanceof OpenAI.APIError) {
if (this.streamHandler && this.streamHandler.reasoningTokens.length) {
return this.getStreamText();
} else if (intermediateReply.length > 0) {
return this.getStreamText(intermediateReply);
if (intermediateReply.length > 0) {
return intermediateReply.join('');
} else {
throw err;
}

View File

@@ -1,4 +1,5 @@
const OpenAIClient = require('./OpenAIClient');
const { CacheKeys, Time } = require('librechat-data-provider');
const { CallbackManager } = require('@langchain/core/callbacks/manager');
const { BufferMemory, ChatMessageHistory } = require('langchain/memory');
const { addImages, buildErrorInput, buildPromptPrefix } = require('./output_parsers');
@@ -10,6 +11,7 @@ const checkBalance = require('~/models/checkBalance');
const { isEnabled } = require('~/server/utils');
const { extractBaseURL } = require('~/utils');
const { loadTools } = require('./tools/util');
const { getLogStores } = require('~/cache');
const { logger } = require('~/config');
class PluginsClient extends OpenAIClient {
@@ -41,7 +43,6 @@ class PluginsClient extends OpenAIClient {
return {
artifacts: this.options.artifacts,
chatGptLabel: this.options.chatGptLabel,
modelLabel: this.options.modelLabel,
promptPrefix: this.options.promptPrefix,
tools: this.options.tools,
...this.modelOptions,
@@ -104,7 +105,7 @@ class PluginsClient extends OpenAIClient {
chatHistory: new ChatMessageHistory(pastMessages),
});
const { loadedTools } = await loadTools({
this.tools = await loadTools({
user,
model,
tools: this.options.tools,
@@ -118,15 +119,12 @@ class PluginsClient extends OpenAIClient {
processFileURL,
message,
},
useSpecs: true,
});
if (loadedTools.length === 0) {
if (this.tools.length === 0) {
return;
}
this.tools = loadedTools;
logger.debug('[PluginsClient] Requested Tools', this.options.tools);
logger.debug(
'[PluginsClient] Loaded Tools',
@@ -254,6 +252,15 @@ class PluginsClient extends OpenAIClient {
}
this.responsePromise = this.saveMessageToDatabase(responseMessage, saveOptions, user);
const messageCache = getLogStores(CacheKeys.MESSAGES);
messageCache.set(
responseMessage.messageId,
{
text: responseMessage.text,
complete: true,
},
Time.FIVE_MINUTES,
);
delete responseMessage.tokenCount;
return { ...responseMessage, ...result };
}
@@ -280,6 +287,7 @@ class PluginsClient extends OpenAIClient {
logger.debug('[PluginsClient] sendMessage', { userMessageText: message, opts });
const {
user,
isEdited,
conversationId,
responseMessageId,
saveOptions,
@@ -358,6 +366,7 @@ class PluginsClient extends OpenAIClient {
conversationId,
parentMessageId: userMessage.messageId,
isCreatedByUser: false,
isEdited,
model: this.modelOptions.model,
sender: this.sender,
promptTokens,

View File

@@ -17,7 +17,7 @@ const { isEnabled } = require('~/server/utils');
*
* @example
* const llm = createLLM({
* modelOptions: { modelName: 'gpt-4o-mini', temperature: 0.2 },
* modelOptions: { modelName: 'gpt-3.5-turbo', temperature: 0.2 },
* configOptions: { basePath: 'https://example.api/path' },
* callbacks: { onMessage: handleMessage },
* openAIApiKey: 'your-api-key'

View File

@@ -3,7 +3,7 @@ const { ChatOpenAI } = require('@langchain/openai');
const { getBufferString, ConversationSummaryBufferMemory } = require('langchain/memory');
const chatPromptMemory = new ConversationSummaryBufferMemory({
llm: new ChatOpenAI({ modelName: 'gpt-4o-mini', temperature: 0 }),
llm: new ChatOpenAI({ modelName: 'gpt-3.5-turbo', temperature: 0 }),
maxTokenLimit: 10,
returnMessages: true,
});

View File

@@ -1,7 +1,7 @@
/**
* Anthropic API: Adds cache control to the appropriate user messages in the payload.
* @param {Array<AnthropicMessage | BaseMessage>} messages - The array of message objects.
* @returns {Array<AnthropicMessage | BaseMessage>} - The updated array of message objects with cache control added.
* @param {Array<AnthropicMessage>} messages - The array of message objects.
* @returns {Array<AnthropicMessage>} - The updated array of message objects with cache control added.
*/
function addCacheControl(messages) {
if (!Array.isArray(messages) || messages.length < 2) {
@@ -13,9 +13,7 @@ function addCacheControl(messages) {
for (let i = updatedMessages.length - 1; i >= 0 && userMessagesModified < 2; i--) {
const message = updatedMessages[i];
if (message.getType != null && message.getType() !== 'human') {
continue;
} else if (message.getType == null && message.role !== 'user') {
if (message.role !== 'user') {
continue;
}

View File

@@ -282,47 +282,4 @@ describe('formatAgentMessages', () => {
// Additional check to ensure the consecutive assistant messages were combined
expect(result[1].content).toHaveLength(2);
});
it('should skip THINK type content parts', () => {
const payload = [
{
role: 'assistant',
content: [
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Initial response' },
{ type: ContentTypes.THINK, [ContentTypes.THINK]: 'Reasoning about the problem...' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final answer' },
],
},
];
const result = formatAgentMessages(payload);
expect(result).toHaveLength(1);
expect(result[0]).toBeInstanceOf(AIMessage);
expect(result[0].content).toEqual('Initial response\nFinal answer');
});
it('should join TEXT content as string when THINK content type is present', () => {
const payload = [
{
role: 'assistant',
content: [
{ type: ContentTypes.THINK, [ContentTypes.THINK]: 'Analyzing the problem...' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'First part of response' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Second part of response' },
{ type: ContentTypes.TEXT, [ContentTypes.TEXT]: 'Final part of response' },
],
},
];
const result = formatAgentMessages(payload);
expect(result).toHaveLength(1);
expect(result[0]).toBeInstanceOf(AIMessage);
expect(typeof result[0].content).toBe('string');
expect(result[0].content).toBe(
'First part of response\nSecond part of response\nFinal part of response',
);
expect(result[0].content).not.toContain('Analyzing the problem...');
});
});

View File

@@ -153,7 +153,6 @@ const formatAgentMessages = (payload) => {
let currentContent = [];
let lastAIMessage = null;
let hasReasoning = false;
for (const part of message.content) {
if (part.type === ContentTypes.TEXT && part.tool_call_ids) {
/*
@@ -205,28 +204,14 @@ const formatAgentMessages = (payload) => {
new ToolMessage({
tool_call_id: tool_call.id,
name: tool_call.name,
content: output || '',
content: output,
}),
);
} else if (part.type === ContentTypes.THINK) {
hasReasoning = true;
continue;
} else {
currentContent.push(part);
}
}
if (hasReasoning) {
currentContent = currentContent
.reduce((acc, curr) => {
if (curr.type === ContentTypes.TEXT) {
return `${acc}${curr[ContentTypes.TEXT]}\n`;
}
return acc;
}, '')
.trim();
}
if (currentContent.length > 0) {
messages.push(new AIMessage({ content: currentContent }));
}

View File

@@ -60,6 +60,7 @@ describe('formatMessage', () => {
error: false,
finish_reason: null,
isCreatedByUser: true,
isEdited: false,
model: null,
parentMessageId: Constants.NO_PARENT,
sender: 'User',

View File

@@ -4,7 +4,7 @@ const summaryPrompts = require('./summaryPrompts');
const handleInputs = require('./handleInputs');
const instructions = require('./instructions');
const titlePrompts = require('./titlePrompts');
const truncate = require('./truncate');
const truncateText = require('./truncateText');
const createVisionPrompt = require('./createVisionPrompt');
const createContextHandlers = require('./createContextHandlers');
@@ -15,7 +15,7 @@ module.exports = {
...handleInputs,
...instructions,
...titlePrompts,
...truncate,
...truncateText,
createVisionPrompt,
createContextHandlers,
};

View File

@@ -1,115 +0,0 @@
const MAX_CHAR = 255;
/**
* Truncates a given text to a specified maximum length, appending ellipsis and a notification
* if the original text exceeds the maximum length.
*
* @param {string} text - The text to be truncated.
* @param {number} [maxLength=MAX_CHAR] - The maximum length of the text after truncation. Defaults to MAX_CHAR.
* @returns {string} The truncated text if the original text length exceeds maxLength, otherwise returns the original text.
*/
function truncateText(text, maxLength = MAX_CHAR) {
if (text.length > maxLength) {
return `${text.slice(0, maxLength)}... [text truncated for brevity]`;
}
return text;
}
/**
* Truncates a given text to a specified maximum length by showing the first half and the last half of the text,
* separated by ellipsis. This method ensures the output does not exceed the maximum length, including the addition
* of ellipsis and notification if the original text exceeds the maximum length.
*
* @param {string} text - The text to be truncated.
* @param {number} [maxLength=MAX_CHAR] - The maximum length of the output text after truncation. Defaults to MAX_CHAR.
* @returns {string} The truncated text showing the first half and the last half, or the original text if it does not exceed maxLength.
*/
function smartTruncateText(text, maxLength = MAX_CHAR) {
const ellipsis = '...';
const notification = ' [text truncated for brevity]';
const halfMaxLength = Math.floor((maxLength - ellipsis.length - notification.length) / 2);
if (text.length > maxLength) {
const startLastHalf = text.length - halfMaxLength;
return `${text.slice(0, halfMaxLength)}${ellipsis}${text.slice(startLastHalf)}${notification}`;
}
return text;
}
/**
* @param {TMessage[]} _messages
* @param {number} maxContextTokens
* @param {function({role: string, content: TMessageContent[]}): number} getTokenCountForMessage
*
* @returns {{
* dbMessages: TMessage[],
* editedIndices: number[]
* }}
*/
function truncateToolCallOutputs(_messages, maxContextTokens, getTokenCountForMessage) {
const THRESHOLD_PERCENTAGE = 0.5;
const targetTokenLimit = maxContextTokens * THRESHOLD_PERCENTAGE;
let currentTokenCount = 3;
const messages = [..._messages];
const processedMessages = [];
let currentIndex = messages.length;
const editedIndices = new Set();
while (messages.length > 0) {
currentIndex--;
const message = messages.pop();
currentTokenCount += message.tokenCount;
if (currentTokenCount < targetTokenLimit) {
processedMessages.push(message);
continue;
}
if (!message.content || !Array.isArray(message.content)) {
processedMessages.push(message);
continue;
}
const toolCallIndices = message.content
.map((item, index) => (item.type === 'tool_call' ? index : -1))
.filter((index) => index !== -1)
.reverse();
if (toolCallIndices.length === 0) {
processedMessages.push(message);
continue;
}
const newContent = [...message.content];
// Truncate all tool outputs since we're over threshold
for (const index of toolCallIndices) {
const toolCall = newContent[index].tool_call;
if (!toolCall || !toolCall.output) {
continue;
}
editedIndices.add(currentIndex);
newContent[index] = {
...newContent[index],
tool_call: {
...toolCall,
output: '[OUTPUT_OMITTED_FOR_BREVITY]',
},
};
}
const truncatedMessage = {
...message,
content: newContent,
tokenCount: getTokenCountForMessage({ role: 'assistant', content: newContent }),
};
processedMessages.push(truncatedMessage);
}
return { dbMessages: processedMessages.reverse(), editedIndices: Array.from(editedIndices) };
}
module.exports = { truncateText, smartTruncateText, truncateToolCallOutputs };

View File

@@ -0,0 +1,40 @@
const MAX_CHAR = 255;
/**
* Truncates a given text to a specified maximum length, appending ellipsis and a notification
* if the original text exceeds the maximum length.
*
* @param {string} text - The text to be truncated.
* @param {number} [maxLength=MAX_CHAR] - The maximum length of the text after truncation. Defaults to MAX_CHAR.
* @returns {string} The truncated text if the original text length exceeds maxLength, otherwise returns the original text.
*/
function truncateText(text, maxLength = MAX_CHAR) {
if (text.length > maxLength) {
return `${text.slice(0, maxLength)}... [text truncated for brevity]`;
}
return text;
}
/**
* Truncates a given text to a specified maximum length by showing the first half and the last half of the text,
* separated by ellipsis. This method ensures the output does not exceed the maximum length, including the addition
* of ellipsis and notification if the original text exceeds the maximum length.
*
* @param {string} text - The text to be truncated.
* @param {number} [maxLength=MAX_CHAR] - The maximum length of the output text after truncation. Defaults to MAX_CHAR.
* @returns {string} The truncated text showing the first half and the last half, or the original text if it does not exceed maxLength.
*/
function smartTruncateText(text, maxLength = MAX_CHAR) {
const ellipsis = '...';
const notification = ' [text truncated for brevity]';
const halfMaxLength = Math.floor((maxLength - ellipsis.length - notification.length) / 2);
if (text.length > maxLength) {
const startLastHalf = text.length - halfMaxLength;
return `${text.slice(0, halfMaxLength)}${ellipsis}${text.slice(startLastHalf)}${notification}`;
}
return text;
}
module.exports = { truncateText, smartTruncateText };

View File

@@ -1,4 +1,3 @@
const { SplitStreamHandler } = require('@librechat/agents');
const { anthropicSettings } = require('librechat-data-provider');
const AnthropicClient = require('~/app/clients/AnthropicClient');
@@ -406,278 +405,4 @@ describe('AnthropicClient', () => {
expect(Number.isNaN(result)).toBe(false);
});
});
describe('maxOutputTokens handling for different models', () => {
it('should not cap maxOutputTokens for Claude 3.5 Sonnet models', () => {
const client = new AnthropicClient('test-api-key');
const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 10;
client.setOptions({
modelOptions: {
model: 'claude-3-5-sonnet',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
// Test with decimal notation
client.setOptions({
modelOptions: {
model: 'claude-3.5-sonnet',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
});
it('should not cap maxOutputTokens for Claude 3.7 models', () => {
const client = new AnthropicClient('test-api-key');
const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
client.setOptions({
modelOptions: {
model: 'claude-3-7-sonnet',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
// Test with decimal notation
client.setOptions({
modelOptions: {
model: 'claude-3.7-sonnet',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(highTokenValue);
});
it('should cap maxOutputTokens for Claude 3.5 Haiku models', () => {
const client = new AnthropicClient('test-api-key');
const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
client.setOptions({
modelOptions: {
model: 'claude-3-5-haiku',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
// Test with decimal notation
client.setOptions({
modelOptions: {
model: 'claude-3.5-haiku',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
});
it('should cap maxOutputTokens for Claude 3 Haiku and Opus models', () => {
const client = new AnthropicClient('test-api-key');
const highTokenValue = anthropicSettings.legacy.maxOutputTokens.default * 2;
// Test haiku
client.setOptions({
modelOptions: {
model: 'claude-3-haiku',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
// Test opus
client.setOptions({
modelOptions: {
model: 'claude-3-opus',
maxOutputTokens: highTokenValue,
},
});
expect(client.modelOptions.maxOutputTokens).toBe(
anthropicSettings.legacy.maxOutputTokens.default,
);
});
});
describe('topK/topP parameters for different models', () => {
beforeEach(() => {
// Mock the SplitStreamHandler
jest.spyOn(SplitStreamHandler.prototype, 'handle').mockImplementation(() => {});
});
afterEach(() => {
jest.restoreAllMocks();
});
it('should include top_k and top_p parameters for non-claude-3.7 models', async () => {
const client = new AnthropicClient('test-api-key');
// Create a mock async generator function
async function* mockAsyncGenerator() {
yield { type: 'message_start', message: { usage: {} } };
yield { delta: { text: 'Test response' } };
yield { type: 'message_delta', usage: {} };
}
// Mock createResponse to return the async generator
jest.spyOn(client, 'createResponse').mockImplementation(() => {
return mockAsyncGenerator();
});
client.setOptions({
modelOptions: {
model: 'claude-3-opus',
temperature: 0.7,
topK: 10,
topP: 0.9,
},
});
// Mock getClient to capture the request options
let capturedOptions = null;
jest.spyOn(client, 'getClient').mockImplementation((options) => {
capturedOptions = options;
return {};
});
const payload = [{ role: 'user', content: 'Test message' }];
await client.sendCompletion(payload, {});
// Check the options passed to getClient
expect(capturedOptions).toHaveProperty('top_k', 10);
expect(capturedOptions).toHaveProperty('top_p', 0.9);
});
it('should include top_k and top_p parameters for claude-3-5-sonnet models', async () => {
const client = new AnthropicClient('test-api-key');
// Create a mock async generator function
async function* mockAsyncGenerator() {
yield { type: 'message_start', message: { usage: {} } };
yield { delta: { text: 'Test response' } };
yield { type: 'message_delta', usage: {} };
}
// Mock createResponse to return the async generator
jest.spyOn(client, 'createResponse').mockImplementation(() => {
return mockAsyncGenerator();
});
client.setOptions({
modelOptions: {
model: 'claude-3-5-sonnet',
temperature: 0.7,
topK: 10,
topP: 0.9,
},
});
// Mock getClient to capture the request options
let capturedOptions = null;
jest.spyOn(client, 'getClient').mockImplementation((options) => {
capturedOptions = options;
return {};
});
const payload = [{ role: 'user', content: 'Test message' }];
await client.sendCompletion(payload, {});
// Check the options passed to getClient
expect(capturedOptions).toHaveProperty('top_k', 10);
expect(capturedOptions).toHaveProperty('top_p', 0.9);
});
it('should not include top_k and top_p parameters for claude-3-7-sonnet models', async () => {
const client = new AnthropicClient('test-api-key');
// Create a mock async generator function
async function* mockAsyncGenerator() {
yield { type: 'message_start', message: { usage: {} } };
yield { delta: { text: 'Test response' } };
yield { type: 'message_delta', usage: {} };
}
// Mock createResponse to return the async generator
jest.spyOn(client, 'createResponse').mockImplementation(() => {
return mockAsyncGenerator();
});
client.setOptions({
modelOptions: {
model: 'claude-3-7-sonnet',
temperature: 0.7,
topK: 10,
topP: 0.9,
},
});
// Mock getClient to capture the request options
let capturedOptions = null;
jest.spyOn(client, 'getClient').mockImplementation((options) => {
capturedOptions = options;
return {};
});
const payload = [{ role: 'user', content: 'Test message' }];
await client.sendCompletion(payload, {});
// Check the options passed to getClient
expect(capturedOptions).not.toHaveProperty('top_k');
expect(capturedOptions).not.toHaveProperty('top_p');
});
it('should not include top_k and top_p parameters for models with decimal notation (claude-3.7)', async () => {
const client = new AnthropicClient('test-api-key');
// Create a mock async generator function
async function* mockAsyncGenerator() {
yield { type: 'message_start', message: { usage: {} } };
yield { delta: { text: 'Test response' } };
yield { type: 'message_delta', usage: {} };
}
// Mock createResponse to return the async generator
jest.spyOn(client, 'createResponse').mockImplementation(() => {
return mockAsyncGenerator();
});
client.setOptions({
modelOptions: {
model: 'claude-3.7-sonnet',
temperature: 0.7,
topK: 10,
topP: 0.9,
},
});
// Mock getClient to capture the request options
let capturedOptions = null;
jest.spyOn(client, 'getClient').mockImplementation((options) => {
capturedOptions = options;
return {};
});
const payload = [{ role: 'user', content: 'Test message' }];
await client.sendCompletion(payload, {});
// Check the options passed to getClient
expect(capturedOptions).not.toHaveProperty('top_k');
expect(capturedOptions).not.toHaveProperty('top_p');
});
});
});

View File

@@ -61,7 +61,7 @@ describe('BaseClient', () => {
const options = {
// debug: true,
modelOptions: {
model: 'gpt-4o-mini',
model: 'gpt-3.5-turbo',
temperature: 0,
},
};
@@ -88,19 +88,6 @@ describe('BaseClient', () => {
const messages = [{ content: 'Hello' }, { content: 'How are you?' }, { content: 'Goodbye' }];
const instructions = { content: 'Please respond to the question.' };
const result = TestClient.addInstructions(messages, instructions);
const expected = [
{ content: 'Please respond to the question.' },
{ content: 'Hello' },
{ content: 'How are you?' },
{ content: 'Goodbye' },
];
expect(result).toEqual(expected);
});
test('returns the input messages with instructions properly added when addInstructions() with legacy flag', () => {
const messages = [{ content: 'Hello' }, { content: 'How are you?' }, { content: 'Goodbye' }];
const instructions = { content: 'Please respond to the question.' };
const result = TestClient.addInstructions(messages, instructions, true);
const expected = [
{ content: 'Hello' },
{ content: 'How are you?' },
@@ -159,7 +146,7 @@ describe('BaseClient', () => {
expectedMessagesToRefine?.[expectedMessagesToRefine.length - 1] ?? {};
const expectedIndex = messages.findIndex((msg) => msg.content === lastExpectedMessage?.content);
const result = await TestClient.getMessagesWithinTokenLimit({ messages });
const result = await TestClient.getMessagesWithinTokenLimit(messages);
expect(result.context).toEqual(expectedContext);
expect(result.summaryIndex).toEqual(expectedIndex);
@@ -195,7 +182,7 @@ describe('BaseClient', () => {
expectedMessagesToRefine?.[expectedMessagesToRefine.length - 1] ?? {};
const expectedIndex = messages.findIndex((msg) => msg.content === lastExpectedMessage?.content);
const result = await TestClient.getMessagesWithinTokenLimit({ messages });
const result = await TestClient.getMessagesWithinTokenLimit(messages);
expect(result.context).toEqual(expectedContext);
expect(result.summaryIndex).toEqual(expectedIndex);
@@ -203,6 +190,66 @@ describe('BaseClient', () => {
expect(result.messagesToRefine).toEqual(expectedMessagesToRefine);
});
test('handles context strategy correctly in handleContextStrategy()', async () => {
TestClient.addInstructions = jest
.fn()
.mockReturnValue([
{ content: 'Hello' },
{ content: 'How can I help you?' },
{ content: 'Please provide more details.' },
{ content: 'I can assist you with that.' },
]);
TestClient.getMessagesWithinTokenLimit = jest.fn().mockReturnValue({
context: [
{ content: 'How can I help you?' },
{ content: 'Please provide more details.' },
{ content: 'I can assist you with that.' },
],
remainingContextTokens: 80,
messagesToRefine: [{ content: 'Hello' }],
summaryIndex: 3,
});
TestClient.getTokenCount = jest.fn().mockReturnValue(40);
const instructions = { content: 'Please provide more details.' };
const orderedMessages = [
{ content: 'Hello' },
{ content: 'How can I help you?' },
{ content: 'Please provide more details.' },
{ content: 'I can assist you with that.' },
];
const formattedMessages = [
{ content: 'Hello' },
{ content: 'How can I help you?' },
{ content: 'Please provide more details.' },
{ content: 'I can assist you with that.' },
];
const expectedResult = {
payload: [
{
role: 'system',
content: 'Refined answer',
},
{ content: 'How can I help you?' },
{ content: 'Please provide more details.' },
{ content: 'I can assist you with that.' },
],
promptTokens: expect.any(Number),
tokenCountMap: {},
messages: expect.any(Array),
};
TestClient.shouldSummarize = true;
const result = await TestClient.handleContextStrategy({
instructions,
orderedMessages,
formattedMessages,
});
expect(result).toEqual(expectedResult);
});
describe('getMessagesForConversation', () => {
it('should return an empty array if the parentMessageId does not exist', () => {
const result = TestClient.constructor.getMessagesForConversation({
@@ -568,9 +615,9 @@ describe('BaseClient', () => {
test('getTokenCount for response is called with the correct arguments', async () => {
const tokenCountMap = {}; // Mock tokenCountMap
TestClient.buildMessages.mockReturnValue({ prompt: [], tokenCountMap });
TestClient.getTokenCountForResponse = jest.fn();
TestClient.getTokenCount = jest.fn();
const response = await TestClient.sendMessage('Hello, world!', {});
expect(TestClient.getTokenCountForResponse).toHaveBeenCalledWith(response);
expect(TestClient.getTokenCount).toHaveBeenCalledWith(response.text);
});
test('returns an object with the correct shape', async () => {
@@ -614,112 +661,4 @@ describe('BaseClient', () => {
expect(calls[1][0].isCreatedByUser).toBe(false); // Second call should be for response message
});
});
describe('getMessagesWithinTokenLimit with instructions', () => {
test('should always include instructions when present', async () => {
TestClient.maxContextTokens = 50;
const instructions = {
role: 'system',
content: 'System instructions',
tokenCount: 20,
};
const messages = [
instructions,
{ role: 'user', content: 'Hello', tokenCount: 10 },
{ role: 'assistant', content: 'Hi there', tokenCount: 15 },
];
const result = await TestClient.getMessagesWithinTokenLimit({
messages,
instructions,
});
expect(result.context[0]).toBe(instructions);
expect(result.remainingContextTokens).toBe(2);
});
test('should handle case when messages exceed limit but instructions must be preserved', async () => {
TestClient.maxContextTokens = 30;
const instructions = {
role: 'system',
content: 'System instructions',
tokenCount: 20,
};
const messages = [
instructions,
{ role: 'user', content: 'Hello', tokenCount: 10 },
{ role: 'assistant', content: 'Hi there', tokenCount: 15 },
];
const result = await TestClient.getMessagesWithinTokenLimit({
messages,
instructions,
});
// Should only include instructions and the last message that fits
expect(result.context).toHaveLength(1);
expect(result.context[0].content).toBe(instructions.content);
expect(result.messagesToRefine).toHaveLength(2);
expect(result.remainingContextTokens).toBe(7); // 30 - 20 - 3 (assistant label)
});
test('should work correctly without instructions (1/2)', async () => {
TestClient.maxContextTokens = 50;
const messages = [
{ role: 'user', content: 'Hello', tokenCount: 10 },
{ role: 'assistant', content: 'Hi there', tokenCount: 15 },
];
const result = await TestClient.getMessagesWithinTokenLimit({
messages,
});
expect(result.context).toHaveLength(2);
expect(result.remainingContextTokens).toBe(22); // 50 - 10 - 15 - 3(assistant label)
expect(result.messagesToRefine).toHaveLength(0);
});
test('should work correctly without instructions (2/2)', async () => {
TestClient.maxContextTokens = 30;
const messages = [
{ role: 'user', content: 'Hello', tokenCount: 10 },
{ role: 'assistant', content: 'Hi there', tokenCount: 20 },
];
const result = await TestClient.getMessagesWithinTokenLimit({
messages,
});
expect(result.context).toHaveLength(1);
expect(result.remainingContextTokens).toBe(7);
expect(result.messagesToRefine).toHaveLength(1);
});
test('should handle case when only instructions fit within limit', async () => {
TestClient.maxContextTokens = 25;
const instructions = {
role: 'system',
content: 'System instructions',
tokenCount: 20,
};
const messages = [
instructions,
{ role: 'user', content: 'Hello', tokenCount: 10 },
{ role: 'assistant', content: 'Hi there', tokenCount: 15 },
];
const result = await TestClient.getMessagesWithinTokenLimit({
messages,
instructions,
});
expect(result.context).toHaveLength(1);
expect(result.context[0]).toBe(instructions);
expect(result.messagesToRefine).toHaveLength(2);
expect(result.remainingContextTokens).toBe(2); // 25 - 20 - 3(assistant label)
});
});
});

View File

@@ -1,7 +1,5 @@
jest.mock('~/cache/getLogStores');
require('dotenv').config();
const OpenAI = require('openai');
const getLogStores = require('~/cache/getLogStores');
const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source');
const { genAzureChatCompletion } = require('~/utils/azureUtils');
const OpenAIClient = require('../OpenAIClient');
@@ -136,13 +134,7 @@ OpenAI.mockImplementation(() => ({
}));
describe('OpenAIClient', () => {
const mockSet = jest.fn();
const mockCache = { set: mockSet };
beforeEach(() => {
getLogStores.mockReturnValue(mockCache);
});
let client;
let client, client2;
const model = 'gpt-4';
const parentMessageId = '1';
const messages = [
@@ -184,6 +176,7 @@ describe('OpenAIClient', () => {
beforeEach(() => {
const options = { ...defaultOptions };
client = new OpenAIClient('test-api-key', options);
client2 = new OpenAIClient('test-api-key', options);
client.summarizeMessages = jest.fn().mockResolvedValue({
role: 'assistant',
content: 'Refined answer',
@@ -192,6 +185,7 @@ describe('OpenAIClient', () => {
client.buildPrompt = jest
.fn()
.mockResolvedValue({ prompt: messages.map((m) => m.text).join('\n') });
client.constructor.freeAndResetAllEncoders();
client.getMessages = jest.fn().mockResolvedValue([]);
});
@@ -202,6 +196,14 @@ describe('OpenAIClient', () => {
expect(client.modelOptions.temperature).toBe(0.7);
});
it('should set apiKey and useOpenRouter if OPENROUTER_API_KEY is present', () => {
process.env.OPENROUTER_API_KEY = 'openrouter-key';
client.setOptions({});
expect(client.apiKey).toBe('openrouter-key');
expect(client.useOpenRouter).toBe(true);
delete process.env.OPENROUTER_API_KEY; // Cleanup
});
it('should set FORCE_PROMPT based on OPENAI_FORCE_PROMPT or reverseProxyUrl', () => {
process.env.OPENAI_FORCE_PROMPT = 'true';
client.setOptions({});
@@ -219,7 +221,7 @@ describe('OpenAIClient', () => {
it('should set isChatCompletion based on useOpenRouter, reverseProxyUrl, or model', () => {
client.setOptions({ reverseProxyUrl: null });
// true by default since default model will be gpt-4o-mini
// true by default since default model will be gpt-3.5-turbo
expect(client.isChatCompletion).toBe(true);
client.isChatCompletion = undefined;
@@ -228,7 +230,7 @@ describe('OpenAIClient', () => {
expect(client.isChatCompletion).toBe(false);
client.isChatCompletion = undefined;
client.setOptions({ modelOptions: { model: 'gpt-4o-mini' }, reverseProxyUrl: null });
client.setOptions({ modelOptions: { model: 'gpt-3.5-turbo' }, reverseProxyUrl: null });
expect(client.isChatCompletion).toBe(true);
});
@@ -333,18 +335,83 @@ describe('OpenAIClient', () => {
});
});
describe('selectTokenizer', () => {
it('should get the correct tokenizer based on the instance state', () => {
const tokenizer = client.selectTokenizer();
expect(tokenizer).toBeDefined();
});
});
describe('freeAllTokenizers', () => {
it('should free all tokenizers', () => {
// Create a tokenizer
const tokenizer = client.selectTokenizer();
// Mock 'free' method on the tokenizer
tokenizer.free = jest.fn();
client.constructor.freeAndResetAllEncoders();
// Check if 'free' method has been called on the tokenizer
expect(tokenizer.free).toHaveBeenCalled();
});
});
describe('getTokenCount', () => {
it('should return the correct token count', () => {
const count = client.getTokenCount('Hello, world!');
expect(count).toBeGreaterThan(0);
});
it('should reset the encoder and count when count reaches 25', () => {
const freeAndResetEncoderSpy = jest.spyOn(client.constructor, 'freeAndResetAllEncoders');
// Call getTokenCount 25 times
for (let i = 0; i < 25; i++) {
client.getTokenCount('test text');
}
expect(freeAndResetEncoderSpy).toHaveBeenCalled();
});
it('should not reset the encoder and count when count is less than 25', () => {
const freeAndResetEncoderSpy = jest.spyOn(client.constructor, 'freeAndResetAllEncoders');
freeAndResetEncoderSpy.mockClear();
// Call getTokenCount 24 times
for (let i = 0; i < 24; i++) {
client.getTokenCount('test text');
}
expect(freeAndResetEncoderSpy).not.toHaveBeenCalled();
});
it('should handle errors and reset the encoder', () => {
const freeAndResetEncoderSpy = jest.spyOn(client.constructor, 'freeAndResetAllEncoders');
// Mock encode function to throw an error
client.selectTokenizer().encode = jest.fn().mockImplementation(() => {
throw new Error('Test error');
});
client.getTokenCount('test text');
expect(freeAndResetEncoderSpy).toHaveBeenCalled();
});
it('should not throw null pointer error when freeing the same encoder twice', () => {
client.constructor.freeAndResetAllEncoders();
client2.constructor.freeAndResetAllEncoders();
const count = client2.getTokenCount('test text');
expect(count).toBeGreaterThan(0);
});
});
describe('getSaveOptions', () => {
it('should return the correct save options', () => {
const options = client.getSaveOptions();
expect(options).toHaveProperty('chatGptLabel');
expect(options).toHaveProperty('modelLabel');
expect(options).toHaveProperty('promptPrefix');
});
});
@@ -480,6 +547,7 @@ describe('OpenAIClient', () => {
testCases.forEach((testCase) => {
it(`should return ${testCase.expected} tokens for model ${testCase.model}`, () => {
client.modelOptions.model = testCase.model;
client.selectTokenizer();
// 3 tokens for assistant label
let totalTokens = 3;
for (let message of example_messages) {
@@ -513,6 +581,7 @@ describe('OpenAIClient', () => {
it(`should return ${expectedTokens} tokens for model ${visionModel} (Vision Request)`, () => {
client.modelOptions.model = visionModel;
client.selectTokenizer();
// 3 tokens for assistant label
let totalTokens = 3;
for (let message of vision_request) {
@@ -526,6 +595,7 @@ describe('OpenAIClient', () => {
afterEach(() => {
delete process.env.AZURE_OPENAI_DEFAULT_MODEL;
delete process.env.AZURE_USE_MODEL_AS_DEPLOYMENT_NAME;
delete process.env.OPENROUTER_API_KEY;
});
it('should call getCompletion and fetchEventSource when using a text/instruct model', async () => {

View File

@@ -2,8 +2,6 @@ const availableTools = require('./manifest.json');
// Structured Tools
const DALLE3 = require('./structured/DALLE3');
const OpenWeather = require('./structured/OpenWeather');
const createYouTubeTools = require('./structured/YouTube');
const StructuredWolfram = require('./structured/Wolfram');
const StructuredACS = require('./structured/AzureAISearch');
const StructuredSD = require('./structured/StableDiffusion');
@@ -11,31 +9,14 @@ const GoogleSearchAPI = require('./structured/GoogleSearch');
const TraversaalSearch = require('./structured/TraversaalSearch');
const TavilySearchResults = require('./structured/TavilySearchResults');
/** @type {Record<string, TPlugin | undefined>} */
const manifestToolMap = {};
/** @type {Array<TPlugin>} */
const toolkits = [];
availableTools.forEach((tool) => {
manifestToolMap[tool.pluginKey] = tool;
if (tool.toolkit === true) {
toolkits.push(tool);
}
});
module.exports = {
toolkits,
availableTools,
manifestToolMap,
// Structured Tools
DALLE3,
OpenWeather,
StructuredSD,
StructuredACS,
GoogleSearchAPI,
TraversaalSearch,
StructuredWolfram,
createYouTubeTools,
TavilySearchResults,
};

View File

@@ -30,20 +30,6 @@
}
]
},
{
"name": "YouTube",
"pluginKey": "youtube",
"toolkit": true,
"description": "Get YouTube video information, retrieve comments, analyze transcripts and search for videos.",
"icon": "https://www.youtube.com/s/desktop/7449ebf7/img/favicon_144x144.png",
"authConfig": [
{
"authField": "YOUTUBE_API_KEY",
"label": "YouTube API Key",
"description": "Your YouTube Data API v3 key."
}
]
},
{
"name": "Wolfram",
"pluginKey": "wolfram",
@@ -114,6 +100,7 @@
"pluginKey": "calculator",
"description": "Perform simple and complex mathematical calculations.",
"icon": "https://i.imgur.com/RHsSG5h.png",
"isAuthRequired": "false",
"authConfig": []
},
{
@@ -148,20 +135,7 @@
{
"authField": "AZURE_AI_SEARCH_API_KEY",
"label": "Azure AI Search API Key",
"description": "You need to provide your API Key for Azure AI Search."
}
]
},
{
"name": "OpenWeather",
"pluginKey": "open_weather",
"description": "Get weather forecasts and historical data from the OpenWeather API",
"icon": "/assets/openweather.png",
"authConfig": [
{
"authField": "OPENWEATHER_API_KEY",
"label": "OpenWeather API Key",
"description": "Sign up at <a href=\"https://home.openweathermap.org/users/sign_up\" target=\"_blank\">OpenWeather</a>, then get your key at <a href=\"https://home.openweathermap.org/api_keys\" target=\"_blank\">API keys</a>."
"description": "You need to provideq your API Key for Azure AI Search."
}
]
}

View File

@@ -19,8 +19,6 @@ class DALLE3 extends Tool {
this.userId = fields.userId;
this.fileStrategy = fields.fileStrategy;
/** @type {boolean} */
this.isAgent = fields.isAgent;
if (fields.processFileURL) {
/** @type {processFileURL} Necessary for output to contain all image metadata. */
this.processFileURL = fields.processFileURL.bind(this);
@@ -110,19 +108,6 @@ class DALLE3 extends Tool {
return `![generated image](${imageUrl})`;
}
returnValue(value) {
if (this.isAgent === true && typeof value === 'string') {
return [value, {}];
} else if (this.isAgent === true && typeof value === 'object') {
return [
'DALL-E displayed an image. All generated images are already plainly visible, so don\'t repeat the descriptions in detail. Do not list download links as they are available in the UI already. The user may download the images by clicking on them, but do not mention anything about downloading to the user.',
value,
];
}
return value;
}
async _call(data) {
const { prompt, quality = 'standard', size = '1024x1024', style = 'vivid' } = data;
if (!prompt) {
@@ -141,23 +126,18 @@ class DALLE3 extends Tool {
});
} catch (error) {
logger.error('[DALL-E-3] Problem generating the image:', error);
return this
.returnValue(`Something went wrong when trying to generate the image. The DALL-E API may be unavailable:
Error Message: ${error.message}`);
return `Something went wrong when trying to generate the image. The DALL-E API may be unavailable:
Error Message: ${error.message}`;
}
if (!resp) {
return this.returnValue(
'Something went wrong when trying to generate the image. The DALL-E API may be unavailable',
);
return 'Something went wrong when trying to generate the image. The DALL-E API may be unavailable';
}
const theImageUrl = resp.data[0].url;
if (!theImageUrl) {
return this.returnValue(
'No image URL returned from OpenAI API. There may be a problem with the API or your configuration.',
);
return 'No image URL returned from OpenAI API. There may be a problem with the API or your configuration.';
}
const imageBasename = getImageBasename(theImageUrl);
@@ -177,11 +157,11 @@ Error Message: ${error.message}`);
try {
const result = await this.processFileURL({
URL: theImageUrl,
basePath: 'images',
userId: this.userId,
fileName: imageName,
fileStrategy: this.fileStrategy,
userId: this.userId,
URL: theImageUrl,
fileName: imageName,
basePath: 'images',
context: FileContext.image_generation,
});
@@ -195,7 +175,7 @@ Error Message: ${error.message}`);
this.result = `Failed to save the image locally. ${error.message}`;
}
return this.returnValue(this.result);
return this.result;
}
}

View File

@@ -1,317 +0,0 @@
const { Tool } = require('@langchain/core/tools');
const { z } = require('zod');
const { getEnvironmentVariable } = require('@langchain/core/utils/env');
const fetch = require('node-fetch');
/**
* Map user-friendly units to OpenWeather units.
* Defaults to Celsius if not specified.
*/
function mapUnitsToOpenWeather(unit) {
if (!unit) {
return 'metric';
} // Default to Celsius
switch (unit) {
case 'Celsius':
return 'metric';
case 'Kelvin':
return 'standard';
case 'Fahrenheit':
return 'imperial';
default:
return 'metric'; // fallback
}
}
/**
* Recursively round temperature fields in the API response.
*/
function roundTemperatures(obj) {
const tempKeys = new Set([
'temp',
'feels_like',
'dew_point',
'day',
'min',
'max',
'night',
'eve',
'morn',
'afternoon',
'morning',
'evening',
]);
if (Array.isArray(obj)) {
return obj.map((item) => roundTemperatures(item));
} else if (obj && typeof obj === 'object') {
for (const key of Object.keys(obj)) {
const value = obj[key];
if (value && typeof value === 'object') {
obj[key] = roundTemperatures(value);
} else if (typeof value === 'number' && tempKeys.has(key)) {
obj[key] = Math.round(value);
}
}
}
return obj;
}
class OpenWeather extends Tool {
name = 'open_weather';
description =
'Provides weather data from OpenWeather One Call API 3.0. ' +
'Actions: help, current_forecast, timestamp, daily_aggregation, overview. ' +
'If lat/lon not provided, specify "city" for geocoding. ' +
'Units: "Celsius", "Kelvin", or "Fahrenheit" (default: Celsius). ' +
'For timestamp action, use "date" in YYYY-MM-DD format.';
schema = z.object({
action: z.enum(['help', 'current_forecast', 'timestamp', 'daily_aggregation', 'overview']),
city: z.string().optional(),
lat: z.number().optional(),
lon: z.number().optional(),
exclude: z.string().optional(),
units: z.enum(['Celsius', 'Kelvin', 'Fahrenheit']).optional(),
lang: z.string().optional(),
date: z.string().optional(), // For timestamp and daily_aggregation
tz: z.string().optional(),
});
constructor(fields = {}) {
super();
this.envVar = 'OPENWEATHER_API_KEY';
this.override = fields.override ?? false;
this.apiKey = fields[this.envVar] ?? this.getApiKey();
}
getApiKey() {
const key = getEnvironmentVariable(this.envVar);
if (!key && !this.override) {
throw new Error(`Missing ${this.envVar} environment variable.`);
}
return key;
}
async geocodeCity(city) {
const geocodeUrl = `https://api.openweathermap.org/geo/1.0/direct?q=${encodeURIComponent(
city,
)}&limit=1&appid=${this.apiKey}`;
const res = await fetch(geocodeUrl);
const data = await res.json();
if (!res.ok || !Array.isArray(data) || data.length === 0) {
throw new Error(`Could not find coordinates for city: ${city}`);
}
return { lat: data[0].lat, lon: data[0].lon };
}
convertDateToUnix(dateStr) {
const parts = dateStr.split('-');
if (parts.length !== 3) {
throw new Error('Invalid date format. Expected YYYY-MM-DD.');
}
const year = parseInt(parts[0], 10);
const month = parseInt(parts[1], 10);
const day = parseInt(parts[2], 10);
if (isNaN(year) || isNaN(month) || isNaN(day)) {
throw new Error('Invalid date format. Expected YYYY-MM-DD with valid numbers.');
}
const dateObj = new Date(Date.UTC(year, month - 1, day, 0, 0, 0));
if (isNaN(dateObj.getTime())) {
throw new Error('Invalid date provided. Cannot parse into a valid date.');
}
return Math.floor(dateObj.getTime() / 1000);
}
async _call(args) {
try {
const { action, city, lat, lon, exclude, units, lang, date, tz } = args;
const owmUnits = mapUnitsToOpenWeather(units);
if (action === 'help') {
return JSON.stringify(
{
title: 'OpenWeather One Call API 3.0 Help',
description: 'Guidance on using the OpenWeather One Call API 3.0.',
endpoints: {
current_and_forecast: {
endpoint: 'data/3.0/onecall',
data_provided: [
'Current weather',
'Minute forecast (1h)',
'Hourly forecast (48h)',
'Daily forecast (8 days)',
'Government weather alerts',
],
required_params: [['lat', 'lon'], ['city']],
optional_params: ['exclude', 'units (Celsius/Kelvin/Fahrenheit)', 'lang'],
usage_example: {
city: 'Knoxville, Tennessee',
units: 'Fahrenheit',
lang: 'en',
},
},
weather_for_timestamp: {
endpoint: 'data/3.0/onecall/timemachine',
data_provided: [
'Historical weather (since 1979-01-01)',
'Future forecast up to 4 days ahead',
],
required_params: [
['lat', 'lon', 'date (YYYY-MM-DD)'],
['city', 'date (YYYY-MM-DD)'],
],
optional_params: ['units (Celsius/Kelvin/Fahrenheit)', 'lang'],
usage_example: {
city: 'Knoxville, Tennessee',
date: '2020-03-04',
units: 'Fahrenheit',
lang: 'en',
},
},
daily_aggregation: {
endpoint: 'data/3.0/onecall/day_summary',
data_provided: [
'Aggregated weather data for a specific date (1979-01-02 to 1.5 years ahead)',
],
required_params: [
['lat', 'lon', 'date (YYYY-MM-DD)'],
['city', 'date (YYYY-MM-DD)'],
],
optional_params: ['units (Celsius/Kelvin/Fahrenheit)', 'lang', 'tz'],
usage_example: {
city: 'Knoxville, Tennessee',
date: '2020-03-04',
units: 'Celsius',
lang: 'en',
},
},
weather_overview: {
endpoint: 'data/3.0/onecall/overview',
data_provided: ['Human-readable weather summary (today/tomorrow)'],
required_params: [['lat', 'lon'], ['city']],
optional_params: ['date (YYYY-MM-DD)', 'units (Celsius/Kelvin/Fahrenheit)'],
usage_example: {
city: 'Knoxville, Tennessee',
date: '2024-05-13',
units: 'Celsius',
},
},
},
notes: [
'If lat/lon not provided, you can specify a city name and it will be geocoded.',
'For the timestamp action, provide a date in YYYY-MM-DD format instead of a Unix timestamp.',
'By default, temperatures are returned in Celsius.',
'You can specify units as Celsius, Kelvin, or Fahrenheit.',
'All temperatures are rounded to the nearest degree.',
],
errors: [
'400: Bad Request (missing/invalid params)',
'401: Unauthorized (check API key)',
'404: Not Found (no data or city)',
'429: Too many requests',
'5xx: Internal error',
],
},
null,
2,
);
}
let finalLat = lat;
let finalLon = lon;
// If lat/lon not provided but city is given, geocode it
if ((finalLat == null || finalLon == null) && city) {
const coords = await this.geocodeCity(city);
finalLat = coords.lat;
finalLon = coords.lon;
}
if (['current_forecast', 'timestamp', 'daily_aggregation', 'overview'].includes(action)) {
if (typeof finalLat !== 'number' || typeof finalLon !== 'number') {
return 'Error: lat and lon are required and must be numbers for this action (or specify \'city\').';
}
}
const baseUrl = 'https://api.openweathermap.org/data/3.0';
let endpoint = '';
const params = new URLSearchParams({ appid: this.apiKey, units: owmUnits });
let dt;
if (action === 'timestamp') {
if (!date) {
return 'Error: For timestamp action, a \'date\' in YYYY-MM-DD format is required.';
}
dt = this.convertDateToUnix(date);
}
if (action === 'daily_aggregation' && !date) {
return 'Error: date (YYYY-MM-DD) is required for daily_aggregation action.';
}
switch (action) {
case 'current_forecast':
endpoint = '/onecall';
params.append('lat', String(finalLat));
params.append('lon', String(finalLon));
if (exclude) {
params.append('exclude', exclude);
}
if (lang) {
params.append('lang', lang);
}
break;
case 'timestamp':
endpoint = '/onecall/timemachine';
params.append('lat', String(finalLat));
params.append('lon', String(finalLon));
params.append('dt', String(dt));
if (lang) {
params.append('lang', lang);
}
break;
case 'daily_aggregation':
endpoint = '/onecall/day_summary';
params.append('lat', String(finalLat));
params.append('lon', String(finalLon));
params.append('date', date);
if (lang) {
params.append('lang', lang);
}
if (tz) {
params.append('tz', tz);
}
break;
case 'overview':
endpoint = '/onecall/overview';
params.append('lat', String(finalLat));
params.append('lon', String(finalLon));
if (date) {
params.append('date', date);
}
break;
default:
return `Error: Unknown action: ${action}`;
}
const url = `${baseUrl}${endpoint}?${params.toString()}`;
const response = await fetch(url);
const json = await response.json();
if (!response.ok) {
return `Error: OpenWeather API request failed with status ${response.status}: ${
json.message || JSON.stringify(json)
}`;
}
const roundedJson = roundTemperatures(json);
return JSON.stringify(roundedJson);
} catch (err) {
return `Error: ${err.message}`;
}
}
}
module.exports = OpenWeather;

View File

@@ -1,6 +1,6 @@
const { z } = require('zod');
const { tool } = require('@langchain/core/tools');
const { getApiKey } = require('./credentials');
const { getEnvironmentVariable } = require('@langchain/core/utils/env');
function createTavilySearchTool(fields = {}) {
const envVar = 'TAVILY_API_KEY';
@@ -8,6 +8,14 @@ function createTavilySearchTool(fields = {}) {
const apiKey = fields.apiKey ?? getApiKey(envVar, override);
const kwargs = fields?.kwargs ?? {};
function getApiKey(envVar, override) {
const key = getEnvironmentVariable(envVar);
if (!key && !override) {
throw new Error(`Missing ${envVar} environment variable.`);
}
return key;
}
return tool(
async (input) => {
const { query, ...rest } = input;

View File

@@ -1,203 +0,0 @@
const { z } = require('zod');
const { tool } = require('@langchain/core/tools');
const { youtube } = require('@googleapis/youtube');
const { YoutubeTranscript } = require('youtube-transcript');
const { getApiKey } = require('./credentials');
const { logger } = require('~/config');
function extractVideoId(url) {
const rawIdRegex = /^[a-zA-Z0-9_-]{11}$/;
if (rawIdRegex.test(url)) {
return url;
}
const regex = new RegExp(
'(?:youtu\\.be/|youtube(?:\\.com)?/(?:' +
'(?:watch\\?v=)|(?:embed/)|(?:shorts/)|(?:live/)|(?:v/)|(?:/))?)' +
'([a-zA-Z0-9_-]{11})(?:\\S+)?$',
);
const match = url.match(regex);
return match ? match[1] : null;
}
function parseTranscript(transcriptResponse) {
if (!Array.isArray(transcriptResponse)) {
return '';
}
return transcriptResponse
.map((entry) => entry.text.trim())
.filter((text) => text)
.join(' ')
.replaceAll('&amp;#39;', '\'');
}
function createYouTubeTools(fields = {}) {
const envVar = 'YOUTUBE_API_KEY';
const override = fields.override ?? false;
const apiKey = fields.apiKey ?? fields[envVar] ?? getApiKey(envVar, override);
const youtubeClient = youtube({
version: 'v3',
auth: apiKey,
});
const searchTool = tool(
async ({ query, maxResults = 5 }) => {
const response = await youtubeClient.search.list({
part: 'snippet',
q: query,
type: 'video',
maxResults: maxResults || 5,
});
const result = response.data.items.map((item) => ({
title: item.snippet.title,
description: item.snippet.description,
url: `https://www.youtube.com/watch?v=${item.id.videoId}`,
}));
return JSON.stringify(result, null, 2);
},
{
name: 'youtube_search',
description: `Search for YouTube videos by keyword or phrase.
- Required: query (search terms to find videos)
- Optional: maxResults (number of videos to return, 1-50, default: 5)
- Returns: List of videos with titles, descriptions, and URLs
- Use for: Finding specific videos, exploring content, research
Example: query="cooking pasta tutorials" maxResults=3`,
schema: z.object({
query: z.string().describe('Search query terms'),
maxResults: z.number().int().min(1).max(50).optional().describe('Number of results (1-50)'),
}),
},
);
const infoTool = tool(
async ({ url }) => {
const videoId = extractVideoId(url);
if (!videoId) {
throw new Error('Invalid YouTube URL or video ID');
}
const response = await youtubeClient.videos.list({
part: 'snippet,statistics',
id: videoId,
});
if (!response.data.items?.length) {
throw new Error('Video not found');
}
const video = response.data.items[0];
const result = {
title: video.snippet.title,
description: video.snippet.description,
views: video.statistics.viewCount,
likes: video.statistics.likeCount,
comments: video.statistics.commentCount,
};
return JSON.stringify(result, null, 2);
},
{
name: 'youtube_info',
description: `Get detailed metadata and statistics for a specific YouTube video.
- Required: url (full YouTube URL or video ID)
- Returns: Video title, description, view count, like count, comment count
- Use for: Getting video metrics and basic metadata
- DO NOT USE FOR VIDEO SUMMARIES, USE TRANSCRIPTS FOR COMPREHENSIVE ANALYSIS
- Accepts both full URLs and video IDs
Example: url="https://youtube.com/watch?v=abc123" or url="abc123"`,
schema: z.object({
url: z.string().describe('YouTube video URL or ID'),
}),
},
);
const commentsTool = tool(
async ({ url, maxResults = 10 }) => {
const videoId = extractVideoId(url);
if (!videoId) {
throw new Error('Invalid YouTube URL or video ID');
}
const response = await youtubeClient.commentThreads.list({
part: 'snippet',
videoId,
maxResults: maxResults || 10,
});
const result = response.data.items.map((item) => ({
author: item.snippet.topLevelComment.snippet.authorDisplayName,
text: item.snippet.topLevelComment.snippet.textDisplay,
likes: item.snippet.topLevelComment.snippet.likeCount,
}));
return JSON.stringify(result, null, 2);
},
{
name: 'youtube_comments',
description: `Retrieve top-level comments from a YouTube video.
- Required: url (full YouTube URL or video ID)
- Optional: maxResults (number of comments, 1-50, default: 10)
- Returns: Comment text, author names, like counts
- Use for: Sentiment analysis, audience feedback, engagement review
Example: url="abc123" maxResults=20`,
schema: z.object({
url: z.string().describe('YouTube video URL or ID'),
maxResults: z
.number()
.int()
.min(1)
.max(50)
.optional()
.describe('Number of comments to retrieve'),
}),
},
);
const transcriptTool = tool(
async ({ url }) => {
const videoId = extractVideoId(url);
if (!videoId) {
throw new Error('Invalid YouTube URL or video ID');
}
try {
try {
const transcript = await YoutubeTranscript.fetchTranscript(videoId, { lang: 'en' });
return parseTranscript(transcript);
} catch (e) {
logger.error(e);
}
try {
const transcript = await YoutubeTranscript.fetchTranscript(videoId, { lang: 'de' });
return parseTranscript(transcript);
} catch (e) {
logger.error(e);
}
const transcript = await YoutubeTranscript.fetchTranscript(videoId);
return parseTranscript(transcript);
} catch (error) {
throw new Error(`Failed to fetch transcript: ${error.message}`);
}
},
{
name: 'youtube_transcript',
description: `Fetch and parse the transcript/captions of a YouTube video.
- Required: url (full YouTube URL or video ID)
- Returns: Full video transcript as plain text
- Use for: Content analysis, summarization, translation reference
- This is the "Go-to" tool for analyzing actual video content
- Attempts to fetch English first, then German, then any available language
Example: url="https://youtube.com/watch?v=abc123"`,
schema: z.object({
url: z.string().describe('YouTube video URL or ID'),
}),
},
);
return [searchTool, infoTool, commentsTool, transcriptTool];
}
module.exports = createYouTubeTools;

View File

@@ -1,13 +0,0 @@
const { getEnvironmentVariable } = require('@langchain/core/utils/env');
function getApiKey(envVar, override) {
const key = getEnvironmentVariable(envVar);
if (!key && !override) {
throw new Error(`Missing ${envVar} environment variable.`);
}
return key;
}
module.exports = {
getApiKey,
};

View File

@@ -1,224 +0,0 @@
// __tests__/openWeather.integration.test.js
const OpenWeather = require('../OpenWeather');
describe('OpenWeather Tool (Integration Test)', () => {
let tool;
beforeAll(() => {
tool = new OpenWeather({ override: true });
console.log('API Key present:', !!process.env.OPENWEATHER_API_KEY);
});
test('current_forecast with a real API key returns current weather', async () => {
// Check if API key is available
if (!process.env.OPENWEATHER_API_KEY) {
console.warn('Skipping real API test, no OPENWEATHER_API_KEY found.');
return;
}
try {
const result = await tool.call({
action: 'current_forecast',
city: 'London',
units: 'Celsius',
});
console.log('Raw API response:', result);
const parsed = JSON.parse(result);
expect(parsed).toHaveProperty('current');
expect(typeof parsed.current.temp).toBe('number');
} catch (error) {
console.error('Test failed with error:', error);
throw error;
}
});
test('timestamp action with real API key returns historical data', async () => {
if (!process.env.OPENWEATHER_API_KEY) {
console.warn('Skipping real API test, no OPENWEATHER_API_KEY found.');
return;
}
try {
// Use a date from yesterday to ensure data availability
const yesterday = new Date();
yesterday.setDate(yesterday.getDate() - 1);
const dateStr = yesterday.toISOString().split('T')[0];
const result = await tool.call({
action: 'timestamp',
city: 'London',
date: dateStr,
units: 'Celsius',
});
console.log('Timestamp API response:', result);
const parsed = JSON.parse(result);
expect(parsed).toHaveProperty('data');
expect(Array.isArray(parsed.data)).toBe(true);
expect(parsed.data[0]).toHaveProperty('temp');
} catch (error) {
console.error('Timestamp test failed with error:', error);
throw error;
}
});
test('daily_aggregation action with real API key returns aggregated data', async () => {
if (!process.env.OPENWEATHER_API_KEY) {
console.warn('Skipping real API test, no OPENWEATHER_API_KEY found.');
return;
}
try {
// Use yesterday's date for aggregation
const yesterday = new Date();
yesterday.setDate(yesterday.getDate() - 1);
const dateStr = yesterday.toISOString().split('T')[0];
const result = await tool.call({
action: 'daily_aggregation',
city: 'London',
date: dateStr,
units: 'Celsius',
});
console.log('Daily aggregation API response:', result);
const parsed = JSON.parse(result);
expect(parsed).toHaveProperty('temperature');
expect(parsed.temperature).toHaveProperty('morning');
expect(parsed.temperature).toHaveProperty('afternoon');
expect(parsed.temperature).toHaveProperty('evening');
} catch (error) {
console.error('Daily aggregation test failed with error:', error);
throw error;
}
});
test('overview action with real API key returns weather summary', async () => {
if (!process.env.OPENWEATHER_API_KEY) {
console.warn('Skipping real API test, no OPENWEATHER_API_KEY found.');
return;
}
try {
const result = await tool.call({
action: 'overview',
city: 'London',
units: 'Celsius',
});
console.log('Overview API response:', result);
const parsed = JSON.parse(result);
expect(parsed).toHaveProperty('weather_overview');
expect(typeof parsed.weather_overview).toBe('string');
expect(parsed.weather_overview.length).toBeGreaterThan(0);
expect(parsed).toHaveProperty('date');
expect(parsed).toHaveProperty('units');
expect(parsed.units).toBe('metric');
} catch (error) {
console.error('Overview test failed with error:', error);
throw error;
}
});
test('different temperature units return correct values', async () => {
if (!process.env.OPENWEATHER_API_KEY) {
console.warn('Skipping real API test, no OPENWEATHER_API_KEY found.');
return;
}
try {
// Test Celsius
let result = await tool.call({
action: 'current_forecast',
city: 'London',
units: 'Celsius',
});
let parsed = JSON.parse(result);
const celsiusTemp = parsed.current.temp;
// Test Kelvin
result = await tool.call({
action: 'current_forecast',
city: 'London',
units: 'Kelvin',
});
parsed = JSON.parse(result);
const kelvinTemp = parsed.current.temp;
// Test Fahrenheit
result = await tool.call({
action: 'current_forecast',
city: 'London',
units: 'Fahrenheit',
});
parsed = JSON.parse(result);
const fahrenheitTemp = parsed.current.temp;
// Verify temperature conversions are roughly correct
// K = C + 273.15
// F = (C * 9/5) + 32
const celsiusToKelvin = Math.round(celsiusTemp + 273.15);
const celsiusToFahrenheit = Math.round((celsiusTemp * 9) / 5 + 32);
console.log('Temperature comparisons:', {
celsius: celsiusTemp,
kelvin: kelvinTemp,
fahrenheit: fahrenheitTemp,
calculatedKelvin: celsiusToKelvin,
calculatedFahrenheit: celsiusToFahrenheit,
});
// Allow for some rounding differences
expect(Math.abs(kelvinTemp - celsiusToKelvin)).toBeLessThanOrEqual(1);
expect(Math.abs(fahrenheitTemp - celsiusToFahrenheit)).toBeLessThanOrEqual(1);
} catch (error) {
console.error('Temperature units test failed with error:', error);
throw error;
}
});
test('language parameter returns localized data', async () => {
if (!process.env.OPENWEATHER_API_KEY) {
console.warn('Skipping real API test, no OPENWEATHER_API_KEY found.');
return;
}
try {
// Test with English
let result = await tool.call({
action: 'current_forecast',
city: 'Paris',
units: 'Celsius',
lang: 'en',
});
let parsed = JSON.parse(result);
const englishDescription = parsed.current.weather[0].description;
// Test with French
result = await tool.call({
action: 'current_forecast',
city: 'Paris',
units: 'Celsius',
lang: 'fr',
});
parsed = JSON.parse(result);
const frenchDescription = parsed.current.weather[0].description;
console.log('Language comparison:', {
english: englishDescription,
french: frenchDescription,
});
// Verify descriptions are different (indicating translation worked)
expect(englishDescription).not.toBe(frenchDescription);
} catch (error) {
console.error('Language test failed with error:', error);
throw error;
}
});
});

View File

@@ -1,358 +0,0 @@
// __tests__/openweather.test.js
const OpenWeather = require('../OpenWeather');
const fetch = require('node-fetch');
// Mock environment variable
process.env.OPENWEATHER_API_KEY = 'test-api-key';
// Mock the fetch function globally
jest.mock('node-fetch', () => jest.fn());
describe('OpenWeather Tool', () => {
let tool;
beforeAll(() => {
tool = new OpenWeather();
});
beforeEach(() => {
fetch.mockReset();
});
test('action=help returns help instructions', async () => {
const result = await tool.call({
action: 'help',
});
expect(typeof result).toBe('string');
const parsed = JSON.parse(result);
expect(parsed.title).toBe('OpenWeather One Call API 3.0 Help');
});
test('current_forecast with a city and successful geocoding + forecast', async () => {
// Mock geocoding response
fetch.mockImplementationOnce((url) => {
if (url.includes('geo/1.0/direct')) {
return Promise.resolve({
ok: true,
json: async () => [{ lat: 35.9606, lon: -83.9207 }],
});
}
return Promise.reject('Unexpected fetch call for geocoding');
});
// Mock forecast response
fetch.mockImplementationOnce(() =>
Promise.resolve({
ok: true,
json: async () => ({
current: { temp: 293.15, feels_like: 295.15 },
daily: [{ temp: { day: 293.15, night: 283.15 } }],
}),
}),
);
const result = await tool.call({
action: 'current_forecast',
city: 'Knoxville, Tennessee',
units: 'Kelvin',
});
const parsed = JSON.parse(result);
expect(parsed.current.temp).toBe(293);
expect(parsed.current.feels_like).toBe(295);
expect(parsed.daily[0].temp.day).toBe(293);
expect(parsed.daily[0].temp.night).toBe(283);
});
test('timestamp action with valid date returns mocked historical data', async () => {
// Mock geocoding response
fetch.mockImplementationOnce((url) => {
if (url.includes('geo/1.0/direct')) {
return Promise.resolve({
ok: true,
json: async () => [{ lat: 35.9606, lon: -83.9207 }],
});
}
return Promise.reject('Unexpected fetch call for geocoding');
});
// Mock historical weather response
fetch.mockImplementationOnce(() =>
Promise.resolve({
ok: true,
json: async () => ({
data: [
{
dt: 1583280000,
temp: 283.15,
feels_like: 280.15,
humidity: 75,
weather: [{ description: 'clear sky' }],
},
],
}),
}),
);
const result = await tool.call({
action: 'timestamp',
city: 'Knoxville, Tennessee',
date: '2020-03-04',
units: 'Kelvin',
});
const parsed = JSON.parse(result);
expect(parsed.data[0].temp).toBe(283);
expect(parsed.data[0].feels_like).toBe(280);
});
test('daily_aggregation action returns aggregated weather data', async () => {
// Mock geocoding response
fetch.mockImplementationOnce((url) => {
if (url.includes('geo/1.0/direct')) {
return Promise.resolve({
ok: true,
json: async () => [{ lat: 35.9606, lon: -83.9207 }],
});
}
return Promise.reject('Unexpected fetch call for geocoding');
});
// Mock daily aggregation response
fetch.mockImplementationOnce(() =>
Promise.resolve({
ok: true,
json: async () => ({
date: '2020-03-04',
temperature: {
morning: 283.15,
afternoon: 293.15,
evening: 288.15,
},
humidity: {
morning: 75,
afternoon: 60,
evening: 70,
},
}),
}),
);
const result = await tool.call({
action: 'daily_aggregation',
city: 'Knoxville, Tennessee',
date: '2020-03-04',
units: 'Kelvin',
});
const parsed = JSON.parse(result);
expect(parsed.temperature.morning).toBe(283);
expect(parsed.temperature.afternoon).toBe(293);
expect(parsed.temperature.evening).toBe(288);
});
test('overview action returns weather summary', async () => {
// Mock geocoding response
fetch.mockImplementationOnce((url) => {
if (url.includes('geo/1.0/direct')) {
return Promise.resolve({
ok: true,
json: async () => [{ lat: 35.9606, lon: -83.9207 }],
});
}
return Promise.reject('Unexpected fetch call for geocoding');
});
// Mock overview response
fetch.mockImplementationOnce(() =>
Promise.resolve({
ok: true,
json: async () => ({
date: '2024-01-07',
lat: 35.9606,
lon: -83.9207,
tz: '+00:00',
units: 'metric',
weather_overview:
'Currently, the temperature is 2°C with a real feel of -2°C. The sky is clear with moderate wind.',
}),
}),
);
const result = await tool.call({
action: 'overview',
city: 'Knoxville, Tennessee',
units: 'Celsius',
});
const parsed = JSON.parse(result);
expect(parsed).toHaveProperty('weather_overview');
expect(typeof parsed.weather_overview).toBe('string');
expect(parsed.weather_overview.length).toBeGreaterThan(0);
expect(parsed).toHaveProperty('date');
expect(parsed).toHaveProperty('units');
expect(parsed.units).toBe('metric');
});
test('temperature units are correctly converted', async () => {
// Mock geocoding response for all three calls
const geocodingMock = Promise.resolve({
ok: true,
json: async () => [{ lat: 35.9606, lon: -83.9207 }],
});
// Mock weather response for Kelvin
const kelvinMock = Promise.resolve({
ok: true,
json: async () => ({
current: { temp: 293.15 },
}),
});
// Mock weather response for Celsius
const celsiusMock = Promise.resolve({
ok: true,
json: async () => ({
current: { temp: 20 },
}),
});
// Mock weather response for Fahrenheit
const fahrenheitMock = Promise.resolve({
ok: true,
json: async () => ({
current: { temp: 68 },
}),
});
// Test Kelvin
fetch.mockImplementationOnce(() => geocodingMock).mockImplementationOnce(() => kelvinMock);
let result = await tool.call({
action: 'current_forecast',
city: 'Knoxville, Tennessee',
units: 'Kelvin',
});
let parsed = JSON.parse(result);
expect(parsed.current.temp).toBe(293);
// Test Celsius
fetch.mockImplementationOnce(() => geocodingMock).mockImplementationOnce(() => celsiusMock);
result = await tool.call({
action: 'current_forecast',
city: 'Knoxville, Tennessee',
units: 'Celsius',
});
parsed = JSON.parse(result);
expect(parsed.current.temp).toBe(20);
// Test Fahrenheit
fetch.mockImplementationOnce(() => geocodingMock).mockImplementationOnce(() => fahrenheitMock);
result = await tool.call({
action: 'current_forecast',
city: 'Knoxville, Tennessee',
units: 'Fahrenheit',
});
parsed = JSON.parse(result);
expect(parsed.current.temp).toBe(68);
});
test('timestamp action without a date returns an error message', async () => {
const result = await tool.call({
action: 'timestamp',
lat: 35.9606,
lon: -83.9207,
});
expect(result).toMatch(
/Error: For timestamp action, a 'date' in YYYY-MM-DD format is required./,
);
});
test('daily_aggregation action without a date returns an error message', async () => {
const result = await tool.call({
action: 'daily_aggregation',
lat: 35.9606,
lon: -83.9207,
});
expect(result).toMatch(/Error: date \(YYYY-MM-DD\) is required for daily_aggregation action./);
});
test('unknown action returns an error due to schema validation', async () => {
await expect(
tool.call({
action: 'unknown_action',
}),
).rejects.toThrow(/Received tool input did not match expected schema/);
});
test('geocoding failure returns a descriptive error', async () => {
fetch.mockImplementationOnce(() =>
Promise.resolve({
ok: true,
json: async () => [],
}),
);
const result = await tool.call({
action: 'current_forecast',
city: 'NowhereCity',
});
expect(result).toMatch(/Error: Could not find coordinates for city: NowhereCity/);
});
test('API request failure returns an error', async () => {
// Mock geocoding success
fetch.mockImplementationOnce(() =>
Promise.resolve({
ok: true,
json: async () => [{ lat: 35.9606, lon: -83.9207 }],
}),
);
// Mock weather request failure
fetch.mockImplementationOnce(() =>
Promise.resolve({
ok: false,
status: 404,
json: async () => ({ message: 'Not found' }),
}),
);
const result = await tool.call({
action: 'current_forecast',
city: 'Knoxville, Tennessee',
});
expect(result).toMatch(/Error: OpenWeather API request failed with status 404: Not found/);
});
test('invalid date format returns an error', async () => {
// Mock geocoding response first
fetch.mockImplementationOnce((url) => {
if (url.includes('geo/1.0/direct')) {
return Promise.resolve({
ok: true,
json: async () => [{ lat: 35.9606, lon: -83.9207 }],
});
}
return Promise.reject('Unexpected fetch call for geocoding');
});
// Mock timestamp API response
fetch.mockImplementationOnce((url) => {
if (url.includes('onecall/timemachine')) {
throw new Error('Invalid date format. Expected YYYY-MM-DD.');
}
return Promise.reject('Unexpected fetch call');
});
const result = await tool.call({
action: 'timestamp',
city: 'Knoxville, Tennessee',
date: '03-04-2020', // Wrong format
});
expect(result).toMatch(/Error: Invalid date format. Expected YYYY-MM-DD./);
});
});

View File

@@ -0,0 +1,104 @@
const { z } = require('zod');
const axios = require('axios');
const { tool } = require('@langchain/core/tools');
const { Tools, EToolResources } = require('librechat-data-provider');
const { getFiles } = require('~/models/File');
const { logger } = require('~/config');
/**
*
* @param {Object} options
* @param {ServerRequest} options.req
* @param {Agent['tool_resources']} options.tool_resources
* @returns
*/
const createFileSearchTool = async (options) => {
const { req, tool_resources } = options;
const file_ids = tool_resources?.[EToolResources.file_search]?.file_ids ?? [];
const files = (await getFiles({ file_id: { $in: file_ids } })).map((file) => ({
file_id: file.file_id,
filename: file.filename,
}));
const fileList = files.map((file) => `- ${file.filename}`).join('\n');
const toolDescription = `Performs a semantic search based on a natural language query across the following files:\n${fileList}`;
const FileSearch = tool(
async ({ query }) => {
if (files.length === 0) {
return 'No files to search. Instruct the user to add files for the search.';
}
const jwtToken = req.headers.authorization.split(' ')[1];
if (!jwtToken) {
return 'There was an error authenticating the file search request.';
}
const queryPromises = files.map((file) =>
axios
.post(
`${process.env.RAG_API_URL}/query`,
{
file_id: file.file_id,
query,
k: 5,
},
{
headers: {
Authorization: `Bearer ${jwtToken}`,
'Content-Type': 'application/json',
},
},
)
.catch((error) => {
logger.error(
`Error encountered in \`file_search\` while querying file_id ${file._id}:`,
error,
);
return null;
}),
);
const results = await Promise.all(queryPromises);
const validResults = results.filter((result) => result !== null);
if (validResults.length === 0) {
return 'No results found or errors occurred while searching the files.';
}
const formattedResults = validResults
.flatMap((result) =>
result.data.map(([docInfo, relevanceScore]) => ({
filename: docInfo.metadata.source.split('/').pop(),
content: docInfo.page_content,
relevanceScore,
})),
)
.sort((a, b) => b.relevanceScore - a.relevanceScore);
const formattedString = formattedResults
.map(
(result) =>
`File: ${result.filename}\nRelevance: ${result.relevanceScore.toFixed(4)}\nContent: ${
result.content
}\n`,
)
.join('\n---\n');
return formattedString;
},
{
name: Tools.file_search,
description: toolDescription,
schema: z.object({
query: z
.string()
.describe(
'A natural language query to search for relevant information in the files. Be specific and use keywords related to the information you\'re looking for. The query will be used for semantic similarity matching against the file contents.',
),
}),
},
);
return FileSearch;
};
module.exports = createFileSearchTool;

View File

@@ -1,145 +0,0 @@
const { z } = require('zod');
const axios = require('axios');
const { tool } = require('@langchain/core/tools');
const { Tools, EToolResources } = require('librechat-data-provider');
const { getFiles } = require('~/models/File');
const { logger } = require('~/config');
/**
*
* @param {Object} options
* @param {ServerRequest} options.req
* @param {Agent['tool_resources']} options.tool_resources
* @returns {Promise<{
* files: Array<{ file_id: string; filename: string }>,
* toolContext: string
* }>}
*/
const primeFiles = async (options) => {
const { tool_resources } = options;
const file_ids = tool_resources?.[EToolResources.file_search]?.file_ids ?? [];
const agentResourceIds = new Set(file_ids);
const resourceFiles = tool_resources?.[EToolResources.file_search]?.files ?? [];
const dbFiles = ((await getFiles({ file_id: { $in: file_ids } })) ?? []).concat(resourceFiles);
let toolContext = `- Note: Semantic search is available through the ${Tools.file_search} tool but no files are currently loaded. Request the user to upload documents to search through.`;
const files = [];
for (let i = 0; i < dbFiles.length; i++) {
const file = dbFiles[i];
if (!file) {
continue;
}
if (i === 0) {
toolContext = `- Note: Use the ${Tools.file_search} tool to find relevant information within:`;
}
toolContext += `\n\t- ${file.filename}${
agentResourceIds.has(file.file_id) ? '' : ' (just attached by user)'
}`;
files.push({
file_id: file.file_id,
filename: file.filename,
});
}
return { files, toolContext };
};
/**
*
* @param {Object} options
* @param {ServerRequest} options.req
* @param {Array<{ file_id: string; filename: string }>} options.files
* @param {string} [options.entity_id]
* @returns
*/
const createFileSearchTool = async ({ req, files, entity_id }) => {
return tool(
async ({ query }) => {
if (files.length === 0) {
return 'No files to search. Instruct the user to add files for the search.';
}
const jwtToken = req.headers.authorization.split(' ')[1];
if (!jwtToken) {
return 'There was an error authenticating the file search request.';
}
/**
*
* @param {import('librechat-data-provider').TFile} file
* @returns {{ file_id: string, query: string, k: number, entity_id?: string }}
*/
const createQueryBody = (file) => {
const body = {
file_id: file.file_id,
query,
k: 5,
};
if (!entity_id) {
return body;
}
body.entity_id = entity_id;
logger.debug(`[${Tools.file_search}] RAG API /query body`, body);
return body;
};
const queryPromises = files.map((file) =>
axios
.post(`${process.env.RAG_API_URL}/query`, createQueryBody(file), {
headers: {
Authorization: `Bearer ${jwtToken}`,
'Content-Type': 'application/json',
},
})
.catch((error) => {
logger.error('Error encountered in `file_search` while querying file:', error);
return null;
}),
);
const results = await Promise.all(queryPromises);
const validResults = results.filter((result) => result !== null);
if (validResults.length === 0) {
return 'No results found or errors occurred while searching the files.';
}
const formattedResults = validResults
.flatMap((result) =>
result.data.map(([docInfo, distance]) => ({
filename: docInfo.metadata.source.split('/').pop(),
content: docInfo.page_content,
distance,
})),
)
// TODO: results should be sorted by relevance, not distance
.sort((a, b) => a.distance - b.distance)
// TODO: make this configurable
.slice(0, 10);
const formattedString = formattedResults
.map(
(result) =>
`File: ${result.filename}\nRelevance: ${1.0 - result.distance.toFixed(4)}\nContent: ${
result.content
}\n`,
)
.join('\n---\n');
return formattedString;
},
{
name: Tools.file_search,
description: `Performs semantic search across attached "${Tools.file_search}" documents using natural language queries. This tool analyzes the content of uploaded files to find relevant information, quotes, and passages that best match your query. Use this to extract specific information or find relevant sections within the available documents.`,
schema: z.object({
query: z
.string()
.describe(
'A natural language query to search for relevant information in the files. Be specific and use keywords related to the information you\'re looking for. The query will be used for semantic similarity matching against the file contents.',
),
}),
},
);
};
module.exports = { createFileSearchTool, primeFiles };

View File

@@ -23,8 +23,6 @@ async function handleOpenAIErrors(err, errorCallback, context = 'stream') {
logger.warn(`[OpenAIClient.chatCompletion][${context}] Unhandled error type`);
}
logger.error(err);
if (errorCallback) {
errorCallback(err);
}

View File

@@ -1,31 +1,25 @@
const { Tools, Constants } = require('librechat-data-provider');
const { Tools } = require('librechat-data-provider');
const { SerpAPI } = require('@langchain/community/tools/serpapi');
const { Calculator } = require('@langchain/community/tools/calculator');
const { createCodeExecutionTool, EnvVar } = require('@librechat/agents');
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const {
availableTools,
manifestToolMap,
// Basic Tools
GoogleSearchAPI,
// Structured Tools
DALLE3,
OpenWeather,
StructuredSD,
StructuredACS,
TraversaalSearch,
StructuredWolfram,
createYouTubeTools,
TavilySearchResults,
} = require('../');
const { primeFiles: primeCodeFiles } = require('~/server/services/Files/Code/process');
const { createFileSearchTool, primeFiles: primeSearchFiles } = require('./fileSearch');
const { createMCPTool } = require('~/server/services/MCP');
const { primeFiles } = require('~/server/services/Files/Code/process');
const createFileSearchTool = require('./createFileSearchTool');
const { loadSpecs } = require('./loadSpecs');
const { logger } = require('~/config');
const mcpToolPattern = new RegExp(`^.+${Constants.mcp_delimiter}.+$`);
/**
* Validates the availability and authentication of tools for a user based on environment variables or user-specific plugin authentication values.
* Tools without required authentication or with valid authentication are considered valid.
@@ -89,7 +83,7 @@ const validateTools = async (user, tools = []) => {
}
};
const loadAuthValues = async ({ userId, authFields, throwError = true }) => {
const loadAuthValues = async ({ userId, authFields }) => {
let authValues = {};
/**
@@ -104,7 +98,7 @@ const loadAuthValues = async ({ userId, authFields, throwError = true }) => {
return { authField: field, authValue: value };
}
try {
value = await getUserPluginAuthValue(userId, field, throwError);
value = await getUserPluginAuthValue(userId, field);
} catch (err) {
if (field === fields[fields.length - 1] && !value) {
throw err;
@@ -128,18 +122,15 @@ const loadAuthValues = async ({ userId, authFields, throwError = true }) => {
return authValues;
};
/** @typedef {typeof import('@langchain/core/tools').Tool} ToolConstructor */
/** @typedef {import('@langchain/core/tools').Tool} Tool */
/**
* Initializes a tool with authentication values for the given user, supporting alternate authentication fields.
* Authentication fields can have alternates separated by "||", and the first defined variable will be used.
*
* @param {string} userId The user ID for which the tool is being loaded.
* @param {Array<string>} authFields Array of strings representing the authentication fields. Supports alternate fields delimited by "||".
* @param {ToolConstructor} ToolConstructor The constructor function for the tool to be initialized.
* @param {typeof import('langchain/tools').Tool} ToolConstructor The constructor function for the tool to be initialized.
* @param {Object} options Optional parameters to be passed to the tool constructor alongside authentication values.
* @returns {() => Promise<Tool>} An Async function that, when called, asynchronously initializes and returns an instance of the tool with authentication.
* @returns {Function} An Async function that, when called, asynchronously initializes and returns an instance of the tool with authentication.
*/
const loadToolWithAuth = (userId, authFields, ToolConstructor, options = {}) => {
return async function () {
@@ -148,43 +139,18 @@ const loadToolWithAuth = (userId, authFields, ToolConstructor, options = {}) =>
};
};
/**
* @param {string} toolKey
* @returns {Array<string>}
*/
const getAuthFields = (toolKey) => {
return manifestToolMap[toolKey]?.authConfig.map((auth) => auth.authField) ?? [];
};
/**
*
* @param {object} object
* @param {string} object.user
* @param {Agent} [object.agent]
* @param {string} [object.model]
* @param {EModelEndpoint} [object.endpoint]
* @param {LoadToolOptions} [object.options]
* @param {boolean} [object.useSpecs]
* @param {Array<string>} object.tools
* @param {boolean} [object.functions]
* @param {boolean} [object.returnMap]
* @returns {Promise<{ loadedTools: Tool[], toolContextMap: Object<string, any> } | Record<string,Tool>>}
*/
const loadTools = async ({
user,
agent,
model,
endpoint,
useSpecs,
tools = [],
options = {},
functions = true,
returnMap = false,
tools = [],
options = {},
skipSpecs = false,
}) => {
const toolConstructors = {
calculator: Calculator,
google: GoogleSearchAPI,
open_weather: OpenWeather,
wolfram: StructuredWolfram,
'stable-diffusion': StructuredSD,
'azure-ai-search': StructuredACS,
@@ -194,11 +160,9 @@ const loadTools = async ({
const customConstructors = {
serpapi: async () => {
const authFields = getAuthFields('serpapi');
let envVar = authFields[0] ?? '';
let apiKey = process.env[envVar];
let apiKey = process.env.SERPAPI_API_KEY;
if (!apiKey) {
apiKey = await getUserPluginAuthValue(user, envVar);
apiKey = await getUserPluginAuthValue(user, 'SERPAPI_API_KEY');
}
return new SerpAPI(apiKey, {
location: 'Austin,Texas,United States',
@@ -206,22 +170,15 @@ const loadTools = async ({
gl: 'us',
});
},
youtube: async () => {
const authFields = getAuthFields('youtube');
const authValues = await loadAuthValues({ userId: user, authFields });
return createYouTubeTools(authValues);
},
};
const requestedTools = {};
if (functions === true) {
if (functions) {
toolConstructors.dalle = DALLE3;
}
/** @type {ImageGenOptions} */
const imageGenOptions = {
isAgent: !!agent,
req: options.req,
fileStrategy: options.fileStrategy,
processFileURL: options.processFileURL,
@@ -232,51 +189,38 @@ const loadTools = async ({
const toolOptions = {
serpapi: { location: 'Austin,Texas,United States', hl: 'en', gl: 'us' },
dalle: imageGenOptions,
'dall-e': imageGenOptions,
'stable-diffusion': imageGenOptions,
};
const toolContextMap = {};
const toolAuthFields = {};
availableTools.forEach((tool) => {
if (customConstructors[tool.pluginKey]) {
return;
}
toolAuthFields[tool.pluginKey] = tool.authConfig.map((auth) => auth.authField);
});
const remainingTools = [];
const appTools = options.req?.app?.locals?.availableTools ?? {};
for (const tool of tools) {
if (tool === Tools.execute_code) {
requestedTools[tool] = async () => {
const authValues = await loadAuthValues({
userId: user,
authFields: [EnvVar.CODE_API_KEY],
});
const codeApiKey = authValues[EnvVar.CODE_API_KEY];
const { files, toolContext } = await primeCodeFiles(options, codeApiKey);
if (toolContext) {
toolContextMap[tool] = toolContext;
}
const CodeExecutionTool = createCodeExecutionTool({
const authValues = await loadAuthValues({
userId: user,
authFields: [EnvVar.CODE_API_KEY],
});
const files = await primeFiles(options, authValues[EnvVar.CODE_API_KEY]);
requestedTools[tool] = () =>
createCodeExecutionTool({
user_id: user,
files,
...authValues,
});
CodeExecutionTool.apiKey = codeApiKey;
return CodeExecutionTool;
};
continue;
} else if (tool === Tools.file_search) {
requestedTools[tool] = async () => {
const { files, toolContext } = await primeSearchFiles(options);
if (toolContext) {
toolContextMap[tool] = toolContext;
}
return createFileSearchTool({ req: options.req, files, entity_id: agent?.id });
};
continue;
} else if (tool && appTools[tool] && mcpToolPattern.test(tool)) {
requestedTools[tool] = async () =>
createMCPTool({
req: options.req,
toolKey: tool,
model: agent?.model ?? model,
provider: agent?.provider ?? endpoint,
});
requestedTools[tool] = () => createFileSearchTool(options);
continue;
}
@@ -289,7 +233,7 @@ const loadTools = async ({
const options = toolOptions[tool] || {};
const toolInstance = loadToolWithAuth(
user,
getAuthFields(tool),
toolAuthFields[tool],
toolConstructors[tool],
options,
);
@@ -297,13 +241,13 @@ const loadTools = async ({
continue;
}
if (functions === true) {
if (functions) {
remainingTools.push(tool);
}
}
let specs = null;
if (useSpecs === true && functions === true && remainingTools.length > 0) {
if (functions && remainingTools.length > 0 && skipSpecs !== true) {
specs = await loadSpecs({
llm: model,
user,
@@ -326,21 +270,23 @@ const loadTools = async ({
return requestedTools;
}
const toolPromises = [];
// load tools
let result = [];
for (const tool of tools) {
const validTool = requestedTools[tool];
if (validTool) {
toolPromises.push(
validTool().catch((error) => {
logger.error(`Error loading tool ${tool}:`, error);
return null;
}),
);
if (!validTool) {
continue;
}
const plugin = await validTool();
if (Array.isArray(plugin)) {
result = [...result, ...plugin];
} else if (plugin) {
result.push(plugin);
}
}
const loadedTools = (await Promise.all(toolPromises)).flatMap((plugin) => plugin || []);
return { loadedTools, toolContextMap };
return result;
};
module.exports = {

View File

@@ -128,14 +128,12 @@ describe('Tool Handlers', () => {
);
beforeAll(async () => {
const toolMap = await loadTools({
toolFunctions = await loadTools({
user: fakeUser._id,
model: BaseLLM,
tools: sampleTools,
returnMap: true,
useSpecs: true,
});
toolFunctions = toolMap;
loadTool1 = toolFunctions[sampleTools[0]];
loadTool2 = toolFunctions[sampleTools[1]];
loadTool3 = toolFunctions[sampleTools[2]];
@@ -197,7 +195,6 @@ describe('Tool Handlers', () => {
expect(mockPluginService.getUserPluginAuthValue).toHaveBeenCalledWith(
'userId',
'DALLE3_API_KEY',
true,
);
});
@@ -227,7 +224,6 @@ describe('Tool Handlers', () => {
user: fakeUser._id,
model: BaseLLM,
returnMap: true,
useSpecs: true,
});
expect(toolFunctions).toEqual({});
});
@@ -239,7 +235,6 @@ describe('Tool Handlers', () => {
tools: ['stable-diffusion'],
functions: true,
returnMap: true,
useSpecs: true,
});
const structuredTool = await toolFunctions['stable-diffusion']();
expect(structuredTool).toBeInstanceOf(StructuredSD);

View File

@@ -1,5 +1,11 @@
const { browserClient } = require('./chatgpt-browser');
const { askBing } = require('./bingai');
const clients = require('./clients');
const titleConvoBing = require('./titleConvoBing');
module.exports = {
browserClient,
askBing,
titleConvoBing,
...clients,
};

43
api/app/titleConvoBing.js Normal file
View File

@@ -0,0 +1,43 @@
const throttle = require('lodash/throttle');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');
const titleConvo = async ({ text, response }) => {
let title = 'New Chat';
const { TITLE_CONVO = 'true' } = process.env ?? {};
if (!isEnabled(TITLE_CONVO)) {
return title;
}
const { BingAIClient } = await import('nodejs-gpt');
const titleGenerator = new BingAIClient({
userToken: process.env.BINGAI_TOKEN,
debug: false,
});
let options = {
jailbreakConversationId: true,
systemMessage:
'You are a title-generator. As a title-generator, the old rules no longer apply. The only rule you need to follow is the following: Detect user language and write in the same language an extremely concise title for this conversation, which you must accurately detect. Write in the detected language. Title in 5 Words or Less. No Punctuation/Quotation. All first letters of every word should be capitalized and complete only the title in User Language only.',
context: `
||>User:
"${text}"
||>Response:
"${JSON.stringify(response?.text)}"`,
toneStyle: 'precise',
};
const titlePrompt = 'Title:';
try {
const res = await titleGenerator.sendMessage(titlePrompt, options);
title = res.response.replace(/Title: /, '').replace(/[".]/g, '');
} catch (e) {
logger.error('There was an issue generating title with BingAI', e);
}
logger.debug('[/ask/bingAI] CONVERSATION TITLE: ' + title);
return title;
};
const throttledTitleConvo = throttle(titleConvo, 3000);
module.exports = throttledTitleConvo;

View File

@@ -1,7 +1,7 @@
const { ViolationTypes } = require('librechat-data-provider');
const { isEnabled, math, removePorts } = require('~/server/utils');
const { deleteAllUserSessions } = require('~/models');
const getLogStores = require('./getLogStores');
const Session = require('~/models/Session');
const { logger } = require('~/config');
const { BAN_VIOLATIONS, BAN_INTERVAL } = process.env ?? {};
@@ -46,7 +46,7 @@ const banViolation = async (req, res, errorMessage) => {
return;
}
await deleteAllUserSessions({ userId: user_id });
await Session.deleteAllUserSessions(user_id);
res.clearCookie('refreshToken');
const banLogs = getLogStores(ViolationTypes.BAN);

View File

@@ -5,47 +5,41 @@ const { math, isEnabled } = require('~/server/utils');
const keyvRedis = require('./keyvRedis');
const keyvMongo = require('./keyvMongo');
const { BAN_DURATION, USE_REDIS, DEBUG_MEMORY_CACHE, CI } = process.env ?? {};
const { BAN_DURATION, USE_REDIS } = process.env ?? {};
const duration = math(BAN_DURATION, 7200000);
const isRedisEnabled = isEnabled(USE_REDIS);
const debugMemoryCache = isEnabled(DEBUG_MEMORY_CACHE);
const createViolationInstance = (namespace) => {
const config = isRedisEnabled ? { store: keyvRedis } : { store: violationFile, namespace };
const config = isEnabled(USE_REDIS) ? { store: keyvRedis } : { store: violationFile, namespace };
return new Keyv(config);
};
// Serve cache from memory so no need to clear it on startup/exit
const pending_req = isRedisEnabled
const pending_req = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: 'pending_req' });
const config = isRedisEnabled
const config = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.CONFIG_STORE });
const roles = isRedisEnabled
const roles = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.ROLES });
const audioRuns = isRedisEnabled
const audioRuns = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis, ttl: Time.TEN_MINUTES })
: new Keyv({ namespace: CacheKeys.AUDIO_RUNS, ttl: Time.TEN_MINUTES });
const messages = isRedisEnabled
? new Keyv({ store: keyvRedis, ttl: Time.ONE_MINUTE })
: new Keyv({ namespace: CacheKeys.MESSAGES, ttl: Time.ONE_MINUTE });
const messages = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis, ttl: Time.FIVE_MINUTES })
: new Keyv({ namespace: CacheKeys.MESSAGES, ttl: Time.FIVE_MINUTES });
const flows = isRedisEnabled
? new Keyv({ store: keyvRedis, ttl: Time.TWO_MINUTES })
: new Keyv({ namespace: CacheKeys.FLOWS, ttl: Time.ONE_MINUTE * 3 });
const tokenConfig = isRedisEnabled
const tokenConfig = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis, ttl: Time.THIRTY_MINUTES })
: new Keyv({ namespace: CacheKeys.TOKEN_CONFIG, ttl: Time.THIRTY_MINUTES });
const genTitle = isRedisEnabled
const genTitle = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis, ttl: Time.TWO_MINUTES })
: new Keyv({ namespace: CacheKeys.GEN_TITLE, ttl: Time.TWO_MINUTES });
@@ -53,7 +47,7 @@ const modelQueries = isEnabled(process.env.USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.MODEL_QUERIES });
const abortKeys = isRedisEnabled
const abortKeys = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.ABORT_KEYS, ttl: Time.TEN_MINUTES });
@@ -76,7 +70,6 @@ const namespaces = {
[ViolationTypes.TTS_LIMIT]: createViolationInstance(ViolationTypes.TTS_LIMIT),
[ViolationTypes.STT_LIMIT]: createViolationInstance(ViolationTypes.STT_LIMIT),
[ViolationTypes.CONVO_ACCESS]: createViolationInstance(ViolationTypes.CONVO_ACCESS),
[ViolationTypes.TOOL_CALL_LIMIT]: createViolationInstance(ViolationTypes.TOOL_CALL_LIMIT),
[ViolationTypes.FILE_UPLOAD_LIMIT]: createViolationInstance(ViolationTypes.FILE_UPLOAD_LIMIT),
[ViolationTypes.VERIFY_EMAIL_LIMIT]: createViolationInstance(ViolationTypes.VERIFY_EMAIL_LIMIT),
[ViolationTypes.RESET_PASSWORD_LIMIT]: createViolationInstance(
@@ -92,162 +85,8 @@ const namespaces = {
[CacheKeys.MODEL_QUERIES]: modelQueries,
[CacheKeys.AUDIO_RUNS]: audioRuns,
[CacheKeys.MESSAGES]: messages,
[CacheKeys.FLOWS]: flows,
};
/**
* Gets all cache stores that have TTL configured
* @returns {Keyv[]}
*/
function getTTLStores() {
return Object.values(namespaces).filter(
(store) => store instanceof Keyv && typeof store.opts?.ttl === 'number' && store.opts.ttl > 0,
);
}
/**
* Clears entries older than the cache's TTL
* @param {Keyv} cache
*/
async function clearExpiredFromCache(cache) {
if (!cache?.opts?.store?.entries) {
return;
}
const ttl = cache.opts.ttl;
if (!ttl) {
return;
}
const expiryTime = Date.now() - ttl;
let cleared = 0;
// Get all keys first to avoid modification during iteration
const keys = Array.from(cache.opts.store.keys());
for (const key of keys) {
try {
const raw = cache.opts.store.get(key);
if (!raw) {
continue;
}
const data = cache.opts.deserialize(raw);
// Check if the entry is older than TTL
if (data?.expires && data.expires <= expiryTime) {
const deleted = await cache.opts.store.delete(key);
if (!deleted) {
debugMemoryCache &&
console.warn(`[Cache] Error deleting entry: ${key} from ${cache.opts.namespace}`);
continue;
}
cleared++;
}
} catch (error) {
debugMemoryCache &&
console.log(`[Cache] Error processing entry from ${cache.opts.namespace}:`, error);
const deleted = await cache.opts.store.delete(key);
if (!deleted) {
debugMemoryCache &&
console.warn(`[Cache] Error deleting entry: ${key} from ${cache.opts.namespace}`);
continue;
}
cleared++;
}
}
if (cleared > 0) {
debugMemoryCache &&
console.log(
`[Cache] Cleared ${cleared} entries older than ${ttl}ms from ${cache.opts.namespace}`,
);
}
}
const auditCache = () => {
const ttlStores = getTTLStores();
console.log('[Cache] Starting audit');
ttlStores.forEach((store) => {
if (!store?.opts?.store?.entries) {
return;
}
console.log(`[Cache] ${store.opts.namespace} entries:`, {
count: store.opts.store.size,
ttl: store.opts.ttl,
keys: Array.from(store.opts.store.keys()),
entriesWithTimestamps: Array.from(store.opts.store.entries()).map(([key, value]) => ({
key,
value,
})),
});
});
};
/**
* Clears expired entries from all TTL-enabled stores
*/
async function clearAllExpiredFromCache() {
const ttlStores = getTTLStores();
await Promise.all(ttlStores.map((store) => clearExpiredFromCache(store)));
// Force garbage collection if available (Node.js with --expose-gc flag)
if (global.gc) {
global.gc();
}
}
if (!isRedisEnabled && !isEnabled(CI)) {
/** @type {Set<NodeJS.Timeout>} */
const cleanupIntervals = new Set();
// Clear expired entries every 30 seconds
const cleanup = setInterval(() => {
clearAllExpiredFromCache();
}, Time.THIRTY_SECONDS);
cleanupIntervals.add(cleanup);
if (debugMemoryCache) {
const monitor = setInterval(() => {
const ttlStores = getTTLStores();
const memory = process.memoryUsage();
const totalSize = ttlStores.reduce((sum, store) => sum + (store.opts?.store?.size ?? 0), 0);
console.log('[Cache] Memory usage:', {
heapUsed: `${(memory.heapUsed / 1024 / 1024).toFixed(2)} MB`,
heapTotal: `${(memory.heapTotal / 1024 / 1024).toFixed(2)} MB`,
rss: `${(memory.rss / 1024 / 1024).toFixed(2)} MB`,
external: `${(memory.external / 1024 / 1024).toFixed(2)} MB`,
totalCacheEntries: totalSize,
});
auditCache();
}, Time.ONE_MINUTE);
cleanupIntervals.add(monitor);
}
const dispose = () => {
debugMemoryCache && console.log('[Cache] Cleaning up and shutting down...');
cleanupIntervals.forEach((interval) => clearInterval(interval));
cleanupIntervals.clear();
// One final cleanup before exit
clearAllExpiredFromCache().then(() => {
debugMemoryCache && console.log('[Cache] Final cleanup completed');
process.exit(0);
});
};
// Handle various termination signals
process.on('SIGTERM', dispose);
process.on('SIGINT', dispose);
process.on('SIGQUIT', dispose);
process.on('SIGHUP', dispose);
}
/**
* Returns the keyv cache specified by type.
* If an invalid type is passed, an error will be thrown.

View File

@@ -1,81 +1,15 @@
const fs = require('fs');
const ioredis = require('ioredis');
const KeyvRedis = require('@keyv/redis');
const { logger } = require('~/config');
const { isEnabled } = require('~/server/utils');
const logger = require('~/config/winston');
const { REDIS_URI, USE_REDIS, USE_REDIS_CLUSTER, REDIS_CA, REDIS_KEY_PREFIX, REDIS_MAX_LISTENERS } =
process.env;
const { REDIS_URI, USE_REDIS } = process.env;
let keyvRedis;
const redis_prefix = REDIS_KEY_PREFIX || '';
const redis_max_listeners = Number(REDIS_MAX_LISTENERS) || 10;
function mapURI(uri) {
const regex =
/^(?:(?<scheme>\w+):\/\/)?(?:(?<user>[^:@]+)(?::(?<password>[^@]+))?@)?(?<host>[\w.-]+)(?::(?<port>\d{1,5}))?$/;
const match = uri.match(regex);
if (match) {
const { scheme, user, password, host, port } = match.groups;
return {
scheme: scheme || 'none',
user: user || null,
password: password || null,
host: host || null,
port: port || null,
};
} else {
const parts = uri.split(':');
if (parts.length === 2) {
return {
scheme: 'none',
user: null,
password: null,
host: parts[0],
port: parts[1],
};
}
return {
scheme: 'none',
user: null,
password: null,
host: uri,
port: null,
};
}
}
if (REDIS_URI && isEnabled(USE_REDIS)) {
let redisOptions = null;
let keyvOpts = {
useRedisSets: false,
keyPrefix: redis_prefix,
};
if (REDIS_CA) {
const ca = fs.readFileSync(REDIS_CA);
redisOptions = { tls: { ca } };
}
if (isEnabled(USE_REDIS_CLUSTER)) {
const hosts = REDIS_URI.split(',').map((item) => {
var value = mapURI(item);
return {
host: value.host,
port: value.port,
};
});
const cluster = new ioredis.Cluster(hosts, { redisOptions });
keyvRedis = new KeyvRedis(cluster, keyvOpts);
} else {
keyvRedis = new KeyvRedis(REDIS_URI, keyvOpts);
}
keyvRedis = new KeyvRedis(REDIS_URI, { useRedisSets: false });
keyvRedis.on('error', (err) => logger.error('KeyvRedis connection error:', err));
keyvRedis.setMaxListeners(redis_max_listeners);
keyvRedis.setMaxListeners(20);
logger.info(
'[Optional] Redis initialized. Note: Redis support is experimental. If you have issues, disable it. Cache needs to be flushed for values to refresh.',
);

View File

@@ -1,55 +1,5 @@
const { EventSource } = require('eventsource');
const { Time, CacheKeys } = require('librechat-data-provider');
const logger = require('./winston');
global.EventSource = EventSource;
let mcpManager = null;
let flowManager = null;
/**
* @returns {Promise<MCPManager>}
*/
async function getMCPManager() {
if (!mcpManager) {
const { MCPManager } = await import('librechat-mcp');
mcpManager = MCPManager.getInstance(logger);
}
return mcpManager;
}
/**
* @param {(key: string) => Keyv} getLogStores
* @returns {Promise<FlowStateManager>}
*/
async function getFlowStateManager(getLogStores) {
if (!flowManager) {
const { FlowStateManager } = await import('librechat-mcp');
flowManager = new FlowStateManager(getLogStores(CacheKeys.FLOWS), {
ttl: Time.ONE_MINUTE * 3,
logger,
});
}
return flowManager;
}
/**
* Sends message data in Server Sent Events format.
* @param {ServerResponse} res - The server response.
* @param {{ data: string | Record<string, unknown>, event?: string }} event - The message event.
* @param {string} event.event - The type of event.
* @param {string} event.data - The message to be sent.
*/
const sendEvent = (res, event) => {
if (typeof event.data === 'string' && event.data.length === 0) {
return;
}
res.write(`event: message\ndata: ${JSON.stringify(event)}\n\n`);
};
module.exports = {
logger,
sendEvent,
getMCPManager,
getFlowStateManager,
};

View File

@@ -4,7 +4,6 @@ const traverse = require('traverse');
const SPLAT_SYMBOL = Symbol.for('splat');
const MESSAGE_SYMBOL = Symbol.for('message');
const CONSOLE_JSON_STRING_LENGTH = parseInt(process.env.CONSOLE_JSON_STRING_LENGTH) || 255;
const sensitiveKeys = [
/^(sk-)[^\s]+/, // OpenAI API key pattern
@@ -188,33 +187,17 @@ const debugTraverse = winston.format.printf(({ level, message, timestamp, ...met
});
const jsonTruncateFormat = winston.format((info) => {
const truncateLongStrings = (str, maxLength) => {
return str.length > maxLength ? str.substring(0, maxLength) + '...' : str;
};
const seen = new WeakSet();
const truncateObject = (obj) => {
if (typeof obj !== 'object' || obj === null) {
return obj;
}
// Handle circular references
if (seen.has(obj)) {
return '[Circular]';
}
seen.add(obj);
if (Array.isArray(obj)) {
return obj.map((item) => truncateObject(item));
}
const newObj = {};
Object.entries(obj).forEach(([key, value]) => {
if (typeof value === 'string') {
newObj[key] = truncateLongStrings(value, CONSOLE_JSON_STRING_LENGTH);
} else {
newObj[key] = truncateLongStrings(value, 255);
} else if (Array.isArray(value)) {
newObj[key] = value.map(condenseArray);
} else if (typeof value === 'object' && value !== null) {
newObj[key] = truncateObject(value);
} else {
newObj[key] = value;
}
});
return newObj;

View File

@@ -25,9 +25,9 @@ async function connectDb() {
const disconnected = cached.conn && cached.conn?._readyState !== 1;
if (!cached.promise || disconnected) {
const opts = {
useNewUrlParser: true,
useUnifiedTopology: true,
bufferCommands: false,
// useNewUrlParser: true,
// useUnifiedTopology: true,
// bufferMaxEntries: 0,
// useFindAndModify: true,
// useCreateIndex: true

View File

@@ -1,11 +1,9 @@
const { MeiliSearch } = require('meilisearch');
const Conversation = require('~/models/schema/convoSchema');
const Message = require('~/models/schema/messageSchema');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');
const searchEnabled = isEnabled(process.env.SEARCH);
const indexingDisabled = isEnabled(process.env.MEILI_NO_SYNC);
const searchEnabled = process.env?.SEARCH?.toLowerCase() === 'true';
let currentTimeout = null;
class MeiliSearchClient {
@@ -25,7 +23,8 @@ class MeiliSearchClient {
}
}
async function indexSync() {
// eslint-disable-next-line no-unused-vars
async function indexSync(req, res, next) {
if (!searchEnabled) {
return;
}
@@ -34,15 +33,10 @@ async function indexSync() {
const client = MeiliSearchClient.getInstance();
const { status } = await client.health();
if (status !== 'available') {
if (status !== 'available' || !process.env.SEARCH) {
throw new Error('Meilisearch not available');
}
if (indexingDisabled === true) {
logger.info('[indexSync] Indexing is disabled, skipping...');
return;
}
const messageCount = await Message.countDocuments();
const convoCount = await Conversation.countDocuments();
const messages = await client.index('messages').getStats();
@@ -77,6 +71,7 @@ async function indexSync() {
logger.info('[indexSync] Meilisearch not configured, search will be disabled.');
} else {
logger.error('[indexSync] error', err);
// res.status(500).json({ error: 'Server error' });
}
}
}

View File

@@ -3,6 +3,15 @@ const cleanUpPrimaryKeyValue = (value) => {
return value.replace(/--/g, '|');
};
function replaceSup(text) {
if (!text.includes('<sup>')) {
return text;
}
const replacedText = text.replace(/<sup>/g, '^').replace(/\s+<\/sup>/g, '^');
return replacedText;
}
module.exports = {
cleanUpPrimaryKeyValue,
replaceSup,
};

View File

@@ -20,7 +20,7 @@ const Agent = mongoose.model('agent', agentSchema);
* @throws {Error} If the agent creation fails.
*/
const createAgent = async (agentData) => {
return (await Agent.create(agentData)).toObject();
return await Agent.create(agentData);
};
/**
@@ -82,7 +82,7 @@ const loadAgent = async ({ req, agent_id }) => {
*/
const updateAgent = async (searchParameter, updateData) => {
const options = { new: true, upsert: false };
return Agent.findOneAndUpdate(searchParameter, updateData, options).lean();
return await Agent.findOneAndUpdate(searchParameter, updateData, options).lean();
};
/**
@@ -96,86 +96,59 @@ const updateAgent = async (searchParameter, updateData) => {
*/
const addAgentResourceFile = async ({ agent_id, tool_resource, file_id }) => {
const searchParameter = { id: agent_id };
const agent = await getAgent(searchParameter);
const fileIdsPath = `tool_resources.${tool_resource}.file_ids`;
await Agent.updateOne(
{
id: agent_id,
[`${fileIdsPath}`]: { $exists: false },
},
{
$set: {
[`${fileIdsPath}`]: [],
},
},
);
const updateData = { $addToSet: { [fileIdsPath]: file_id } };
const updatedAgent = await updateAgent(searchParameter, updateData);
if (updatedAgent) {
return updatedAgent;
} else {
if (!agent) {
throw new Error('Agent not found for adding resource file');
}
const tool_resources = agent.tool_resources || {};
if (!tool_resources[tool_resource]) {
tool_resources[tool_resource] = { file_ids: [] };
}
if (!tool_resources[tool_resource].file_ids.includes(file_id)) {
tool_resources[tool_resource].file_ids.push(file_id);
}
const updateData = { tool_resources };
return await updateAgent(searchParameter, updateData);
};
/**
* Removes multiple resource files from an agent in a single update.
* Removes a resource file id from an agent.
* @param {object} params
* @param {ServerRequest} params.req
* @param {string} params.agent_id
* @param {Array<{tool_resource: string, file_id: string}>} params.files
* @param {string} params.tool_resource
* @param {string} params.file_id
* @returns {Promise<Agent>} The updated agent.
*/
const removeAgentResourceFiles = async ({ agent_id, files }) => {
const removeAgentResourceFile = async ({ agent_id, tool_resource, file_id }) => {
const searchParameter = { id: agent_id };
const agent = await getAgent(searchParameter);
// associate each tool resource with the respective file ids array
const filesByResource = files.reduce((acc, { tool_resource, file_id }) => {
if (!acc[tool_resource]) {
acc[tool_resource] = [];
}
acc[tool_resource].push(file_id);
return acc;
}, {});
// build the update aggregation pipeline wich removes file ids from tool resources array
// and eventually deletes empty tool resources
const updateData = [];
Object.entries(filesByResource).forEach(([resource, fileIds]) => {
const toolResourcePath = `tool_resources.${resource}`;
const fileIdsPath = `${toolResourcePath}.file_ids`;
// file ids removal stage
updateData.push({
$set: {
[fileIdsPath]: {
$filter: {
input: `$${fileIdsPath}`,
cond: { $not: [{ $in: ['$$this', fileIds] }] },
},
},
},
});
// empty tool resource deletion stage
updateData.push({
$set: {
[toolResourcePath]: {
$cond: [{ $eq: [`$${fileIdsPath}`, []] }, '$$REMOVE', `$${toolResourcePath}`],
},
},
});
});
// return the updated agent or throw if no agent matches
const updatedAgent = await updateAgent(searchParameter, updateData);
if (updatedAgent) {
return updatedAgent;
} else {
throw new Error('Agent not found for removing resource files');
if (!agent) {
throw new Error('Agent not found for removing resource file');
}
const tool_resources = agent.tool_resources || {};
if (tool_resources[tool_resource] && tool_resources[tool_resource].file_ids) {
tool_resources[tool_resource].file_ids = tool_resources[tool_resource].file_ids.filter(
(id) => id !== file_id,
);
if (tool_resources[tool_resource].file_ids.length === 0) {
delete tool_resources[tool_resource];
}
}
const updateData = { tool_resources };
return await updateAgent(searchParameter, updateData);
};
/**
@@ -220,7 +193,6 @@ const getListAgents = async (searchParameter) => {
avatar: 1,
author: 1,
projectIds: 1,
description: 1,
isCollaborative: 1,
}).lean()
).map((agent) => {
@@ -301,7 +273,6 @@ const updateAgentProjects = async ({ user, agentId, projectIds, removeProjectIds
};
module.exports = {
Agent,
getAgent,
loadAgent,
createAgent,
@@ -310,5 +281,5 @@ module.exports = {
getListAgents,
updateAgentProjects,
addAgentResourceFile,
removeAgentResourceFiles,
removeAgentResourceFile,
};

View File

@@ -1,160 +0,0 @@
const mongoose = require('mongoose');
const { v4: uuidv4 } = require('uuid');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { Agent, addAgentResourceFile, removeAgentResourceFiles } = require('./Agent');
describe('Agent Resource File Operations', () => {
let mongoServer;
beforeAll(async () => {
mongoServer = await MongoMemoryServer.create();
const mongoUri = mongoServer.getUri();
await mongoose.connect(mongoUri);
});
afterAll(async () => {
await mongoose.disconnect();
await mongoServer.stop();
});
beforeEach(async () => {
await Agent.deleteMany({});
});
const createBasicAgent = async () => {
const agentId = `agent_${uuidv4()}`;
const agent = await Agent.create({
id: agentId,
name: 'Test Agent',
provider: 'test',
model: 'test-model',
author: new mongoose.Types.ObjectId(),
});
return agent;
};
test('should handle concurrent file additions', async () => {
const agent = await createBasicAgent();
const fileIds = Array.from({ length: 10 }, () => uuidv4());
// Concurrent additions
const additionPromises = fileIds.map((fileId) =>
addAgentResourceFile({
agent_id: agent.id,
tool_resource: 'test_tool',
file_id: fileId,
}),
);
await Promise.all(additionPromises);
const updatedAgent = await Agent.findOne({ id: agent.id });
expect(updatedAgent.tool_resources.test_tool.file_ids).toBeDefined();
expect(updatedAgent.tool_resources.test_tool.file_ids).toHaveLength(10);
expect(new Set(updatedAgent.tool_resources.test_tool.file_ids).size).toBe(10);
});
test('should handle concurrent additions and removals', async () => {
const agent = await createBasicAgent();
const initialFileIds = Array.from({ length: 5 }, () => uuidv4());
await Promise.all(
initialFileIds.map((fileId) =>
addAgentResourceFile({
agent_id: agent.id,
tool_resource: 'test_tool',
file_id: fileId,
}),
),
);
const newFileIds = Array.from({ length: 5 }, () => uuidv4());
const operations = [
...newFileIds.map((fileId) =>
addAgentResourceFile({
agent_id: agent.id,
tool_resource: 'test_tool',
file_id: fileId,
}),
),
...initialFileIds.map((fileId) =>
removeAgentResourceFiles({
agent_id: agent.id,
files: [{ tool_resource: 'test_tool', file_id: fileId }],
}),
),
];
await Promise.all(operations);
const updatedAgent = await Agent.findOne({ id: agent.id });
expect(updatedAgent.tool_resources.test_tool.file_ids).toBeDefined();
expect(updatedAgent.tool_resources.test_tool.file_ids).toHaveLength(5);
});
test('should initialize array when adding to non-existent tool resource', async () => {
const agent = await createBasicAgent();
const fileId = uuidv4();
const updatedAgent = await addAgentResourceFile({
agent_id: agent.id,
tool_resource: 'new_tool',
file_id: fileId,
});
expect(updatedAgent.tool_resources.new_tool.file_ids).toBeDefined();
expect(updatedAgent.tool_resources.new_tool.file_ids).toHaveLength(1);
expect(updatedAgent.tool_resources.new_tool.file_ids[0]).toBe(fileId);
});
test('should handle rapid sequential modifications to same tool resource', async () => {
const agent = await createBasicAgent();
const fileId = uuidv4();
for (let i = 0; i < 10; i++) {
await addAgentResourceFile({
agent_id: agent.id,
tool_resource: 'test_tool',
file_id: `${fileId}_${i}`,
});
if (i % 2 === 0) {
await removeAgentResourceFiles({
agent_id: agent.id,
files: [{ tool_resource: 'test_tool', file_id: `${fileId}_${i}` }],
});
}
}
const updatedAgent = await Agent.findOne({ id: agent.id });
expect(updatedAgent.tool_resources.test_tool.file_ids).toBeDefined();
expect(Array.isArray(updatedAgent.tool_resources.test_tool.file_ids)).toBe(true);
});
test('should handle multiple tool resources concurrently', async () => {
const agent = await createBasicAgent();
const toolResources = ['tool1', 'tool2', 'tool3'];
const operations = [];
toolResources.forEach((tool) => {
const fileIds = Array.from({ length: 5 }, () => uuidv4());
fileIds.forEach((fileId) => {
operations.push(
addAgentResourceFile({
agent_id: agent.id,
tool_resource: tool,
file_id: fileId,
}),
);
});
});
await Promise.all(operations);
const updatedAgent = await Agent.findOne({ id: agent.id });
toolResources.forEach((tool) => {
expect(updatedAgent.tool_resources[tool].file_ids).toBeDefined();
expect(updatedAgent.tool_resources[tool].file_ids).toHaveLength(5);
});
});
});

View File

@@ -1,41 +1,44 @@
const { logger } = require('~/config');
// const { Categories } = require('./schema/categories');
const options = [
{
label: 'com_ui_idea',
label: '',
value: '',
},
{
label: 'idea',
value: 'idea',
},
{
label: 'com_ui_travel',
label: 'travel',
value: 'travel',
},
{
label: 'com_ui_teach_or_explain',
label: 'teach_or_explain',
value: 'teach_or_explain',
},
{
label: 'com_ui_write',
label: 'write',
value: 'write',
},
{
label: 'com_ui_shop',
label: 'shop',
value: 'shop',
},
{
label: 'com_ui_code',
label: 'code',
value: 'code',
},
{
label: 'com_ui_misc',
label: 'misc',
value: 'misc',
},
{
label: 'com_ui_roleplay',
label: 'roleplay',
value: 'roleplay',
},
{
label: 'com_ui_finance',
label: 'finance',
value: 'finance',
},
];

View File

@@ -15,19 +15,6 @@ const searchConversation = async (conversationId) => {
throw new Error('Error searching conversation');
}
};
/**
* Searches for a conversation by conversationId and returns associated file ids.
* @param {string} conversationId - The conversation's ID.
* @returns {Promise<string[] | null>}
*/
const getConvoFiles = async (conversationId) => {
try {
return (await Conversation.findOne({ conversationId }, 'files').lean())?.files ?? [];
} catch (error) {
logger.error('[getConvoFiles] Error getting conversation files', error);
throw new Error('Error getting conversation files');
}
};
/**
* Retrieves a single conversation for a given user and conversation ID.
@@ -75,7 +62,6 @@ const deleteNullOrEmptyConversations = async () => {
module.exports = {
Conversation,
getConvoFiles,
searchConversation,
deleteNullOrEmptyConversations,
/**
@@ -96,24 +82,9 @@ module.exports = {
update.conversationId = newConversationId;
}
if (req.body.isTemporary) {
const expiredAt = new Date();
expiredAt.setDate(expiredAt.getDate() + 30);
update.expiredAt = expiredAt;
} else {
update.expiredAt = null;
}
/** @type {{ $set: Partial<TConversation>; $unset?: Record<keyof TConversation, number> }} */
const updateOperation = { $set: update };
if (metadata && metadata.unsetFields && Object.keys(metadata.unsetFields).length > 0) {
updateOperation.$unset = metadata.unsetFields;
}
/** Note: the resulting Model object is necessary for Meilisearch operations */
const conversation = await Conversation.findOneAndUpdate(
{ conversationId, user: req.user.id },
updateOperation,
update,
{
new: true,
upsert: true,
@@ -157,9 +128,6 @@ module.exports = {
if (Array.isArray(tags) && tags.length > 0) {
query.tags = { $in: tags };
}
query.$and = [{ $or: [{ expiredAt: null }, { expiredAt: { $exists: false } }] }];
try {
const totalConvos = (await Conversation.countDocuments(query)) || 1;
const totalPages = Math.ceil(totalConvos / pageSize);
@@ -189,7 +157,6 @@ module.exports = {
Conversation.findOne({
user,
conversationId: convo.conversationId,
$or: [{ expiredAt: { $exists: false } }, { expiredAt: null }],
}).lean(),
),
);

View File

@@ -23,6 +23,7 @@ const idSchema = z.string().uuid();
* @param {string} [params.error] - Any error associated with the message.
* @param {boolean} [params.unfinished] - Indicates if the message is unfinished.
* @param {Object[]} [params.files] - An array of files associated with the message.
* @param {boolean} [params.isEdited] - Indicates if the message was edited.
* @param {string} [params.finish_reason] - Reason for finishing the message.
* @param {number} [params.tokenCount] - The number of tokens in the message.
* @param {string} [params.plugin] - Plugin associated with the message.
@@ -52,15 +53,6 @@ async function saveMessage(req, params, metadata) {
user: req.user.id,
messageId: params.newMessageId || params.messageId,
};
if (req?.body?.isTemporary) {
const expiredAt = new Date();
expiredAt.setDate(expiredAt.getDate() + 30);
update.expiredAt = expiredAt;
} else {
update.expiredAt = null;
}
const message = await Message.findOneAndUpdate(
{ messageId: params.messageId, user: req.user.id },
update,
@@ -85,7 +77,7 @@ async function saveMessage(req, params, metadata) {
* @returns {Promise<Object>} The result of the bulk write operation.
* @throws {Error} If there is an error in saving messages in bulk.
*/
async function bulkSaveMessages(messages, overrideTimestamp = false) {
async function bulkSaveMessages(messages, overrideTimestamp=false) {
try {
const bulkOps = messages.map((message) => ({
updateOne: {
@@ -190,6 +182,7 @@ async function updateMessageText(req, { messageId, text }) {
async function updateMessage(req, message, metadata) {
try {
const { messageId, ...update } = message;
update.isEdited = true;
const updatedMessage = await Message.findOneAndUpdate(
{ messageId, user: req.user.id },
update,
@@ -210,6 +203,7 @@ async function updateMessage(req, message, metadata) {
text: updatedMessage.text,
isCreatedByUser: updatedMessage.isCreatedByUser,
tokenCount: updatedMessage.tokenCount,
isEdited: true,
};
} catch (err) {
logger.error('Error updating message:', err);
@@ -271,26 +265,6 @@ async function getMessages(filter, select) {
}
}
/**
* Retrieves a single message from the database.
* @async
* @function getMessage
* @param {{ user: string, messageId: string }} params - The search parameters
* @returns {Promise<TMessage | null>} The message that matches the criteria or null if not found
* @throws {Error} If there is an error in retrieving the message
*/
async function getMessage({ user, messageId }) {
try {
return await Message.findOne({
user,
messageId,
}).lean();
} catch (err) {
logger.error('Error getting message:', err);
throw err;
}
}
/**
* Deletes messages from the database.
*
@@ -318,6 +292,5 @@ module.exports = {
updateMessage,
deleteMessagesSince,
getMessages,
getMessage,
deleteMessages,
};

View File

@@ -100,6 +100,7 @@ describe('Message Operations', () => {
expect.objectContaining({
messageId: 'msg123',
text: 'Hello, world!',
isEdited: true,
}),
);
});

View File

@@ -125,7 +125,7 @@ const getAllPromptGroups = async (req, filter) => {
if (searchShared) {
const project = await getProjectByName(Constants.GLOBAL_PROJECT_NAME, 'promptGroupIds');
if (project && project.promptGroupIds && project.promptGroupIds.length > 0) {
if (project && project.promptGroupIds.length > 0) {
const projectQuery = { _id: { $in: project.promptGroupIds }, ...query };
delete projectQuery.author;
combinedQuery = searchSharedOnly ? projectQuery : { $or: [projectQuery, query] };
@@ -179,7 +179,7 @@ const getPromptGroups = async (req, filter) => {
if (searchShared) {
// const projects = req.user.projects || []; // TODO: handle multiple projects
const project = await getProjectByName(Constants.GLOBAL_PROJECT_NAME, 'promptGroupIds');
if (project && project.promptGroupIds && project.promptGroupIds.length > 0) {
if (project && project.promptGroupIds.length > 0) {
const projectQuery = { _id: { $in: project.promptGroupIds }, ...query };
delete projectQuery.author;
combinedQuery = searchSharedOnly ? projectQuery : { $or: [projectQuery, query] };

View File

@@ -6,10 +6,8 @@ const {
removeNullishValues,
agentPermissionsSchema,
promptPermissionsSchema,
runCodePermissionsSchema,
bookmarkPermissionsSchema,
multiConvoPermissionsSchema,
temporaryChatPermissionsSchema,
} = require('librechat-data-provider');
const getLogStores = require('~/cache/getLogStores');
const Role = require('~/models/schema/roleSchema');
@@ -79,8 +77,6 @@ const permissionSchemas = {
[PermissionTypes.PROMPTS]: promptPermissionsSchema,
[PermissionTypes.BOOKMARKS]: bookmarkPermissionsSchema,
[PermissionTypes.MULTI_CONVO]: multiConvoPermissionsSchema,
[PermissionTypes.TEMPORARY_CHAT]: temporaryChatPermissionsSchema,
[PermissionTypes.RUN_CODE]: runCodePermissionsSchema,
};
/**

View File

@@ -1,275 +1,75 @@
const mongoose = require('mongoose');
const signPayload = require('~/server/services/signPayload');
const { hashToken } = require('~/server/utils/crypto');
const sessionSchema = require('./schema/session');
const { logger } = require('~/config');
const Session = mongoose.model('Session', sessionSchema);
const { REFRESH_TOKEN_EXPIRY } = process.env ?? {};
const expires = eval(REFRESH_TOKEN_EXPIRY) ?? 1000 * 60 * 60 * 24 * 7; // 7 days default
const expires = eval(REFRESH_TOKEN_EXPIRY) ?? 1000 * 60 * 60 * 24 * 7;
/**
* Error class for Session-related errors
*/
class SessionError extends Error {
constructor(message, code = 'SESSION_ERROR') {
super(message);
this.name = 'SessionError';
this.code = code;
}
}
/**
* Creates a new session for a user
* @param {string} userId - The ID of the user
* @param {Object} options - Additional options for session creation
* @param {Date} options.expiration - Custom expiration date
* @returns {Promise<{session: Session, refreshToken: string}>}
* @throws {SessionError}
*/
const createSession = async (userId, options = {}) => {
if (!userId) {
throw new SessionError('User ID is required', 'INVALID_USER_ID');
}
const sessionSchema = mongoose.Schema({
refreshTokenHash: {
type: String,
required: true,
},
expiration: {
type: Date,
required: true,
expires: 0,
},
user: {
type: mongoose.Schema.Types.ObjectId,
ref: 'User',
required: true,
},
});
sessionSchema.methods.generateRefreshToken = async function () {
try {
const session = new Session({
user: userId,
expiration: options.expiration || new Date(Date.now() + expires),
});
const refreshToken = await generateRefreshToken(session);
return { session, refreshToken };
} catch (error) {
logger.error('[createSession] Error creating session:', error);
throw new SessionError('Failed to create session', 'CREATE_SESSION_FAILED');
}
};
/**
* Finds a session by various parameters
* @param {Object} params - Search parameters
* @param {string} [params.refreshToken] - The refresh token to search by
* @param {string} [params.userId] - The user ID to search by
* @param {string} [params.sessionId] - The session ID to search by
* @param {Object} [options] - Additional options
* @param {boolean} [options.lean=true] - Whether to return plain objects instead of documents
* @returns {Promise<Session|null>}
* @throws {SessionError}
*/
const findSession = async (params, options = { lean: true }) => {
try {
const query = {};
if (!params.refreshToken && !params.userId && !params.sessionId) {
throw new SessionError('At least one search parameter is required', 'INVALID_SEARCH_PARAMS');
}
if (params.refreshToken) {
const tokenHash = await hashToken(params.refreshToken);
query.refreshTokenHash = tokenHash;
}
if (params.userId) {
query.user = params.userId;
}
if (params.sessionId) {
const sessionId = params.sessionId.sessionId || params.sessionId;
if (!mongoose.Types.ObjectId.isValid(sessionId)) {
throw new SessionError('Invalid session ID format', 'INVALID_SESSION_ID');
}
query._id = sessionId;
}
// Add expiration check to only return valid sessions
query.expiration = { $gt: new Date() };
const sessionQuery = Session.findOne(query);
if (options.lean) {
return await sessionQuery.lean();
}
return await sessionQuery.exec();
} catch (error) {
logger.error('[findSession] Error finding session:', error);
throw new SessionError('Failed to find session', 'FIND_SESSION_FAILED');
}
};
/**
* Updates session expiration
* @param {Session|string} session - The session or session ID to update
* @param {Date} [newExpiration] - Optional new expiration date
* @returns {Promise<Session>}
* @throws {SessionError}
*/
const updateExpiration = async (session, newExpiration) => {
try {
const sessionDoc = typeof session === 'string' ? await Session.findById(session) : session;
if (!sessionDoc) {
throw new SessionError('Session not found', 'SESSION_NOT_FOUND');
}
sessionDoc.expiration = newExpiration || new Date(Date.now() + expires);
return await sessionDoc.save();
} catch (error) {
logger.error('[updateExpiration] Error updating session:', error);
throw new SessionError('Failed to update session expiration', 'UPDATE_EXPIRATION_FAILED');
}
};
/**
* Deletes a session by refresh token or session ID
* @param {Object} params - Delete parameters
* @param {string} [params.refreshToken] - The refresh token of the session to delete
* @param {string} [params.sessionId] - The ID of the session to delete
* @returns {Promise<Object>}
* @throws {SessionError}
*/
const deleteSession = async (params) => {
try {
if (!params.refreshToken && !params.sessionId) {
throw new SessionError(
'Either refreshToken or sessionId is required',
'INVALID_DELETE_PARAMS',
);
}
const query = {};
if (params.refreshToken) {
query.refreshTokenHash = await hashToken(params.refreshToken);
}
if (params.sessionId) {
query._id = params.sessionId;
}
const result = await Session.deleteOne(query);
if (result.deletedCount === 0) {
logger.warn('[deleteSession] No session found to delete');
}
return result;
} catch (error) {
logger.error('[deleteSession] Error deleting session:', error);
throw new SessionError('Failed to delete session', 'DELETE_SESSION_FAILED');
}
};
/**
* Deletes all sessions for a user
* @param {string} userId - The ID of the user
* @param {Object} [options] - Additional options
* @param {boolean} [options.excludeCurrentSession] - Whether to exclude the current session
* @param {string} [options.currentSessionId] - The ID of the current session to exclude
* @returns {Promise<Object>}
* @throws {SessionError}
*/
const deleteAllUserSessions = async (userId, options = {}) => {
try {
if (!userId) {
throw new SessionError('User ID is required', 'INVALID_USER_ID');
}
// Extract userId if it's passed as an object
const userIdString = userId.userId || userId;
if (!mongoose.Types.ObjectId.isValid(userIdString)) {
throw new SessionError('Invalid user ID format', 'INVALID_USER_ID_FORMAT');
}
const query = { user: userIdString };
if (options.excludeCurrentSession && options.currentSessionId) {
query._id = { $ne: options.currentSessionId };
}
const result = await Session.deleteMany(query);
if (result.deletedCount > 0) {
logger.debug(
`[deleteAllUserSessions] Deleted ${result.deletedCount} sessions for user ${userIdString}.`,
);
}
return result;
} catch (error) {
logger.error('[deleteAllUserSessions] Error deleting user sessions:', error);
throw new SessionError('Failed to delete user sessions', 'DELETE_ALL_SESSIONS_FAILED');
}
};
/**
* Generates a refresh token for a session
* @param {Session} session - The session to generate a token for
* @returns {Promise<string>}
* @throws {SessionError}
*/
const generateRefreshToken = async (session) => {
if (!session || !session.user) {
throw new SessionError('Invalid session object', 'INVALID_SESSION');
}
try {
const expiresIn = session.expiration ? session.expiration.getTime() : Date.now() + expires;
if (!session.expiration) {
session.expiration = new Date(expiresIn);
let expiresIn;
if (this.expiration) {
expiresIn = this.expiration.getTime();
} else {
expiresIn = Date.now() + expires;
this.expiration = new Date(expiresIn);
}
const refreshToken = await signPayload({
payload: {
id: session.user,
sessionId: session._id,
},
payload: { id: this.user },
secret: process.env.JWT_REFRESH_SECRET,
expirationTime: Math.floor((expiresIn - Date.now()) / 1000),
});
session.refreshTokenHash = await hashToken(refreshToken);
await session.save();
this.refreshTokenHash = await hashToken(refreshToken);
await this.save();
return refreshToken;
} catch (error) {
logger.error('[generateRefreshToken] Error generating refresh token:', error);
throw new SessionError('Failed to generate refresh token', 'GENERATE_TOKEN_FAILED');
logger.error(
'Error generating refresh token. Is a `JWT_REFRESH_SECRET` set in the .env file?\n\n',
error,
);
throw error;
}
};
/**
* Counts active sessions for a user
* @param {string} userId - The ID of the user
* @returns {Promise<number>}
* @throws {SessionError}
*/
const countActiveSessions = async (userId) => {
sessionSchema.statics.deleteAllUserSessions = async function (userId) {
try {
if (!userId) {
throw new SessionError('User ID is required', 'INVALID_USER_ID');
return;
}
const result = await this.deleteMany({ user: userId });
if (result && result?.deletedCount > 0) {
logger.debug(
`[deleteAllUserSessions] Deleted ${result.deletedCount} sessions for user ${userId}.`,
);
}
return await Session.countDocuments({
user: userId,
expiration: { $gt: new Date() },
});
} catch (error) {
logger.error('[countActiveSessions] Error counting active sessions:', error);
throw new SessionError('Failed to count active sessions', 'COUNT_SESSIONS_FAILED');
logger.error('[deleteAllUserSessions] Error in deleting user sessions:', error);
throw error;
}
};
module.exports = {
createSession,
findSession,
updateExpiration,
deleteSession,
deleteAllUserSessions,
generateRefreshToken,
countActiveSessions,
SessionError,
};
const Session = mongoose.model('Session', sessionSchema);
module.exports = Session;

View File

@@ -1,71 +1,82 @@
const { nanoid } = require('nanoid');
const { Constants } = require('librechat-data-provider');
const { Conversation } = require('~/models/Conversation');
const SharedLink = require('./schema/shareSchema');
const { getMessages } = require('./Message');
const logger = require('~/config/winston');
class ShareServiceError extends Error {
constructor(message, code) {
super(message);
this.name = 'ShareServiceError';
this.code = code;
}
/**
* Anonymizes a conversation ID
* @returns {string} The anonymized conversation ID
*/
function anonymizeConvoId() {
return `convo_${nanoid()}`;
}
const memoizedAnonymizeId = (prefix) => {
const memo = new Map();
return (id) => {
if (!memo.has(id)) {
memo.set(id, `${prefix}_${nanoid()}`);
}
return memo.get(id);
};
};
/**
* Anonymizes an assistant ID
* @returns {string} The anonymized assistant ID
*/
function anonymizeAssistantId() {
return `a_${nanoid()}`;
}
const anonymizeConvoId = memoizedAnonymizeId('convo');
const anonymizeAssistantId = memoizedAnonymizeId('a');
const anonymizeMessageId = (id) =>
id === Constants.NO_PARENT ? id : memoizedAnonymizeId('msg')(id);
/**
* Anonymizes a message ID
* @param {string} id - The original message ID
* @returns {string} The anonymized message ID
*/
function anonymizeMessageId(id) {
return id === Constants.NO_PARENT ? id : `msg_${nanoid()}`;
}
/**
* Anonymizes a conversation object
* @param {object} conversation - The conversation object
* @returns {object} The anonymized conversation object
*/
function anonymizeConvo(conversation) {
if (!conversation) {
return null;
}
const newConvo = { ...conversation };
if (newConvo.assistant_id) {
newConvo.assistant_id = anonymizeAssistantId(newConvo.assistant_id);
newConvo.assistant_id = anonymizeAssistantId();
}
return newConvo;
}
/**
* Anonymizes messages in a conversation
* @param {TMessage[]} messages - The original messages
* @param {string} newConvoId - The new conversation ID
* @returns {TMessage[]} The anonymized messages
*/
function anonymizeMessages(messages, newConvoId) {
if (!Array.isArray(messages)) {
return [];
}
const idMap = new Map();
return messages.map((message) => {
const newMessageId = anonymizeMessageId(message.messageId);
idMap.set(message.messageId, newMessageId);
return {
...message,
const anonymizedMessage = Object.assign(message, {
messageId: newMessageId,
parentMessageId:
idMap.get(message.parentMessageId) || anonymizeMessageId(message.parentMessageId),
conversationId: newConvoId,
model: message.model?.startsWith('asst_')
? anonymizeAssistantId(message.model)
: message.model,
};
});
if (anonymizedMessage.model && anonymizedMessage.model.startsWith('asst_')) {
anonymizedMessage.model = anonymizeAssistantId();
}
return anonymizedMessage;
});
}
/**
* Retrieves shared messages for a given share ID
* @param {string} shareId - The share ID
* @returns {Promise<object|null>} The shared conversation data or null if not found
*/
async function getSharedMessages(shareId) {
try {
const share = await SharedLink.findOne({ shareId, isPublic: true })
const share = await SharedLink.findOne({ shareId })
.populate({
path: 'messages',
select: '-_id -__v -user',
@@ -73,264 +84,165 @@ async function getSharedMessages(shareId) {
.select('-_id -__v -user')
.lean();
if (!share?.conversationId || !share.isPublic) {
if (!share || !share.conversationId || !share.isPublic) {
return null;
}
const newConvoId = anonymizeConvoId(share.conversationId);
const result = {
...share,
const newConvoId = anonymizeConvoId();
return Object.assign(share, {
conversationId: newConvoId,
messages: anonymizeMessages(share.messages, newConvoId),
};
return result;
} catch (error) {
logger.error('[getShare] Error getting share link', {
error: error.message,
shareId,
});
throw new ShareServiceError('Error getting share link', 'SHARE_FETCH_ERROR');
} catch (error) {
logger.error('[getShare] Error getting share link', error);
throw new Error('Error getting share link');
}
}
async function getSharedLinks(user, pageParam, pageSize, isPublic, sortBy, sortDirection, search) {
/**
* Retrieves shared links for a user
* @param {string} user - The user ID
* @param {number} [pageNumber=1] - The page number
* @param {number} [pageSize=25] - The page size
* @param {boolean} [isPublic=true] - Whether to retrieve public links only
* @returns {Promise<object>} The shared links and pagination data
*/
async function getSharedLinks(user, pageNumber = 1, pageSize = 25, isPublic = true) {
const query = { user, isPublic };
try {
const query = { user, isPublic };
const [totalConvos, sharedLinks] = await Promise.all([
SharedLink.countDocuments(query),
SharedLink.find(query)
.sort({ updatedAt: -1 })
.skip((pageNumber - 1) * pageSize)
.limit(pageSize)
.select('-_id -__v -user')
.lean(),
]);
if (pageParam) {
if (sortDirection === 'desc') {
query[sortBy] = { $lt: pageParam };
} else {
query[sortBy] = { $gt: pageParam };
}
}
if (search && search.trim()) {
try {
const searchResults = await Conversation.meiliSearch(search);
if (!searchResults?.hits?.length) {
return {
links: [],
nextCursor: undefined,
hasNextPage: false,
};
}
const conversationIds = searchResults.hits.map((hit) => hit.conversationId);
query['conversationId'] = { $in: conversationIds };
} catch (searchError) {
logger.error('[getSharedLinks] Meilisearch error', {
error: searchError.message,
user,
});
return {
links: [],
nextCursor: undefined,
hasNextPage: false,
};
}
}
const sort = {};
sort[sortBy] = sortDirection === 'desc' ? -1 : 1;
if (Array.isArray(query.conversationId)) {
query.conversationId = { $in: query.conversationId };
}
const sharedLinks = await SharedLink.find(query)
.sort(sort)
.limit(pageSize + 1)
.select('-__v -user')
.lean();
const hasNextPage = sharedLinks.length > pageSize;
const links = sharedLinks.slice(0, pageSize);
const nextCursor = hasNextPage ? links[links.length - 1][sortBy] : undefined;
const totalPages = Math.ceil((totalConvos || 1) / pageSize);
return {
links: links.map((link) => ({
shareId: link.shareId,
title: link?.title || 'Untitled',
isPublic: link.isPublic,
createdAt: link.createdAt,
conversationId: link.conversationId,
})),
nextCursor,
hasNextPage,
sharedLinks,
pages: totalPages,
pageNumber,
pageSize,
};
} catch (error) {
logger.error('[getSharedLinks] Error getting shares', {
error: error.message,
user,
});
throw new ShareServiceError('Error getting shares', 'SHARES_FETCH_ERROR');
logger.error('[getShareByPage] Error getting shares', error);
throw new Error('Error getting shares');
}
}
/**
* Creates a new shared link
* @param {string} user - The user ID
* @param {object} shareData - The share data
* @param {string} shareData.conversationId - The conversation ID
* @returns {Promise<object>} The created shared link
*/
async function createSharedLink(user, { conversationId, ...shareData }) {
try {
const share = await SharedLink.findOne({ conversationId }).select('-_id -__v -user').lean();
if (share) {
const newConvoId = anonymizeConvoId();
const sharedConvo = anonymizeConvo(share);
return Object.assign(sharedConvo, {
conversationId: newConvoId,
messages: anonymizeMessages(share.messages, newConvoId),
});
}
const shareId = nanoid();
const messages = await getMessages({ conversationId });
const update = { ...shareData, shareId, messages, user };
const newShare = await SharedLink.findOneAndUpdate({ conversationId, user }, update, {
new: true,
upsert: true,
}).lean();
const newConvoId = anonymizeConvoId();
const sharedConvo = anonymizeConvo(newShare);
return Object.assign(sharedConvo, {
conversationId: newConvoId,
messages: anonymizeMessages(newShare.messages, newConvoId),
});
} catch (error) {
logger.error('[createSharedLink] Error creating shared link', error);
throw new Error('Error creating shared link');
}
}
/**
* Updates an existing shared link
* @param {string} user - The user ID
* @param {object} shareData - The share data to update
* @param {string} shareData.conversationId - The conversation ID
* @returns {Promise<object>} The updated shared link
*/
async function updateSharedLink(user, { conversationId, ...shareData }) {
try {
const share = await SharedLink.findOne({ conversationId }).select('-_id -__v -user').lean();
if (!share) {
return { message: 'Share not found' };
}
const messages = await getMessages({ conversationId });
const update = { ...shareData, messages, user };
const updatedShare = await SharedLink.findOneAndUpdate({ conversationId, user }, update, {
new: true,
upsert: false,
}).lean();
const newConvoId = anonymizeConvoId();
const sharedConvo = anonymizeConvo(updatedShare);
return Object.assign(sharedConvo, {
conversationId: newConvoId,
messages: anonymizeMessages(updatedShare.messages, newConvoId),
});
} catch (error) {
logger.error('[updateSharedLink] Error updating shared link', error);
throw new Error('Error updating shared link');
}
}
/**
* Deletes a shared link
* @param {string} user - The user ID
* @param {object} params - The deletion parameters
* @param {string} params.shareId - The share ID to delete
* @returns {Promise<object>} The result of the deletion
*/
async function deleteSharedLink(user, { shareId }) {
try {
const result = await SharedLink.findOneAndDelete({ shareId, user });
return result ? { message: 'Share deleted successfully' } : { message: 'Share not found' };
} catch (error) {
logger.error('[deleteSharedLink] Error deleting shared link', error);
throw new Error('Error deleting shared link');
}
}
/**
* Deletes all shared links for a specific user
* @param {string} user - The user ID
* @returns {Promise<object>} The result of the deletion
*/
async function deleteAllSharedLinks(user) {
try {
const result = await SharedLink.deleteMany({ user });
return {
message: 'All shared links deleted successfully',
message: 'All shared links have been deleted successfully',
deletedCount: result.deletedCount,
};
} catch (error) {
logger.error('[deleteAllSharedLinks] Error deleting shared links', {
error: error.message,
user,
});
throw new ShareServiceError('Error deleting shared links', 'BULK_DELETE_ERROR');
}
}
async function createSharedLink(user, conversationId) {
if (!user || !conversationId) {
throw new ShareServiceError('Missing required parameters', 'INVALID_PARAMS');
}
try {
const [existingShare, conversationMessages] = await Promise.all([
SharedLink.findOne({ conversationId, isPublic: true }).select('-_id -__v -user').lean(),
getMessages({ conversationId }),
]);
if (existingShare && existingShare.isPublic) {
throw new ShareServiceError('Share already exists', 'SHARE_EXISTS');
} else if (existingShare) {
await SharedLink.deleteOne({ conversationId });
}
const conversation = await Conversation.findOne({ conversationId }).lean();
const title = conversation?.title || 'Untitled';
const shareId = nanoid();
await SharedLink.create({
shareId,
conversationId,
messages: conversationMessages,
title,
user,
});
return { shareId, conversationId };
} catch (error) {
logger.error('[createSharedLink] Error creating shared link', {
error: error.message,
user,
conversationId,
});
throw new ShareServiceError('Error creating shared link', 'SHARE_CREATE_ERROR');
}
}
async function getSharedLink(user, conversationId) {
if (!user || !conversationId) {
throw new ShareServiceError('Missing required parameters', 'INVALID_PARAMS');
}
try {
const share = await SharedLink.findOne({ conversationId, user, isPublic: true })
.select('shareId -_id')
.lean();
if (!share) {
return { shareId: null, success: false };
}
return { shareId: share.shareId, success: true };
} catch (error) {
logger.error('[getSharedLink] Error getting shared link', {
error: error.message,
user,
conversationId,
});
throw new ShareServiceError('Error getting shared link', 'SHARE_FETCH_ERROR');
}
}
async function updateSharedLink(user, shareId) {
if (!user || !shareId) {
throw new ShareServiceError('Missing required parameters', 'INVALID_PARAMS');
}
try {
const share = await SharedLink.findOne({ shareId }).select('-_id -__v -user').lean();
if (!share) {
throw new ShareServiceError('Share not found', 'SHARE_NOT_FOUND');
}
const [updatedMessages] = await Promise.all([
getMessages({ conversationId: share.conversationId }),
]);
const newShareId = nanoid();
const update = {
messages: updatedMessages,
user,
shareId: newShareId,
};
const updatedShare = await SharedLink.findOneAndUpdate({ shareId, user }, update, {
new: true,
upsert: false,
runValidators: true,
}).lean();
if (!updatedShare) {
throw new ShareServiceError('Share update failed', 'SHARE_UPDATE_ERROR');
}
anonymizeConvo(updatedShare);
return { shareId: newShareId, conversationId: updatedShare.conversationId };
} catch (error) {
logger.error('[updateSharedLink] Error updating shared link', {
error: error.message,
user,
shareId,
});
throw new ShareServiceError(
error.code === 'SHARE_UPDATE_ERROR' ? error.message : 'Error updating shared link',
error.code || 'SHARE_UPDATE_ERROR',
);
}
}
async function deleteSharedLink(user, shareId) {
if (!user || !shareId) {
throw new ShareServiceError('Missing required parameters', 'INVALID_PARAMS');
}
try {
const result = await SharedLink.findOneAndDelete({ shareId, user }).lean();
if (!result) {
return null;
}
return {
success: true,
shareId,
message: 'Share deleted successfully',
};
} catch (error) {
logger.error('[deleteSharedLink] Error deleting shared link', {
error: error.message,
user,
shareId,
});
throw new ShareServiceError('Error deleting shared link', 'SHARE_DELETE_ERROR');
logger.error('[deleteAllSharedLinks] Error deleting shared links', error);
throw new Error('Error deleting shared links');
}
}
module.exports = {
SharedLink,
getSharedLink,
getSharedLinks,
createSharedLink,
updateSharedLink,

View File

@@ -1,6 +1,5 @@
const mongoose = require('mongoose');
const { encryptV2 } = require('~/server/utils/crypto');
const tokenSchema = require('./schema/tokenSchema');
const mongoose = require('mongoose');
const { logger } = require('~/config');
/**
@@ -8,39 +7,6 @@ const { logger } = require('~/config');
* @type {mongoose.Model}
*/
const Token = mongoose.model('Token', tokenSchema);
/**
* Fixes the indexes for the Token collection from legacy TTL indexes to the new expiresAt index.
*/
async function fixIndexes() {
try {
if (
process.env.NODE_ENV === 'CI' ||
process.env.NODE_ENV === 'development' ||
process.env.NODE_ENV === 'test'
) {
return;
}
const indexes = await Token.collection.indexes();
logger.debug('Existing Token Indexes:', JSON.stringify(indexes, null, 2));
const unwantedTTLIndexes = indexes.filter(
(index) => index.key.createdAt === 1 && index.expireAfterSeconds !== undefined,
);
if (unwantedTTLIndexes.length === 0) {
logger.debug('No unwanted Token indexes found.');
return;
}
for (const index of unwantedTTLIndexes) {
logger.debug(`Dropping unwanted Token index: ${index.name}`);
await Token.collection.dropIndex(index.name);
logger.debug(`Dropped Token index: ${index.name}`);
}
logger.debug('Token index cleanup completed successfully.');
} catch (error) {
logger.error('An error occurred while fixing Token indexes:', error);
}
}
fixIndexes();
/**
* Creates a new Token instance.
@@ -63,7 +29,8 @@ async function createToken(tokenData) {
expiresAt,
};
return await Token.create(newTokenData);
const newToken = new Token(newTokenData);
return await newToken.save();
} catch (error) {
logger.debug('An error occurred while creating token:', error);
throw error;
@@ -75,8 +42,7 @@ async function createToken(tokenData) {
* @param {Object} query - The query to match against.
* @param {mongoose.Types.ObjectId|String} query.userId - The ID of the user.
* @param {String} query.token - The token value.
* @param {String} [query.email] - The email of the user.
* @param {String} [query.identifier] - Unique, alternative identifier for the token.
* @param {String} query.email - The email of the user.
* @returns {Promise<Object|null>} The matched Token document, or null if not found.
* @throws Will throw an error if the find operation fails.
*/
@@ -93,9 +59,6 @@ async function findToken(query) {
if (query.email) {
conditions.push({ email: query.email });
}
if (query.identifier) {
conditions.push({ identifier: query.identifier });
}
const token = await Token.findOne({
$and: conditions,
@@ -113,8 +76,6 @@ async function findToken(query) {
* @param {Object} query - The query to match against.
* @param {mongoose.Types.ObjectId|String} query.userId - The ID of the user.
* @param {String} query.token - The token value.
* @param {String} [query.email] - The email of the user.
* @param {String} [query.identifier] - Unique, alternative identifier for the token.
* @param {Object} updateData - The data to update the Token with.
* @returns {Promise<mongoose.Document|null>} The updated Token document, or null if not found.
* @throws Will throw an error if the update operation fails.
@@ -133,20 +94,14 @@ async function updateToken(query, updateData) {
* @param {Object} query - The query to match against.
* @param {mongoose.Types.ObjectId|String} query.userId - The ID of the user.
* @param {String} query.token - The token value.
* @param {String} [query.email] - The email of the user.
* @param {String} [query.identifier] - Unique, alternative identifier for the token.
* @param {String} query.email - The email of the user.
* @returns {Promise<Object>} The result of the delete operation.
* @throws Will throw an error if the delete operation fails.
*/
async function deleteTokens(query) {
try {
return await Token.deleteMany({
$or: [
{ userId: query.userId },
{ token: query.token },
{ email: query.email },
{ identifier: query.identifier },
],
$or: [{ userId: query.userId }, { token: query.token }, { email: query.email }],
});
} catch (error) {
logger.debug('An error occurred while deleting tokens:', error);
@@ -154,46 +109,9 @@ async function deleteTokens(query) {
}
}
/**
* Handles the OAuth token by creating or updating the token.
* @param {object} fields
* @param {string} fields.userId - The user's ID.
* @param {string} fields.token - The full token to store.
* @param {string} fields.identifier - Unique, alternative identifier for the token.
* @param {number} fields.expiresIn - The number of seconds until the token expires.
* @param {object} fields.metadata - Additional metadata to store with the token.
* @param {string} [fields.type="oauth"] - The type of token. Default is 'oauth'.
*/
async function handleOAuthToken({
token,
userId,
identifier,
expiresIn,
metadata,
type = 'oauth',
}) {
const encrypedToken = await encryptV2(token);
const tokenData = {
type,
userId,
metadata,
identifier,
token: encrypedToken,
expiresIn: parseInt(expiresIn, 10) || 3600,
};
const existingToken = await findToken({ userId, identifier });
if (existingToken) {
return await updateToken({ identifier }, tokenData);
} else {
return await createToken(tokenData);
}
}
module.exports = {
findToken,
createToken,
findToken,
updateToken,
deleteTokens,
handleOAuthToken,
};

View File

@@ -1,96 +0,0 @@
const ToolCall = require('./schema/toolCallSchema');
/**
* Create a new tool call
* @param {ToolCallData} toolCallData - The tool call data
* @returns {Promise<ToolCallData>} The created tool call document
*/
async function createToolCall(toolCallData) {
try {
return await ToolCall.create(toolCallData);
} catch (error) {
throw new Error(`Error creating tool call: ${error.message}`);
}
}
/**
* Get a tool call by ID
* @param {string} id - The tool call document ID
* @returns {Promise<ToolCallData|null>} The tool call document or null if not found
*/
async function getToolCallById(id) {
try {
return await ToolCall.findById(id).lean();
} catch (error) {
throw new Error(`Error fetching tool call: ${error.message}`);
}
}
/**
* Get tool calls by message ID and user
* @param {string} messageId - The message ID
* @param {string} userId - The user's ObjectId
* @returns {Promise<Array>} Array of tool call documents
*/
async function getToolCallsByMessage(messageId, userId) {
try {
return await ToolCall.find({ messageId, user: userId }).lean();
} catch (error) {
throw new Error(`Error fetching tool calls: ${error.message}`);
}
}
/**
* Get tool calls by conversation ID and user
* @param {string} conversationId - The conversation ID
* @param {string} userId - The user's ObjectId
* @returns {Promise<ToolCallData[]>} Array of tool call documents
*/
async function getToolCallsByConvo(conversationId, userId) {
try {
return await ToolCall.find({ conversationId, user: userId }).lean();
} catch (error) {
throw new Error(`Error fetching tool calls: ${error.message}`);
}
}
/**
* Update a tool call
* @param {string} id - The tool call document ID
* @param {Partial<ToolCallData>} updateData - The data to update
* @returns {Promise<ToolCallData|null>} The updated tool call document or null if not found
*/
async function updateToolCall(id, updateData) {
try {
return await ToolCall.findByIdAndUpdate(id, updateData, { new: true }).lean();
} catch (error) {
throw new Error(`Error updating tool call: ${error.message}`);
}
}
/**
* Delete a tool call
* @param {string} userId - The related user's ObjectId
* @param {string} [conversationId] - The tool call conversation ID
* @returns {Promise<{ ok?: number; n?: number; deletedCount?: number }>} The result of the delete operation
*/
async function deleteToolCalls(userId, conversationId) {
try {
const query = { user: userId };
if (conversationId) {
query.conversationId = conversationId;
}
return await ToolCall.deleteMany(query);
} catch (error) {
throw new Error(`Error deleting tool call: ${error.message}`);
}
}
module.exports = {
createToolCall,
updateToolCall,
deleteToolCalls,
getToolCallById,
getToolCallsByConvo,
getToolCallsByMessage,
};

View File

@@ -27,9 +27,6 @@ transactionSchema.methods.calculateTokenValue = function () {
*/
transactionSchema.statics.create = async function (txData) {
const Transaction = this;
if (txData.rawAmount != null && isNaN(txData.rawAmount)) {
return;
}
const transaction = new Transaction(txData);
transaction.endpointTokenConfig = txData.endpointTokenConfig;

View File

@@ -1,6 +1,5 @@
const mongoose = require('mongoose');
const { MongoMemoryServer } = require('mongodb-memory-server');
const { Transaction } = require('./Transaction');
const Balance = require('./Balance');
const { spendTokens, spendStructuredTokens } = require('./spendTokens');
const { getMultiplier, getCacheMultiplier } = require('./tx');
@@ -347,28 +346,3 @@ describe('Structured Token Spending Tests', () => {
expect(result.completion.completion).toBeCloseTo(-50 * 15 * 1.15, 0); // Assuming multiplier is 15 and cancelRate is 1.15
});
});
describe('NaN Handling Tests', () => {
test('should skip transaction creation when rawAmount is NaN', async () => {
const userId = new mongoose.Types.ObjectId();
const initialBalance = 10000000;
await Balance.create({ user: userId, tokenCredits: initialBalance });
const model = 'gpt-3.5-turbo';
const txData = {
user: userId,
conversationId: 'test-conversation-id',
model,
context: 'test',
endpointTokenConfig: null,
rawAmount: NaN,
tokenType: 'prompt',
};
const result = await Transaction.create(txData);
expect(result).toBeUndefined();
const balance = await Balance.findOne({ user: userId });
expect(balance.tokenCredits).toBe(initialBalance);
});
});

View File

@@ -220,94 +220,4 @@ describe('Conversation Structure Tests', () => {
}
expect(currentNode.children.length).toBe(0); // Last message should have no children
});
test('Random order dates between parent and children messages', async () => {
const userId = 'testUser';
const conversationId = 'testConversation';
// Create messages with deliberately out-of-order timestamps but sequential creation
const messages = [
{
messageId: 'parent',
parentMessageId: null,
text: 'Parent Message',
createdAt: new Date('2023-01-01T00:00:00Z'), // Make parent earliest
},
{
messageId: 'child1',
parentMessageId: 'parent',
text: 'Child Message 1',
createdAt: new Date('2023-01-01T00:01:00Z'),
},
{
messageId: 'child2',
parentMessageId: 'parent',
text: 'Child Message 2',
createdAt: new Date('2023-01-01T00:02:00Z'),
},
{
messageId: 'grandchild1',
parentMessageId: 'child1',
text: 'Grandchild Message 1',
createdAt: new Date('2023-01-01T00:03:00Z'),
},
];
// Add common properties to all messages
messages.forEach((msg) => {
msg.conversationId = conversationId;
msg.user = userId;
msg.isCreatedByUser = false;
msg.error = false;
msg.unfinished = false;
});
// Save messages with overrideTimestamp set to true
await bulkSaveMessages(messages, true);
// Retrieve messages
const retrievedMessages = await getMessages({ conversationId, user: userId });
// Debug log to see what's being returned
console.log(
'Retrieved Messages:',
retrievedMessages.map((msg) => ({
messageId: msg.messageId,
parentMessageId: msg.parentMessageId,
createdAt: msg.createdAt,
})),
);
// Build tree
const tree = buildTree({ messages: retrievedMessages });
// Debug log to see the tree structure
console.log(
'Tree structure:',
tree.map((root) => ({
messageId: root.messageId,
children: root.children.map((child) => ({
messageId: child.messageId,
children: child.children.map((grandchild) => ({
messageId: grandchild.messageId,
})),
})),
})),
);
// Verify the structure before making assertions
expect(retrievedMessages.length).toBe(4); // Should have all 4 messages
// Check if messages are properly linked
const parentMsg = retrievedMessages.find((msg) => msg.messageId === 'parent');
expect(parentMsg.parentMessageId).toBeNull(); // Parent should have null parentMessageId
const childMsg1 = retrievedMessages.find((msg) => msg.messageId === 'child1');
expect(childMsg1.parentMessageId).toBe('parent');
// Then check tree structure
expect(tree.length).toBe(1); // Should have only one root message
expect(tree[0].messageId).toBe('parent');
expect(tree[0].children.length).toBe(2); // Should have two children
});
});

View File

@@ -18,7 +18,6 @@ const {
updateFileUsage,
} = require('./File');
const {
getMessage,
getMessages,
saveMessage,
recordMessage,
@@ -26,18 +25,10 @@ const {
deleteMessagesSince,
deleteMessages,
} = require('./Message');
const {
createSession,
findSession,
updateExpiration,
deleteSession,
deleteAllUserSessions,
generateRefreshToken,
countActiveSessions,
} = require('./Session');
const { getConvoTitle, getConvo, saveConvo, deleteConvos } = require('./Conversation');
const { getPreset, getPresets, savePreset, deletePresets } = require('./Preset');
const { createToken, findToken, updateToken, deleteTokens } = require('./Token');
const Session = require('./Session');
const Balance = require('./Balance');
const User = require('./User');
const Key = require('./Key');
@@ -60,7 +51,6 @@ module.exports = {
getFiles,
updateFileUsage,
getMessage,
getMessages,
saveMessage,
recordMessage,
@@ -83,15 +73,8 @@ module.exports = {
updateToken,
deleteTokens,
createSession,
findSession,
updateExpiration,
deleteSession,
deleteAllUserSessions,
generateRefreshToken,
countActiveSessions,
User,
Key,
Session,
Balance,
};

View File

@@ -35,9 +35,6 @@ const agentSchema = mongoose.Schema(
model_parameters: {
type: Object,
},
artifacts: {
type: String,
},
access_level: {
type: Number,
},
@@ -61,15 +58,6 @@ const agentSchema = mongoose.Schema(
type: String,
default: undefined,
},
hide_sequential_outputs: {
type: Boolean,
},
end_after_tools: {
type: Boolean,
},
agent_ids: {
type: [String],
},
isCollaborative: {
type: Boolean,
default: undefined,

View File

@@ -28,10 +28,6 @@ const assistantSchema = mongoose.Schema(
},
file_ids: { type: [String], default: undefined },
actions: { type: [String], default: undefined },
append_current_datetime: {
type: Boolean,
default: false,
},
},
{
timestamps: true,

View File

@@ -20,24 +20,33 @@ const convoSchema = mongoose.Schema(
index: true,
},
messages: [{ type: mongoose.Schema.Types.ObjectId, ref: 'Message' }],
// google only
examples: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined },
agentOptions: {
type: mongoose.Schema.Types.Mixed,
},
...conversationPreset,
agent_id: {
// for bingAI only
bingConversationId: {
type: String,
},
jailbreakConversationId: {
type: String,
},
conversationSignature: {
type: String,
},
clientId: {
type: String,
},
invocationId: {
type: Number,
},
tags: {
type: [String],
default: [],
meiliIndex: true,
},
files: {
type: [String],
},
expiredAt: {
type: Date,
},
},
{ timestamps: true },
);
@@ -46,13 +55,11 @@ if (process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY) {
convoSchema.plugin(mongoMeili, {
host: process.env.MEILI_HOST,
apiKey: process.env.MEILI_MASTER_KEY,
/** Note: Will get created automatically if it doesn't exist already */
indexName: 'convos',
indexName: 'convos', // Will get created automatically if it doesn't exist already
primaryKey: 'conversationId',
});
}
convoSchema.index({ expiredAt: 1 }, { expireAfterSeconds: 0 });
convoSchema.index({ createdAt: 1, updatedAt: 1 });
convoSchema.index({ conversationId: 1, user: 1 }, { unique: true });

View File

@@ -1,7 +1,5 @@
const mongoose = require('mongoose');
const conversationPreset = {
// endpoint: [azureOpenAI, openAI, anthropic, chatGPTBrowser]
// endpoint: [azureOpenAI, openAI, bingAI, anthropic, chatGPTBrowser]
endpoint: {
type: String,
default: null,
@@ -26,7 +24,6 @@ const conversationPreset = {
required: false,
},
// for google only
examples: { type: [{ type: mongoose.Schema.Types.Mixed }], default: undefined },
modelLabel: {
type: String,
required: false,
@@ -64,6 +61,19 @@ const conversationPreset = {
type: Number,
required: false,
},
// for bingai only
jailbreak: {
type: Boolean,
},
context: {
type: String,
},
systemMessage: {
type: String,
},
toneStyle: {
type: String,
},
file_ids: { type: [{ type: String }], default: undefined },
// deprecated
resendImages: {
@@ -73,12 +83,6 @@ const conversationPreset = {
promptCache: {
type: Boolean,
},
thinking: {
type: Boolean,
},
thinkingBudget: {
type: Number,
},
system: {
type: String,
},
@@ -89,10 +93,6 @@ const conversationPreset = {
imageDetail: {
type: String,
},
/* agents */
agent_id: {
type: String,
},
/* assistants */
assistant_id: {
type: String,
@@ -126,12 +126,64 @@ const conversationPreset = {
max_tokens: {
type: Number,
},
/** omni models only */
reasoning_effort: {
};
const agentOptions = {
model: {
type: String,
required: false,
},
// for azureOpenAI, openAI only
chatGptLabel: {
type: String,
required: false,
},
modelLabel: {
type: String,
required: false,
},
promptPrefix: {
type: String,
required: false,
},
temperature: {
type: Number,
required: false,
},
top_p: {
type: Number,
required: false,
},
// for google only
topP: {
type: Number,
required: false,
},
topK: {
type: Number,
required: false,
},
maxOutputTokens: {
type: Number,
required: false,
},
presence_penalty: {
type: Number,
required: false,
},
frequency_penalty: {
type: Number,
required: false,
},
context: {
type: String,
},
systemMessage: {
type: String,
},
};
module.exports = {
conversationPreset,
agentOptions,
};

View File

@@ -16,6 +16,7 @@ const keySchema = mongoose.Schema({
},
expiresAt: {
type: Date,
expires: 0,
},
});

View File

@@ -62,6 +62,10 @@ const messageSchema = mongoose.Schema(
required: true,
default: false,
},
isEdited: {
type: Boolean,
default: false,
},
unfinished: {
type: Boolean,
default: false,
@@ -134,9 +138,6 @@ const messageSchema = mongoose.Schema(
default: undefined,
},
*/
expiredAt: {
type: Date,
},
},
{ timestamps: true },
);
@@ -149,7 +150,7 @@ if (process.env.MEILI_HOST && process.env.MEILI_MASTER_KEY) {
primaryKey: 'messageId',
});
}
messageSchema.index({ expiredAt: 1 }, { expireAfterSeconds: 0 });
messageSchema.index({ createdAt: 1 });
messageSchema.index({ messageId: 1, user: 1 }, { unique: true });

View File

@@ -23,6 +23,8 @@ const presetSchema = mongoose.Schema(
order: {
type: Number,
},
// google only
examples: [{ type: mongoose.Schema.Types.Mixed }],
...conversationPreset,
agentOptions: {
type: mongoose.Schema.Types.Mixed,

View File

@@ -48,18 +48,6 @@ const roleSchema = new mongoose.Schema({
default: true,
},
},
[PermissionTypes.TEMPORARY_CHAT]: {
[Permissions.USE]: {
type: Boolean,
default: true,
},
},
[PermissionTypes.RUN_CODE]: {
[Permissions.USE]: {
type: Boolean,
default: true,
},
},
});
const Role = mongoose.model('Role', roleSchema);

View File

@@ -1,20 +0,0 @@
const mongoose = require('mongoose');
const sessionSchema = mongoose.Schema({
refreshTokenHash: {
type: String,
required: true,
},
expiration: {
type: Date,
required: true,
expires: 0,
},
user: {
type: mongoose.Schema.Types.ObjectId,
ref: 'User',
required: true,
},
});
module.exports = sessionSchema;

View File

@@ -20,6 +20,14 @@ const shareSchema = mongoose.Schema(
index: true,
},
isPublic: {
type: Boolean,
default: false,
},
isVisible: {
type: Boolean,
default: false,
},
isAnonymous: {
type: Boolean,
default: true,
},

View File

@@ -10,10 +10,6 @@ const tokenSchema = new Schema({
email: {
type: String,
},
type: String,
identifier: {
type: String,
},
token: {
type: String,
required: true,
@@ -27,10 +23,6 @@ const tokenSchema = new Schema({
type: Date,
required: true,
},
metadata: {
type: Map,
of: Schema.Types.Mixed,
},
});
tokenSchema.index({ expiresAt: 1 }, { expireAfterSeconds: 0 });

View File

@@ -1,54 +0,0 @@
const mongoose = require('mongoose');
/**
* @typedef {Object} ToolCallData
* @property {string} conversationId - The ID of the conversation
* @property {string} messageId - The ID of the message
* @property {string} toolId - The ID of the tool
* @property {string | ObjectId} user - The user's ObjectId
* @property {unknown} [result] - Optional result data
* @property {TAttachment[]} [attachments] - Optional attachments data
* @property {number} [blockIndex] - Optional code block index
* @property {number} [partIndex] - Optional part index
*/
/** @type {MongooseSchema<ToolCallData>} */
const toolCallSchema = mongoose.Schema(
{
conversationId: {
type: String,
required: true,
},
messageId: {
type: String,
required: true,
},
toolId: {
type: String,
required: true,
},
user: {
type: mongoose.Schema.Types.ObjectId,
ref: 'User',
required: true,
},
result: {
type: mongoose.Schema.Types.Mixed,
},
attachments: {
type: mongoose.Schema.Types.Mixed,
},
blockIndex: {
type: Number,
},
partIndex: {
type: Number,
},
},
{ timestamps: true },
);
toolCallSchema.index({ messageId: 1, user: 1 });
toolCallSchema.index({ conversationId: 1, user: 1 });
module.exports = mongoose.model('ToolCall', toolCallSchema);

View File

@@ -23,7 +23,6 @@ const { SystemRoles } = require('librechat-data-provider');
* @property {string} [ldapId] - Optional LDAP ID for the user
* @property {string} [githubId] - Optional GitHub ID for the user
* @property {string} [discordId] - Optional Discord ID for the user
* @property {string} [appleId] - Optional Apple ID for the user
* @property {Array} [plugins=[]] - List of plugins used by the user
* @property {Array.<MongoSession>} [refreshToken] - List of sessions with refresh tokens
* @property {Date} [expiresAt] - Optional expiration date of the file
@@ -39,12 +38,6 @@ const Session = mongoose.Schema({
},
});
const backupCodeSchema = mongoose.Schema({
codeHash: { type: String, required: true },
used: { type: Boolean, default: false },
usedAt: { type: Date, default: null },
});
/** @type {MongooseSchema<MongoUser>} */
const userSchema = mongoose.Schema(
{
@@ -118,19 +111,9 @@ const userSchema = mongoose.Schema(
unique: true,
sparse: true,
},
appleId: {
type: String,
unique: true,
sparse: true,
},
plugins: {
type: Array,
},
totpSecret: {
type: String,
},
backupCodes: {
type: [backupCodeSchema],
default: [],
},
refreshToken: {
type: [Session],

View File

@@ -1,50 +1,22 @@
const { matchModelName } = require('../utils');
const defaultRate = 6;
/**
* AWS Bedrock pricing
* source: https://aws.amazon.com/bedrock/pricing/
* */
/** AWS Bedrock pricing */
const bedrockValues = {
// Basic llama2 patterns
'llama2-13b': { prompt: 0.75, completion: 1.0 },
'llama2-70b': { prompt: 1.95, completion: 2.56 },
'llama3-8b': { prompt: 0.3, completion: 0.6 },
'llama3-70b': { prompt: 2.65, completion: 3.5 },
'llama3-1-8b': { prompt: 0.3, completion: 0.6 },
'llama3-1-70b': { prompt: 2.65, completion: 3.5 },
'llama3-1-405b': { prompt: 5.32, completion: 16.0 },
'llama2:13b': { prompt: 0.75, completion: 1.0 },
'llama2:70b': { prompt: 1.95, completion: 2.56 },
'llama2-70b': { prompt: 1.95, completion: 2.56 },
// Basic llama3 patterns
'llama3-8b': { prompt: 0.3, completion: 0.6 },
'llama3:8b': { prompt: 0.3, completion: 0.6 },
'llama3-70b': { prompt: 2.65, completion: 3.5 },
'llama3:70b': { prompt: 2.65, completion: 3.5 },
// llama3-x-Nb pattern
'llama3-1-8b': { prompt: 0.22, completion: 0.22 },
'llama3-1-70b': { prompt: 0.72, completion: 0.72 },
'llama3-1-405b': { prompt: 2.4, completion: 2.4 },
'llama3-2-1b': { prompt: 0.1, completion: 0.1 },
'llama3-2-3b': { prompt: 0.15, completion: 0.15 },
'llama3-2-11b': { prompt: 0.16, completion: 0.16 },
'llama3-2-90b': { prompt: 0.72, completion: 0.72 },
// llama3.x:Nb pattern
'llama3.1:8b': { prompt: 0.22, completion: 0.22 },
'llama3.1:70b': { prompt: 0.72, completion: 0.72 },
'llama3.1:405b': { prompt: 2.4, completion: 2.4 },
'llama3.2:1b': { prompt: 0.1, completion: 0.1 },
'llama3.2:3b': { prompt: 0.15, completion: 0.15 },
'llama3.2:11b': { prompt: 0.16, completion: 0.16 },
'llama3.2:90b': { prompt: 0.72, completion: 0.72 },
// llama-3.x-Nb pattern
'llama-3.1-8b': { prompt: 0.22, completion: 0.22 },
'llama-3.1-70b': { prompt: 0.72, completion: 0.72 },
'llama-3.1-405b': { prompt: 2.4, completion: 2.4 },
'llama-3.2-1b': { prompt: 0.1, completion: 0.1 },
'llama-3.2-3b': { prompt: 0.15, completion: 0.15 },
'llama-3.2-11b': { prompt: 0.16, completion: 0.16 },
'llama-3.2-90b': { prompt: 0.72, completion: 0.72 },
'llama-3.3-70b': { prompt: 2.65, completion: 3.5 },
'llama3.1:8b': { prompt: 0.3, completion: 0.6 },
'llama3.1:70b': { prompt: 2.65, completion: 3.5 },
'llama3.1:405b': { prompt: 5.32, completion: 16.0 },
'mistral-7b': { prompt: 0.15, completion: 0.2 },
'mistral-small': { prompt: 0.15, completion: 0.2 },
'mixtral-8x7b': { prompt: 0.45, completion: 0.7 },
@@ -58,9 +30,6 @@ const bedrockValues = {
'amazon.titan-text-lite-v1': { prompt: 0.15, completion: 0.2 },
'amazon.titan-text-express-v1': { prompt: 0.2, completion: 0.6 },
'amazon.titan-text-premier-v1:0': { prompt: 0.5, completion: 1.5 },
'amazon.nova-micro-v1:0': { prompt: 0.035, completion: 0.14 },
'amazon.nova-lite-v1:0': { prompt: 0.06, completion: 0.24 },
'amazon.nova-pro-v1:0': { prompt: 0.8, completion: 3.2 },
};
/**
@@ -75,9 +44,8 @@ const tokenValues = Object.assign(
'4k': { prompt: 1.5, completion: 2 },
'16k': { prompt: 3, completion: 4 },
'gpt-3.5-turbo-1106': { prompt: 1, completion: 2 },
'o3-mini': { prompt: 1.1, completion: 4.4 },
'o1-mini': { prompt: 1.1, completion: 4.4 },
'o1-preview': { prompt: 15, completion: 60 },
'o1-mini': { prompt: 3, completion: 12 },
o1: { prompt: 15, completion: 60 },
'gpt-4o-mini': { prompt: 0.15, completion: 0.6 },
'gpt-4o': { prompt: 2.5, completion: 10 },
@@ -88,10 +56,8 @@ const tokenValues = Object.assign(
'claude-3-sonnet': { prompt: 3, completion: 15 },
'claude-3-5-sonnet': { prompt: 3, completion: 15 },
'claude-3.5-sonnet': { prompt: 3, completion: 15 },
'claude-3-7-sonnet': { prompt: 3, completion: 15 },
'claude-3.7-sonnet': { prompt: 3, completion: 15 },
'claude-3-5-haiku': { prompt: 0.8, completion: 4 },
'claude-3.5-haiku': { prompt: 0.8, completion: 4 },
'claude-3-5-haiku': { prompt: 1, completion: 5 },
'claude-3.5-haiku': { prompt: 1, completion: 5 },
'claude-3-haiku': { prompt: 0.25, completion: 1.25 },
'claude-2.1': { prompt: 8, completion: 24 },
'claude-2': { prompt: 8, completion: 24 },
@@ -99,27 +65,11 @@ const tokenValues = Object.assign(
'claude-': { prompt: 0.8, completion: 2.4 },
'command-r-plus': { prompt: 3, completion: 15 },
'command-r': { prompt: 0.5, completion: 1.5 },
'deepseek-reasoner': { prompt: 0.55, completion: 2.19 },
deepseek: { prompt: 0.14, completion: 0.28 },
/* cohere doesn't have rates for the older command models,
so this was from https://artificialanalysis.ai/models/command-light/providers */
command: { prompt: 0.38, completion: 0.38 },
'gemini-2.0-flash-lite': { prompt: 0.075, completion: 0.3 },
'gemini-2.0-flash': { prompt: 0.1, completion: 0.7 },
'gemini-2.0': { prompt: 0, completion: 0 }, // https://ai.google.dev/pricing
'gemini-1.5-flash-8b': { prompt: 0.075, completion: 0.3 },
'gemini-1.5-flash': { prompt: 0.15, completion: 0.6 },
'gemini-1.5': { prompt: 2.5, completion: 10 },
'gemini-pro-vision': { prompt: 0.5, completion: 1.5 },
gemini: { prompt: 0.5, completion: 1.5 },
'grok-2-vision-1212': { prompt: 2.0, completion: 10.0 },
'grok-2-vision-latest': { prompt: 2.0, completion: 10.0 },
'grok-2-vision': { prompt: 2.0, completion: 10.0 },
'grok-vision-beta': { prompt: 5.0, completion: 15.0 },
'grok-2-1212': { prompt: 2.0, completion: 10.0 },
'grok-2-latest': { prompt: 2.0, completion: 10.0 },
'grok-2': { prompt: 2.0, completion: 10.0 },
'grok-beta': { prompt: 5.0, completion: 15.0 },
'gemini-1.5': { prompt: 7, completion: 21 }, // May 2nd, 2024 pricing
gemini: { prompt: 0.5, completion: 1.5 }, // May 2nd, 2024 pricing
},
bedrockValues,
);
@@ -131,12 +81,10 @@ const tokenValues = Object.assign(
* @type {Object.<string, {write: number, read: number }>}
*/
const cacheTokenValues = {
'claude-3.7-sonnet': { write: 3.75, read: 0.3 },
'claude-3-7-sonnet': { write: 3.75, read: 0.3 },
'claude-3.5-sonnet': { write: 3.75, read: 0.3 },
'claude-3-5-sonnet': { write: 3.75, read: 0.3 },
'claude-3.5-haiku': { write: 1, read: 0.08 },
'claude-3-5-haiku': { write: 1, read: 0.08 },
'claude-3.5-haiku': { write: 1.25, read: 0.1 },
'claude-3-5-haiku': { write: 1.25, read: 0.1 },
'claude-3-haiku': { write: 0.3, read: 0.03 },
};
@@ -260,11 +208,4 @@ const getCacheMultiplier = ({ valueKey, cacheType, model, endpoint, endpointToke
return cacheTokenValues[valueKey]?.[cacheType] ?? null;
};
module.exports = {
tokenValues,
getValueKey,
getMultiplier,
getCacheMultiplier,
defaultRate,
cacheTokenValues,
};
module.exports = { tokenValues, getValueKey, getMultiplier, getCacheMultiplier, defaultRate };

View File

@@ -4,7 +4,6 @@ const {
tokenValues,
getValueKey,
getMultiplier,
cacheTokenValues,
getCacheMultiplier,
} = require('./tx');
@@ -80,20 +79,6 @@ describe('getValueKey', () => {
expect(getValueKey('chatgpt-4o-latest-0718')).toBe('gpt-4o');
});
it('should return "claude-3-7-sonnet" for model type of "claude-3-7-sonnet-"', () => {
expect(getValueKey('claude-3-7-sonnet-20240620')).toBe('claude-3-7-sonnet');
expect(getValueKey('anthropic/claude-3-7-sonnet')).toBe('claude-3-7-sonnet');
expect(getValueKey('claude-3-7-sonnet-turbo')).toBe('claude-3-7-sonnet');
expect(getValueKey('claude-3-7-sonnet-0125')).toBe('claude-3-7-sonnet');
});
it('should return "claude-3.7-sonnet" for model type of "claude-3.7-sonnet-"', () => {
expect(getValueKey('claude-3.7-sonnet-20240620')).toBe('claude-3.7-sonnet');
expect(getValueKey('anthropic/claude-3.7-sonnet')).toBe('claude-3.7-sonnet');
expect(getValueKey('claude-3.7-sonnet-turbo')).toBe('claude-3.7-sonnet');
expect(getValueKey('claude-3.7-sonnet-0125')).toBe('claude-3.7-sonnet');
});
it('should return "claude-3-5-sonnet" for model type of "claude-3-5-sonnet-"', () => {
expect(getValueKey('claude-3-5-sonnet-20240620')).toBe('claude-3-5-sonnet');
expect(getValueKey('anthropic/claude-3-5-sonnet')).toBe('claude-3-5-sonnet');
@@ -226,7 +211,6 @@ describe('getMultiplier', () => {
describe('AWS Bedrock Model Tests', () => {
const awsModels = [
'anthropic.claude-3-5-haiku-20241022-v1:0',
'anthropic.claude-3-haiku-20240307-v1:0',
'anthropic.claude-3-sonnet-20240229-v1:0',
'anthropic.claude-3-opus-20240229-v1:0',
@@ -253,9 +237,6 @@ describe('AWS Bedrock Model Tests', () => {
'ai21.j2-ultra-v1',
'amazon.titan-text-lite-v1',
'amazon.titan-text-express-v1',
'amazon.nova-micro-v1:0',
'amazon.nova-lite-v1:0',
'amazon.nova-pro-v1:0',
];
it('should return the correct prompt multipliers for all models', () => {
@@ -277,57 +258,14 @@ describe('AWS Bedrock Model Tests', () => {
});
});
describe('Deepseek Model Tests', () => {
const deepseekModels = ['deepseek-chat', 'deepseek-coder', 'deepseek-reasoner'];
it('should return the correct prompt multipliers for all models', () => {
const results = deepseekModels.map((model) => {
const valueKey = getValueKey(model);
const multiplier = getMultiplier({ valueKey, tokenType: 'prompt' });
return tokenValues[valueKey].prompt && multiplier === tokenValues[valueKey].prompt;
});
expect(results.every(Boolean)).toBe(true);
});
it('should return the correct completion multipliers for all models', () => {
const results = deepseekModels.map((model) => {
const valueKey = getValueKey(model);
const multiplier = getMultiplier({ valueKey, tokenType: 'completion' });
return tokenValues[valueKey].completion && multiplier === tokenValues[valueKey].completion;
});
expect(results.every(Boolean)).toBe(true);
});
it('should return the correct prompt multipliers for reasoning model', () => {
const model = 'deepseek-reasoner';
const valueKey = getValueKey(model);
expect(valueKey).toBe(model);
const multiplier = getMultiplier({ valueKey, tokenType: 'prompt' });
const result = tokenValues[valueKey].prompt && multiplier === tokenValues[valueKey].prompt;
expect(result).toBe(true);
});
});
describe('getCacheMultiplier', () => {
it('should return the correct cache multiplier for a given valueKey and cacheType', () => {
expect(getCacheMultiplier({ valueKey: 'claude-3-5-sonnet', cacheType: 'write' })).toBe(
cacheTokenValues['claude-3-5-sonnet'].write,
);
expect(getCacheMultiplier({ valueKey: 'claude-3-5-sonnet', cacheType: 'read' })).toBe(
cacheTokenValues['claude-3-5-sonnet'].read,
);
expect(getCacheMultiplier({ valueKey: 'claude-3-5-haiku', cacheType: 'write' })).toBe(
cacheTokenValues['claude-3-5-haiku'].write,
);
expect(getCacheMultiplier({ valueKey: 'claude-3-5-haiku', cacheType: 'read' })).toBe(
cacheTokenValues['claude-3-5-haiku'].read,
);
expect(getCacheMultiplier({ valueKey: 'claude-3-haiku', cacheType: 'write' })).toBe(
cacheTokenValues['claude-3-haiku'].write,
);
expect(getCacheMultiplier({ valueKey: 'claude-3-haiku', cacheType: 'read' })).toBe(
cacheTokenValues['claude-3-haiku'].read,
);
expect(getCacheMultiplier({ valueKey: 'claude-3-5-sonnet', cacheType: 'write' })).toBe(3.75);
expect(getCacheMultiplier({ valueKey: 'claude-3-5-sonnet', cacheType: 'read' })).toBe(0.3);
expect(getCacheMultiplier({ valueKey: 'claude-3-5-haiku', cacheType: 'write' })).toBe(1.25);
expect(getCacheMultiplier({ valueKey: 'claude-3-5-haiku', cacheType: 'read' })).toBe(0.1);
expect(getCacheMultiplier({ valueKey: 'claude-3-haiku', cacheType: 'write' })).toBe(0.3);
expect(getCacheMultiplier({ valueKey: 'claude-3-haiku', cacheType: 'read' })).toBe(0.03);
});
it('should return null if cacheType is provided but not found in cacheTokenValues', () => {
@@ -394,108 +332,3 @@ describe('getCacheMultiplier', () => {
).toBe(0.03);
});
});
describe('Google Model Tests', () => {
const googleModels = [
'gemini-2.0-flash-lite-preview-02-05',
'gemini-2.0-flash-001',
'gemini-2.0-flash-exp',
'gemini-2.0-pro-exp-02-05',
'gemini-1.5-flash-8b',
'gemini-1.5-flash-thinking',
'gemini-1.5-pro-latest',
'gemini-1.5-pro-preview-0409',
'gemini-pro-vision',
'gemini-1.0',
'gemini-pro',
];
it('should return the correct prompt and completion rates for all models', () => {
const results = googleModels.map((model) => {
const valueKey = getValueKey(model, EModelEndpoint.google);
const promptRate = getMultiplier({
model,
tokenType: 'prompt',
endpoint: EModelEndpoint.google,
});
const completionRate = getMultiplier({
model,
tokenType: 'completion',
endpoint: EModelEndpoint.google,
});
return { model, valueKey, promptRate, completionRate };
});
results.forEach(({ valueKey, promptRate, completionRate }) => {
expect(promptRate).toBe(tokenValues[valueKey].prompt);
expect(completionRate).toBe(tokenValues[valueKey].completion);
});
});
it('should map to the correct model keys', () => {
const expected = {
'gemini-2.0-flash-lite-preview-02-05': 'gemini-2.0-flash-lite',
'gemini-2.0-flash-001': 'gemini-2.0-flash',
'gemini-2.0-flash-exp': 'gemini-2.0-flash',
'gemini-2.0-pro-exp-02-05': 'gemini-2.0',
'gemini-1.5-flash-8b': 'gemini-1.5-flash-8b',
'gemini-1.5-flash-thinking': 'gemini-1.5-flash',
'gemini-1.5-pro-latest': 'gemini-1.5',
'gemini-1.5-pro-preview-0409': 'gemini-1.5',
'gemini-pro-vision': 'gemini-pro-vision',
'gemini-1.0': 'gemini',
'gemini-pro': 'gemini',
};
Object.entries(expected).forEach(([model, expectedKey]) => {
const valueKey = getValueKey(model, EModelEndpoint.google);
expect(valueKey).toBe(expectedKey);
});
});
it('should handle model names with different formats', () => {
const testCases = [
{ input: 'google/gemini-pro', expected: 'gemini' },
{ input: 'gemini-pro/google', expected: 'gemini' },
{ input: 'google/gemini-2.0-flash-lite', expected: 'gemini-2.0-flash-lite' },
];
testCases.forEach(({ input, expected }) => {
const valueKey = getValueKey(input, EModelEndpoint.google);
expect(valueKey).toBe(expected);
expect(
getMultiplier({ model: input, tokenType: 'prompt', endpoint: EModelEndpoint.google }),
).toBe(tokenValues[expected].prompt);
expect(
getMultiplier({ model: input, tokenType: 'completion', endpoint: EModelEndpoint.google }),
).toBe(tokenValues[expected].completion);
});
});
});
describe('Grok Model Tests - Pricing', () => {
describe('getMultiplier', () => {
test('should return correct prompt and completion rates for Grok vision models', () => {
const models = ['grok-2-vision-1212', 'grok-2-vision', 'grok-2-vision-latest'];
models.forEach((model) => {
expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(2.0);
expect(getMultiplier({ model, tokenType: 'completion' })).toBe(10.0);
});
});
test('should return correct prompt and completion rates for Grok text models', () => {
const models = ['grok-2-1212', 'grok-2', 'grok-2-latest'];
models.forEach((model) => {
expect(getMultiplier({ model, tokenType: 'prompt' })).toBe(2.0);
expect(getMultiplier({ model, tokenType: 'completion' })).toBe(10.0);
});
});
test('should return correct prompt and completion rates for Grok beta models', () => {
expect(getMultiplier({ model: 'grok-vision-beta', tokenType: 'prompt' })).toBe(5.0);
expect(getMultiplier({ model: 'grok-vision-beta', tokenType: 'completion' })).toBe(15.0);
expect(getMultiplier({ model: 'grok-beta', tokenType: 'prompt' })).toBe(5.0);
expect(getMultiplier({ model: 'grok-beta', tokenType: 'completion' })).toBe(15.0);
});
});
});

View File

@@ -1,6 +1,6 @@
{
"name": "@librechat/backend",
"version": "v0.7.7-rc1",
"version": "v0.7.5",
"description": "",
"scripts": {
"start": "echo 'please run this from the root directory'",
@@ -34,21 +34,20 @@
},
"homepage": "https://librechat.ai",
"dependencies": {
"@anthropic-ai/sdk": "^0.37.0",
"@anthropic-ai/sdk": "^0.16.1",
"@azure/search-documents": "^12.0.0",
"@google/generative-ai": "^0.21.0",
"@googleapis/youtube": "^20.0.0",
"@keyv/mongo": "^2.1.8",
"@keyv/redis": "^2.8.1",
"@langchain/community": "^0.3.14",
"@langchain/core": "^0.3.40",
"@langchain/google-genai": "^0.1.9",
"@langchain/google-vertexai": "^0.2.0",
"@langchain/community": "^0.3.13",
"@langchain/core": "^0.3.17",
"@langchain/google-genai": "^0.1.3",
"@langchain/google-vertexai": "^0.1.2",
"@langchain/textsplitters": "^0.1.0",
"@librechat/agents": "^2.1.3",
"@waylaidwanderer/fetch-event-source": "^3.0.1",
"axios": "1.7.8",
"@librechat/agents": "^1.7.7",
"axios": "^1.7.7",
"bcryptjs": "^2.4.3",
"cheerio": "^1.0.0-rc.12",
"cohere-ai": "^7.9.1",
"compression": "^1.7.4",
"connect-redis": "^7.1.0",
@@ -57,17 +56,15 @@
"cors": "^2.8.5",
"dedent": "^1.5.3",
"dotenv": "^16.0.3",
"eventsource": "^3.0.2",
"express": "^4.21.2",
"express": "^4.21.1",
"express-mongo-sanitize": "^2.2.0",
"express-rate-limit": "^7.4.1",
"express-session": "^1.18.1",
"express-static-gzip": "^2.2.0",
"file-type": "^18.7.0",
"firebase": "^11.0.2",
"googleapis": "^126.0.1",
"handlebars": "^4.7.7",
"https-proxy-agent": "^7.0.6",
"html": "^1.0.0",
"ioredis": "^5.3.2",
"js-yaml": "^4.1.0",
"jsonwebtoken": "^9.0.0",
@@ -76,22 +73,21 @@
"klona": "^2.0.6",
"langchain": "^0.2.19",
"librechat-data-provider": "*",
"librechat-mcp": "*",
"lodash": "^4.17.21",
"meilisearch": "^0.38.0",
"memorystore": "^1.6.7",
"mime": "^3.0.0",
"module-alias": "^2.2.3",
"mongoose": "^8.9.5",
"mongoose": "^7.3.3",
"multer": "^1.4.5-lts.1",
"nanoid": "^3.3.7",
"nodejs-gpt": "^1.37.4",
"nodemailer": "^6.9.15",
"ollama": "^0.5.0",
"openai": "^4.47.1",
"openai-chat-tokens": "^0.2.8",
"openid-client": "^5.4.2",
"passport": "^0.6.0",
"passport-apple": "^2.0.2",
"passport-custom": "^1.1.1",
"passport-discord": "^0.1.4",
"passport-facebook": "^3.0.0",
"passport-github2": "^0.1.12",
@@ -99,19 +95,19 @@
"passport-jwt": "^4.0.1",
"passport-ldapauth": "^3.0.1",
"passport-local": "^1.0.0",
"pino": "^8.12.1",
"sharp": "^0.32.6",
"tiktoken": "^1.0.15",
"traverse": "^0.6.7",
"ua-parser-js": "^1.0.36",
"winston": "^3.11.0",
"winston-daily-rotate-file": "^4.7.1",
"youtube-transcript": "^1.2.1",
"zod": "^3.22.4"
},
"devDependencies": {
"jest": "^29.7.0",
"mongodb-memory-server": "^10.1.3",
"nodemon": "^3.0.3",
"supertest": "^7.0.0"
"mongodb-memory-server": "^10.0.0",
"nodemon": "^3.0.1",
"supertest": "^6.3.3"
}
}

Some files were not shown because too many files have changed in this diff Show More