Compare commits


3 Commits

Author        SHA1        Message                      Date
Dan Orlando   ab4fc6eeb1  add test id to TitleButton   2024-02-26 14:38:21 -07:00
Dan Orlando   55ce33aa6a  Fix landing spec             2024-02-26 13:19:20 -07:00
Dan Orlando   6f250e0d6b  Fix Keys Spec                2024-02-26 13:14:37 -07:00
572 changed files with 11540 additions and 28057 deletions

View File

@@ -13,6 +13,9 @@
# Server Configuration #
#==================================================#
APP_TITLE=LibreChat
# CUSTOM_FOOTER="My custom footer"
HOST=localhost
PORT=3080
@@ -23,13 +26,6 @@ DOMAIN_SERVER=http://localhost:3080
NO_INDEX=true
#===============#
# JSON Logging #
#===============#
# Use when processing console logs in cloud deployments like GCP/AWS
CONSOLE_JSON=false
#===============#
# Debug Logging #
#===============#
@@ -44,62 +40,38 @@ DEBUG_CONSOLE=false
# UID=1000
# GID=1000
#===============#
# Configuration #
#===============#
# Use an absolute path, a relative path, or a URL
# CONFIG_PATH="/alternative/path/to/librechat.yaml"
#===================================================#
# Endpoints #
#===================================================#
# ENDPOINTS=openAI,assistants,azureOpenAI,bingAI,google,gptPlugins,anthropic
# ENDPOINTS=openAI,assistants,azureOpenAI,bingAI,chatGPTBrowser,google,gptPlugins,anthropic
PROXY=
#===================================#
# Known Endpoints - librechat.yaml #
#===================================#
# https://docs.librechat.ai/install/configuration/ai_endpoints.html
# GROQ_API_KEY=
# SHUTTLEAI_KEY=
# OPENROUTER_KEY=
# MISTRAL_API_KEY=
# ANYSCALE_API_KEY=
# FIREWORKS_API_KEY=
# PERPLEXITY_API_KEY=
# TOGETHERAI_API_KEY=
#============#
# Anthropic #
#============#
ANTHROPIC_API_KEY=user_provided
# ANTHROPIC_MODELS=claude-3-opus-20240229,claude-3-sonnet-20240229,claude-3-haiku-20240307,claude-2.1,claude-2,claude-1.2,claude-1,claude-1-100k,claude-instant-1,claude-instant-1-100k
ANTHROPIC_MODELS=claude-1,claude-instant-1,claude-2
# ANTHROPIC_REVERSE_PROXY=
#============#
# Azure #
#============#
# AZURE_API_KEY=
AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4
# AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo
# PLUGINS_USE_AZURE="true"
# Note: these variables are DEPRECATED
# Use the `librechat.yaml` configuration for `azureOpenAI` instead
# You may also continue to use them if you opt out of using the `librechat.yaml` configuration
AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE
# AZURE_OPENAI_DEFAULT_MODEL=gpt-3.5-turbo # Deprecated
# AZURE_OPENAI_MODELS=gpt-3.5-turbo,gpt-4 # Deprecated
# AZURE_USE_MODEL_AS_DEPLOYMENT_NAME=TRUE # Deprecated
# AZURE_API_KEY= # Deprecated
# AZURE_OPENAI_API_INSTANCE_NAME= # Deprecated
# AZURE_OPENAI_API_DEPLOYMENT_NAME= # Deprecated
# AZURE_OPENAI_API_VERSION= # Deprecated
# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME= # Deprecated
# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME= # Deprecated
# PLUGINS_USE_AZURE="true" # Deprecated
# AZURE_OPENAI_API_INSTANCE_NAME=
# AZURE_OPENAI_API_DEPLOYMENT_NAME=
# AZURE_OPENAI_API_VERSION=
# AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME=
# AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME=
#============#
# BingAI #
@@ -108,6 +80,14 @@ ANTHROPIC_API_KEY=user_provided
BINGAI_TOKEN=user_provided
# BINGAI_HOST=https://cn.bing.com
#============#
# ChatGPT #
#============#
CHATGPT_TOKEN=
CHATGPT_MODELS=text-davinci-002-render-sha
# CHATGPT_REVERSE_PROXY=
#============#
# Google #
#============#
@@ -135,20 +115,20 @@ DEBUG_OPENAI=false
# OPENAI_REVERSE_PROXY=
# OPENAI_ORGANIZATION=
# OPENAI_ORGANIZATION=
#====================#
# Assistants API #
#====================#
ASSISTANTS_API_KEY=user_provided
# ASSISTANTS_API_KEY=
# ASSISTANTS_BASE_URL=
# ASSISTANTS_MODELS=gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview
#============#
# OpenRouter #
#============#
# !!!Warning: Use the variable above instead of this one. Using this one will override the OpenAI endpoint
# OPENROUTER_API_KEY=
#============#
@@ -192,7 +172,7 @@ AZURE_AI_SEARCH_SEARCH_OPTION_SELECT=
# Google
#-----------------
GOOGLE_SEARCH_API_KEY=
GOOGLE_API_KEY=
GOOGLE_CSE_ID=
# SerpAPI
@@ -203,14 +183,6 @@ SERPAPI_API_KEY=
#-----------------
SD_WEBUI_URL=http://host.docker.internal:7860
# Tavily
#-----------------
TAVILY_API_KEY=
# Traversaal
#-----------------
TRAVERSAAL_API_KEY=
# WolframAlpha
#-----------------
WOLFRAM_APP_ID=
@@ -266,8 +238,6 @@ LIMIT_MESSAGE_USER=false
MESSAGE_USER_MAX=40
MESSAGE_USER_WINDOW=1
ILLEGAL_MODEL_REQ_SCORE=5
#========================#
# Balance #
#========================#
@@ -316,9 +286,6 @@ OPENID_ISSUER=
OPENID_SESSION_SECRET=
OPENID_SCOPE="openid profile email"
OPENID_CALLBACK_URL=/oauth/openid/callback
OPENID_REQUIRED_ROLE=
OPENID_REQUIRED_ROLE_TOKEN_KIND=
OPENID_REQUIRED_ROLE_PARAMETER_PATH=
OPENID_BUTTON_LABEL=
OPENID_IMAGE_URL=
@@ -327,15 +294,15 @@ OPENID_IMAGE_URL=
# Email Password Reset #
#========================#
EMAIL_SERVICE=
EMAIL_HOST=
EMAIL_PORT=25
EMAIL_ENCRYPTION=
EMAIL_ENCRYPTION_HOSTNAME=
EMAIL_ALLOW_SELFSIGNED=
EMAIL_USERNAME=
EMAIL_PASSWORD=
EMAIL_FROM_NAME=
EMAIL_SERVICE=
EMAIL_HOST=
EMAIL_PORT=25
EMAIL_ENCRYPTION=
EMAIL_ENCRYPTION_HOSTNAME=
EMAIL_ALLOW_SELFSIGNED=
EMAIL_USERNAME=
EMAIL_PASSWORD=
EMAIL_FROM_NAME=
EMAIL_FROM=noreply@librechat.ai
#========================#
@@ -349,16 +316,6 @@ FIREBASE_STORAGE_BUCKET=
FIREBASE_MESSAGING_SENDER_ID=
FIREBASE_APP_ID=
#===================================================#
# UI #
#===================================================#
APP_TITLE=LibreChat
# CUSTOM_FOOTER="My custom footer"
HELP_AND_FAQ_URL=https://librechat.ai
# SHOW_BIRTHDAY_ICON=true
#==================================================#
# Others #
#==================================================#
@@ -366,8 +323,15 @@ HELP_AND_FAQ_URL=https://librechat.ai
# NODE_ENV=
# If using Redis, you should flush the cache after changing any LibreChat settings
# REDIS_URI=
# USE_REDIS=
# Give the AI Icon a Birthday Hat :)
# Will show automatically on February 11th (LibreChat's birthday)
# Set this to false to disable the birthday hat
# Set to true to enable all the time.
# SHOW_BIRTHDAY_ICON=true
# E2E_USER_EMAIL=
# E2E_USER_PASSWORD=
# E2E_USER_PASSWORD=
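
Several of the endpoint blocks above take comma-separated model lists (ANTHROPIC_MODELS, AZURE_OPENAI_MODELS, ASSISTANTS_MODELS). A minimal Node sketch of how such a variable can be split into a list; the parseModels helper is illustrative, not LibreChat's actual loader:

require('dotenv').config();

// Illustrative helper (not LibreChat's actual loader): split a comma-separated
// env var such as ANTHROPIC_MODELS into a trimmed list, with a fallback.
function parseModels(envValue, fallback = []) {
  if (!envValue) {
    return fallback;
  }
  return envValue
    .split(',')
    .map((model) => model.trim())
    .filter(Boolean);
}

// Example: ANTHROPIC_MODELS=claude-3-opus-20240229,claude-3-sonnet-20240229
console.log(parseModels(process.env.ANTHROPIC_MODELS, ['claude-3-haiku-20240307']));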

View File

@@ -19,7 +19,6 @@ module.exports = {
'e2e/playwright-report/**/*',
'packages/data-provider/types/**/*',
'packages/data-provider/dist/**/*',
'packages/data-provider/test_bundle/**/*',
'data-node/**/*',
'meili_data/**/*',
'node_modules/**/*',

View File

@@ -60,7 +60,7 @@ representative at an online or offline event.
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement here on GitHub or
on the official [Discord Server](https://discord.librechat.ai).
on the official [Discord Server](https://discord.gg/uDyZ5Tzhct).
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the

View File

@@ -8,7 +8,7 @@ If the feature you would like to contribute has not already received prior appro
Please note that a pull request involving a feature that has not been reviewed and approved by the project maintainers may be rejected. We appreciate your understanding and cooperation.
If you would like to discuss the changes you wish to make, join our [Discord community](https://discord.librechat.ai), where you can engage with other contributors and seek guidance from the community.
If you would like to discuss the changes you wish to make, join our [Discord community](https://discord.gg/uDyZ5Tzhct), where you can engage with other contributors and seek guidance from the community.
## Our Standards

View File

@@ -50,7 +50,7 @@ body:
id: terms
attributes:
label: Code of Conduct
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md)
description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/CODE_OF_CONDUCT.md)
options:
- label: I agree to follow this project's Code of Conduct
required: true

.github/SECURITY.md vendored
View File

@@ -12,7 +12,7 @@ When reporting a security vulnerability, you have the following options to reach
- **Option 2: GitHub Issues**: You can initiate first contact via GitHub Issues. However, please note that initial contact through GitHub Issues should not include any sensitive details.
- **Option 3: Discord Server**: You can join our [Discord community](https://discord.librechat.ai) and initiate first contact in the `#issues` channel. However, please ensure that initial contact through Discord does not include any sensitive details.
- **Option 3: Discord Server**: You can join our [Discord community](https://discord.gg/5rbRxn4uME) and initiate first contact in the `#issues` channel. However, please ensure that initial contact through Discord does not include any sensitive details.
_After the initial contact, we will establish a private communication channel for further discussion._
@@ -39,11 +39,11 @@ Please note that as a security-conscious community, we may not always disclose d
This security policy applies to the following GitHub repository:
- Repository: [LibreChat](https://github.librechat.ai)
- Repository: [LibreChat](https://github.com/danny-avila/LibreChat)
## Contact
If you have any questions or concerns regarding the security of our project, please join our [Discord community](https://discord.librechat.ai) and report them in the appropriate channel. You can also reach out to us by [opening an issue](https://github.com/danny-avila/LibreChat/issues/new) on GitHub. Please note that the response time may vary depending on the nature and severity of the inquiry.
If you have any questions or concerns regarding the security of our project, please join our [Discord community](https://discord.gg/NGaa9RPCft) and report them in the appropriate channel. You can also reach out to us by [opening an issue](https://github.com/danny-avila/LibreChat/issues/new) on GitHub. Please note that the response time may vary depending on the nature and severity of the inquiry.
## Acknowledgments

View File

@@ -15,9 +15,8 @@ Please delete any irrelevant options.
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
- [ ] This change requires a documentation update
- [ ] Translation update
- [ ] Documentation update
- [ ] Translation update
## Testing
@@ -27,8 +26,6 @@ Please describe your test process and include instructions so that we can reprod
## Checklist
Please delete any irrelevant options.
- [ ] My code adheres to this project's style guidelines
- [ ] I have performed a self-review of my own code
- [ ] I have commented in any complex areas of my code
@@ -37,4 +34,3 @@ Please delete any irrelevant options.
- [ ] I have written tests demonstrating that my changes are effective or that my feature works
- [ ] Local unit tests pass with my changes
- [ ] Any changes dependent on mine have been merged and published in downstream modules.
- [ ] New documents have been locally validated with mkdocs

View File

@@ -35,24 +35,6 @@ jobs:
- name: Install Data Provider
run: npm run build:data-provider
- name: Create empty auth.json file
run: |
mkdir -p api/data
echo '{}' > api/data/auth.json
- name: Check for Circular dependency in rollup
working-directory: ./packages/data-provider
run: |
output=$(npm run rollup:api)
echo "$output"
if echo "$output" | grep -q "Circular dependency"; then
echo "Error: Circular dependency detected!"
exit 1
fi
- name: Prepare .env.test file
run: cp api/test/.env.test.example api/test/.env.test
- name: Run unit tests
run: cd api && npm run test:ci
@@ -63,4 +45,4 @@ jobs:
- name: Run linters
uses: wearerequired/lint-action@v2
with:
eslint: true
eslint: true
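
The removed step above fails the job when `npm run rollup:api` reports a circular dependency. A rough Node equivalent of that grep-based check, assuming the warning text appears on stdout or stderr:

const { spawnSync } = require('child_process');

// Run the rollup build from the package directory, merging stdout and stderr.
const result = spawnSync('npm', ['run', 'rollup:api'], {
  cwd: './packages/data-provider',
  encoding: 'utf8',
});
const output = `${result.stdout ?? ''}${result.stderr ?? ''}`;
console.log(output);

if (output.includes('Circular dependency')) {
  console.error('Error: Circular dependency detected!');
  process.exit(1);
}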

.github/workflows/container.yml vendored Normal file
View File

@@ -0,0 +1,83 @@
name: Docker Compose Build on Tag
# The workflow is triggered when a tag is pushed
on:
push:
tags:
- "*"
jobs:
build:
runs-on: ubuntu-latest
steps:
# Check out the repository
- name: Checkout
uses: actions/checkout@v4
# Set up Docker
- name: Set up Docker
uses: docker/setup-buildx-action@v3
# Set up QEMU for cross-platform builds
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
# Log in to GitHub Container Registry
- name: Log in to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
# Prepare Docker Build
- name: Build Docker images
run: |
cp .env.example .env
# Tag and push librechat-api
- name: Docker metadata for librechat-api
id: meta-librechat-api
uses: docker/metadata-action@v5
with:
images: |
ghcr.io/${{ github.repository_owner }}/librechat-api
tags: |
type=raw,value=latest
type=semver,pattern={{version}}
type=semver,pattern={{major}}
type=semver,pattern={{major}}.{{minor}}
- name: Build and push librechat-api
uses: docker/build-push-action@v5
with:
file: Dockerfile.multi
context: .
push: true
tags: ${{ steps.meta-librechat-api.outputs.tags }}
platforms: linux/amd64,linux/arm64
target: api-build
# Tag and push librechat
- name: Docker metadata for librechat
id: meta-librechat
uses: docker/metadata-action@v5
with:
images: |
ghcr.io/${{ github.repository_owner }}/librechat
tags: |
type=raw,value=latest
type=semver,pattern={{version}}
type=semver,pattern={{major}}
type=semver,pattern={{major}}.{{minor}}
- name: Build and push librechat
uses: docker/build-push-action@v5
with:
file: Dockerfile
context: .
push: true
tags: ${{ steps.meta-librechat.outputs.tags }}
platforms: linux/amd64,linux/arm64
target: node
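
The metadata-action tag patterns in this workflow expand a pushed tag such as v0.7.1 into latest, 0.7.1, 0, and 0.7. A small illustrative sketch of that expansion:

// Expand a pushed tag the way the metadata-action patterns above do:
// raw `latest`, {{version}}, {{major}}, and {{major}}.{{minor}}.
function expandTags(ref) {
  const version = ref.replace(/^v/, '');
  const [major, minor] = version.split('.');
  return ['latest', version, major, `${major}.${minor}`];
}

console.log(expandTags('v0.7.1')); // [ 'latest', '0.7.1', '0', '0.7' ]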

View File

@@ -13,27 +13,14 @@ on:
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
include:
- target: api-build
file: Dockerfile.multi
image_name: librechat-dev-api
- target: node
file: Dockerfile
image_name: librechat-dev
steps:
# Check out the repository
- name: Checkout
uses: actions/checkout@v4
# Set up QEMU
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
# Set up Docker Buildx
- name: Set up Docker Buildx
# Set up Docker
- name: Set up Docker
uses: docker/setup-buildx-action@v3
# Log in to GitHub Container Registry
@@ -51,22 +38,35 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
# Prepare the environment
- name: Prepare environment
# Build Docker images
- name: Build Docker images
run: |
cp .env.example .env
docker build -f Dockerfile.multi --target api-build -t librechat-dev-api .
docker build -f Dockerfile -t librechat-dev .
# Build and push Docker images for each target
- name: Build and push Docker images
uses: docker/build-push-action@v5
with:
context: .
file: ${{ matrix.file }}
push: true
tags: |
ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ github.sha }}
ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ github.sha }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest
platforms: linux/amd64,linux/arm64
target: ${{ matrix.target }}
# Tag and push the images to GitHub Container Registry
- name: Tag and push images to GHCR
run: |
docker tag librechat-dev-api:latest ghcr.io/${{ github.repository_owner }}/librechat-dev-api:${{ github.sha }}
docker push ghcr.io/${{ github.repository_owner }}/librechat-dev-api:${{ github.sha }}
docker tag librechat-dev-api:latest ghcr.io/${{ github.repository_owner }}/librechat-dev-api:latest
docker push ghcr.io/${{ github.repository_owner }}/librechat-dev-api:latest
docker tag librechat-dev:latest ghcr.io/${{ github.repository_owner }}/librechat-dev:${{ github.sha }}
docker push ghcr.io/${{ github.repository_owner }}/librechat-dev:${{ github.sha }}
docker tag librechat-dev:latest ghcr.io/${{ github.repository_owner }}/librechat-dev:latest
docker push ghcr.io/${{ github.repository_owner }}/librechat-dev:latest
# Tag and push the images to Docker Hub
- name: Tag and push images to Docker Hub
run: |
docker tag librechat-dev-api:latest ${{ secrets.DOCKERHUB_USERNAME }}/librechat-dev-api:${{ github.sha }}
docker push ${{ secrets.DOCKERHUB_USERNAME }}/librechat-dev-api:${{ github.sha }}
docker tag librechat-dev-api:latest ${{ secrets.DOCKERHUB_USERNAME }}/librechat-dev-api:latest
docker push ${{ secrets.DOCKERHUB_USERNAME }}/librechat-dev-api:latest
docker tag librechat-dev:latest ${{ secrets.DOCKERHUB_USERNAME }}/librechat-dev:${{ github.sha }}
docker push ${{ secrets.DOCKERHUB_USERNAME }}/librechat-dev:${{ github.sha }}
docker tag librechat-dev:latest ${{ secrets.DOCKERHUB_USERNAME }}/librechat-dev:latest
docker push ${{ secrets.DOCKERHUB_USERNAME }}/librechat-dev:latest

View File

@@ -1,20 +0,0 @@
name: 'generate_embeddings'
on:
workflow_dispatch:
push:
branches:
- main
paths:
- 'docs/**'
jobs:
generate:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: supabase/embeddings-generator@v0.0.5
with:
supabase-url: ${{ secrets.SUPABASE_URL }}
supabase-service-role-key: ${{ secrets.SUPABASE_SERVICE_ROLE_KEY }}
openai-key: ${{ secrets.OPENAI_DOC_EMBEDDINGS_KEY }}
docs-root-path: 'docs'

View File

@@ -0,0 +1,88 @@
name: Docker Compose Build Latest Tag (Manual Dispatch)
# The workflow is manually triggered
on:
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-latest
steps:
# Check out the repository
- name: Checkout
uses: actions/checkout@v4
# Fetch all tags and set the latest tag
- name: Fetch tags and set the latest tag
run: |
git fetch --tags
echo "LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`)" >> $GITHUB_ENV
# Set up Docker
- name: Set up Docker
uses: docker/setup-buildx-action@v3
# Set up QEMU
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
# Log in to GitHub Container Registry
- name: Log in to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
# Prepare Docker Build
- name: Build Docker images
run: cp .env.example .env
# Docker metadata for librechat-api
- name: Docker metadata for librechat-api
id: meta-librechat-api
uses: docker/metadata-action@v5
with:
images: ghcr.io/${{ github.repository_owner }}/librechat-api
tags: |
type=raw,value=${{ env.LATEST_TAG }},enable=true
type=raw,value=latest,enable=true
type=semver,pattern={{version}}
type=semver,pattern={{major}}
type=semver,pattern={{major}}.{{minor}}
# Build and push librechat-api
- name: Build and push librechat-api
uses: docker/build-push-action@v5
with:
file: Dockerfile.multi
context: .
push: true
tags: ${{ steps.meta-librechat-api.outputs.tags }}
platforms: linux/amd64,linux/arm64
target: api-build
# Docker metadata for librechat
- name: Docker metadata for librechat
id: meta-librechat
uses: docker/metadata-action@v5
with:
images: ghcr.io/${{ github.repository_owner }}/librechat
tags: |
type=raw,value=${{ env.LATEST_TAG }},enable=true
type=raw,value=latest,enable=true
type=semver,pattern={{version}}
type=semver,pattern={{major}}
type=semver,pattern={{major}}.{{minor}}
# Build and push librechat
- name: Build and push librechat
uses: docker/build-push-action@v5
with:
file: Dockerfile
context: .
push: true
tags: ${{ steps.meta-librechat.outputs.tags }}
platforms: linux/amd64,linux/arm64
target: node
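
Both tag workflows resolve the most recent tag with `git describe --tags $(git rev-list --tags --max-count=1)` and export it as LATEST_TAG. The same lookup from Node, assuming `git fetch --tags` has already run:

const { execSync } = require('child_process');

// Find the most recently tagged commit, then the tag pointing at it,
// mirroring the shell step above.
const commit = execSync('git rev-list --tags --max-count=1', { encoding: 'utf8' }).trim();
const latestTag = execSync(`git describe --tags ${commit}`, { encoding: 'utf8' }).trim();
console.log(`LATEST_TAG=${latestTag}`);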

View File

@@ -1,20 +1,12 @@
name: Docker Compose Build Latest Main Image Tag (Manual Dispatch)
# The workflow is manually triggered
on:
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
include:
- target: api-build
file: Dockerfile.multi
image_name: librechat-api
- target: node
file: Dockerfile
image_name: librechat
steps:
- name: Checkout
@@ -25,15 +17,12 @@ jobs:
git fetch --tags
echo "LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`)" >> $GITHUB_ENV
# Set up QEMU
- name: Set up Docker
uses: docker/setup-buildx-action@v3
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
# Set up Docker Buildx
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# Log in to GitHub Container Registry
- name: Log in to GitHub Container Registry
uses: docker/login-action@v2
with:
@@ -41,29 +30,26 @@ jobs:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
# Login to Docker Hub
- name: Login to Docker Hub
uses: docker/login-action@v3
# Docker metadata for librechat
- name: Docker metadata for librechat
id: meta-librechat
uses: docker/metadata-action@v5
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
images: ghcr.io/${{ github.repository_owner }}/librechat
tags: |
type=raw,value=${{ env.LATEST_TAG }},enable=true
type=raw,value=latest,enable=true
type=semver,pattern={{version}}
type=semver,pattern={{major}}
type=semver,pattern={{major}}.{{minor}}
# Prepare the environment
- name: Prepare environment
run: |
cp .env.example .env
# Build and push Docker images for each target
- name: Build and push Docker images
# Build and push librechat with only linux/amd64 platform
- name: Build and push librechat
uses: docker/build-push-action@v5
with:
file: Dockerfile
context: .
file: ${{ matrix.file }}
push: true
tags: |
ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ env.LATEST_TAG }}
ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ env.LATEST_TAG }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest
platforms: linux/amd64,linux/arm64
target: ${{ matrix.target }}
tags: ${{ steps.meta-librechat.outputs.tags }}
platforms: linux/amd64
target: node

View File

@@ -1,67 +0,0 @@
name: Docker Images Build on Tag
on:
push:
tags:
- '*'
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
include:
- target: api-build
file: Dockerfile.multi
image_name: librechat-api
- target: node
file: Dockerfile
image_name: librechat
steps:
# Check out the repository
- name: Checkout
uses: actions/checkout@v4
# Set up QEMU
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
# Set up Docker Buildx
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
# Log in to GitHub Container Registry
- name: Log in to GitHub Container Registry
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
# Login to Docker Hub
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
# Prepare the environment
- name: Prepare environment
run: |
cp .env.example .env
# Build and push Docker images for each target
- name: Build and push Docker images
uses: docker/build-push-action@v5
with:
context: .
file: ${{ matrix.file }}
push: true
tags: |
ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:${{ github.ref_name }}
ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest
${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:${{ github.ref_name }}
${{ secrets.DOCKERHUB_USERNAME }}/${{ matrix.image_name }}:latest
platforms: linux/amd64,linux/arm64
target: ${{ matrix.target }}

.gitignore vendored
View File

@@ -50,7 +50,6 @@ bower_components/
#config file
librechat.yaml
librechat.yml
# Environment
.npmrc
@@ -75,7 +74,6 @@ src/style - official.css
config.local.ts
**/storageState.json
junit.xml
**/.venv/
# docker override file
docker-compose.override.yaml
@@ -93,7 +91,4 @@ auth.json
!client/src/components/Nav/SettingsTabs/Data/
# User uploads
uploads/
# owner
release/
uploads/

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env sh
#!/usr/bin/env sh
set -e
. "$(dirname -- "$0")/_/husky.sh"
[ -n "$CI" ] && exit 0

View File

@@ -1,35 +1,20 @@
# v0.7.1
# Base node image
FROM node:18-alpine3.18 AS node
FROM node:18-alpine AS node
RUN apk add g++ make py3-pip
RUN npm install -g node-gyp
RUN apk --no-cache add curl
RUN mkdir -p /app && chown node:node /app
COPY . /app
WORKDIR /app
USER node
COPY --chown=node:node . .
# Allow mounting of these files, which have no default
# values.
RUN touch .env
RUN npm config set fetch-retry-maxtimeout 600000
RUN npm config set fetch-retries 5
RUN npm config set fetch-retry-mintimeout 15000
RUN npm install --no-audit
# Install all deps - Install curl for health check
RUN apk --no-cache add curl && \
npm ci
# React client build
ENV NODE_OPTIONS="--max-old-space-size=2048"
RUN npm run frontend
# Create directories for the volumes to inherit
# the correct permissions
RUN mkdir -p /app/client/public/images /app/api/logs
# Node API setup
EXPOSE 3080
ENV HOST=0.0.0.0

View File

@@ -1,5 +1,3 @@
# v0.7.1
# Build API, Client and Data Provider
FROM node:20-alpine AS base
@@ -13,12 +11,11 @@ RUN npm run build
# React client build
FROM data-provider-build AS client-build
WORKDIR /app/client
COPY ./client/package*.json ./
COPY ./client/ ./
# Copy data-provider to client's node_modules
RUN mkdir -p /app/client/node_modules/librechat-data-provider/
RUN cp -R /app/packages/data-provider/* /app/client/node_modules/librechat-data-provider/
RUN npm install
COPY ./client/ ./
ENV NODE_OPTIONS="--max-old-space-size=2048"
RUN npm run build
@@ -27,8 +24,6 @@ FROM data-provider-build AS api-build
WORKDIR /app/api
COPY api/package*.json ./
COPY api/ ./
# Copy helper scripts
COPY config/ ./
# Copy data-provider to API's node_modules
RUN mkdir -p /app/api/node_modules/librechat-data-provider/
RUN cp -R /app/packages/data-provider/* /app/api/node_modules/librechat-data-provider/

View File

@@ -1,10 +1,10 @@
<p align="center">
<a href="https://librechat.ai">
<a href="https://docs.librechat.ai">
<img src="docs/assets/LibreChat.svg" height="256">
</a>
<h1 align="center">
<a href="https://librechat.ai">LibreChat</a>
</h1>
<a href="https://docs.librechat.ai">
<h1 align="center">LibreChat</h1>
</a>
</p>
<p align="center">
@@ -27,7 +27,7 @@
</p>
<p align="center">
<a href="https://railway.app/template/b5k2mn?referralCode=myKrVZ">
<a href="https://railway.app/template/b5k2mn?referralCode=HI9hWz">
<img src="https://railway.app/button.svg" alt="Deploy on Railway" height="30">
</a>
<a href="https://zeabur.com/templates/0X2ZY8">
@@ -39,36 +39,30 @@
</p>
# 📃 Features
- 🖥️ UI matching ChatGPT, including Dark mode, Streaming, and latest updates
- 💬 Multimodal Chat:
- Upload and analyze images with Claude 3, GPT-4, and Gemini Vision 📸
- Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, & Google. 🗃️
- Advanced Agents with Files, Code Interpreter, Tools, and API Actions 🔦
- Available through the [OpenAI Assistants API](https://platform.openai.com/docs/assistants/overview) 🌤️
- Non-OpenAI Agents in Active Development 🚧
- 🌎 Multilingual UI:
- English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro,
- Русский, 日本語, Svenska, 한국어, Tiếng Việt, 繁體中文, العربية, Türkçe, Nederlands, עברית
- 🤖 AI model selection: OpenAI, Azure OpenAI, BingAI, ChatGPT, Google Vertex AI, Anthropic (Claude), Plugins, Assistants API (including Azure Assistants)
- 💾 Create, Save, & Share Custom Presets
- 🔄 Edit, Resubmit, and Continue messages with conversation branching
- 📤 Export conversations as screenshots, markdown, text, json.
- 🔍 Search all messages/conversations
- 🔌 Plugins, including web access, image generation with DALL-E-3 and more
- 👥 Multi-User, Secure Authentication with Moderation and Token spend tools
- ⚙️ Configure Proxy, Reverse Proxy, Docker, & many Deployment options
- 📖 Completely Open-Source & Built in Public
- 🧑‍🤝‍🧑 Community-driven development, support, and feedback
- 🖥️ UI matching ChatGPT, including Dark mode, Streaming, and 11-2023 updates
- 💬 Multimodal Chat:
- Upload and analyze images with GPT-4 and Gemini Vision 📸
- More filetypes and Assistants API integration in Active Development 🚧
- 🌎 Multilingual UI:
- English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro,
- Русский, 日本語, Svenska, 한국어, Tiếng Việt, 繁體中文, العربية, Türkçe, Nederlands
- 🤖 AI model selection: OpenAI API, Azure, BingAI, ChatGPT, Google Vertex AI, Anthropic (Claude), Plugins
- 💾 Create, Save, & Share Custom Presets
- 🔄 Edit, Resubmit, and Continue messages with conversation branching
- 📤 Export conversations as screenshots, markdown, text, json.
- 🔍 Search all messages/conversations
- 🔌 Plugins, including web access, image generation with DALL-E-3 and more
- 👥 Multi-User, Secure Authentication with Moderation and Token spend tools
- ⚙️ Configure Proxy, Reverse Proxy, Docker, many Deployment options, and completely Open-Source
[For a thorough review of our features, see our docs here](https://docs.librechat.ai/features/plugins/introduction.html) 📚
## 🪶 All-In-One AI Conversations with LibreChat
## 🪶 All-In-One AI Conversations with LibreChat
LibreChat brings together the future of assistant AIs with the revolutionary technology of OpenAI's ChatGPT. Celebrating the original styling, LibreChat gives you the ability to integrate multiple AI models. It also integrates and enhances original client features such as conversation and message search, prompt templates and plugins.
With LibreChat, you no longer need to opt for ChatGPT Plus and can instead use free or pay-per-call APIs. We welcome contributions, cloning, and forking to enhance the capabilities of this advanced chatbot platform.
<!-- https://github.com/danny-avila/LibreChat/assets/110412045/c1eb0c0f-41f6-4335-b982-84b278b53d59 -->
[![Watch the video](https://img.youtube.com/vi/pNIOs1ovsXw/maxresdefault.jpg)](https://youtu.be/pNIOs1ovsXw)
@@ -77,13 +71,11 @@ Click on the thumbnail to open the video☝
---
## 📚 Documentation
For more information on how to use our advanced features, install and configure our software, and access our guidelines and tutorials, please check out our documentation at [docs.librechat.ai](https://docs.librechat.ai)
---
## 📝 Changelog
## 📝 Changelog
Keep up with the latest updates by visiting the releases page - [Releases](https://github.com/danny-avila/LibreChat/releases)
**⚠️ [Breaking Changes](docs/general_info/breaking_changes.md)**
@@ -104,15 +96,14 @@ Please consult the breaking changes before updating.
---
## ✨ Contributions
Contributions, suggestions, bug reports and fixes are welcome!
For new features, components, or extensions, please open an issue and discuss before sending a PR.
For new features, components, or extensions, please open an issue and discuss before sending a PR.
---
## 💖 This project exists in its current state thanks to all the people who contribute
💖 This project exists in its current state thanks to all the people who contribute
---
<a href="https://github.com/danny-avila/LibreChat/graphs/contributors">
<img src="https://contrib.rocks/image?repo=danny-avila/LibreChat" />
</a>

View File

@@ -1,6 +1,5 @@
require('dotenv').config();
const { KeyvFile } = require('keyv-file');
const { EModelEndpoint } = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('~/server/services/UserService');
const { logger } = require('~/config');
@@ -24,7 +23,10 @@ const askBing = async ({
let key = null;
if (expiresAt && isUserProvided) {
checkUserKeyExpiry(expiresAt, EModelEndpoint.bingAI);
checkUserKeyExpiry(
expiresAt,
'Your BingAI Cookies have expired. Please provide your cookies again.',
);
key = await getUserKey({ userId, name: 'bingAI' });
}
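
This hunk swaps a free-form expiry message for the EModelEndpoint.bingAI identifier. A hypothetical sketch of what such a guard could look like; the real checkUserKeyExpiry lives in ~/server/services/UserService and may differ:

// Hypothetical sketch only; the actual implementation in UserService may differ.
function checkUserKeyExpiry(expiresAt, endpoint) {
  if (new Date(expiresAt) < new Date()) {
    // The endpoint identifier lets callers render a consistent error message
    // instead of hard-coding prose at every call site, as the old code did.
    throw new Error(`Your ${endpoint} key has expired. Please provide it again.`);
  }
}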

View File

@@ -1,6 +1,6 @@
require('dotenv').config();
const { KeyvFile } = require('keyv-file');
const { Constants, EModelEndpoint } = require('librechat-data-provider');
const { Constants } = require('librechat-data-provider');
const { getUserKey, checkUserKeyExpiry } = require('../server/services/UserService');
const browserClient = async ({
@@ -18,7 +18,10 @@ const browserClient = async ({
let key = null;
if (expiresAt && isUserProvided) {
checkUserKeyExpiry(expiresAt, EModelEndpoint.chatGPTBrowser);
checkUserKeyExpiry(
expiresAt,
'Your ChatGPT Access Token has expired. Please provide your token again.',
);
key = await getUserKey({ userId, name: 'chatGPTBrowser' });
}

View File

@@ -1,19 +1,6 @@
const Anthropic = require('@anthropic-ai/sdk');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
getResponseSender,
EModelEndpoint,
validateVisionModel,
} = require('librechat-data-provider');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const {
titleFunctionPrompt,
parseTitleFromPrompt,
truncateText,
formatMessage,
createContextHandlers,
} = require('./prompts');
const spendTokens = require('~/models/spendTokens');
const { getResponseSender, EModelEndpoint } = require('librechat-data-provider');
const { getModelMaxTokens } = require('~/utils');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
@@ -23,20 +10,12 @@ const AI_PROMPT = '\n\nAssistant:';
const tokenizersCache = {};
/** Helper function to introduce a delay before retrying */
function delayBeforeRetry(attempts, baseDelay = 1000) {
return new Promise((resolve) => setTimeout(resolve, baseDelay * attempts));
}
class AnthropicClient extends BaseClient {
constructor(apiKey, options = {}) {
super(apiKey, options);
this.apiKey = apiKey || process.env.ANTHROPIC_API_KEY;
this.userLabel = HUMAN_PROMPT;
this.assistantLabel = AI_PROMPT;
this.contextStrategy = options.contextStrategy
? options.contextStrategy.toLowerCase()
: 'discard';
this.setOptions(options);
}
@@ -68,12 +47,6 @@ class AnthropicClient extends BaseClient {
stop: modelOptions.stop, // no stop method for now
};
this.isClaude3 = this.modelOptions.model.includes('claude-3');
this.useMessages = this.isClaude3 || !!this.options.attachments;
this.defaultVisionModel = this.options.visionModel ?? 'claude-3-sonnet-20240229';
this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));
this.maxContextTokens =
getModelMaxTokens(this.modelOptions.model, EModelEndpoint.anthropic) ?? 100000;
this.maxResponseTokens = this.modelOptions.maxOutputTokens || 1500;
@@ -114,12 +87,7 @@ class AnthropicClient extends BaseClient {
return this;
}
/**
* Get the initialized Anthropic client.
* @returns {Anthropic} The Anthropic client instance.
*/
getClient() {
/** @type {Anthropic.default.RequestOptions} */
const options = {
apiKey: this.apiKey,
};
@@ -131,75 +99,6 @@ class AnthropicClient extends BaseClient {
return new Anthropic(options);
}
getTokenCountForResponse(response) {
return this.getTokenCountForMessage({
role: 'assistant',
content: response.text,
});
}
/**
*
* Checks if the model is a vision model based on request attachments and sets the appropriate options:
* - Sets `this.modelOptions.model` to `this.defaultVisionModel` if the request is a vision request.
* - Sets `this.isVisionModel` to `true` if vision request.
* - Deletes `this.modelOptions.stop` if vision request.
* @param {MongoFile[]} attachments
*/
checkVisionRequest(attachments) {
const availableModels = this.options.modelsConfig?.[EModelEndpoint.anthropic];
this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
const visionModelAvailable = availableModels?.includes(this.defaultVisionModel);
if (
attachments &&
attachments.some((file) => file?.type && file?.type?.includes('image')) &&
visionModelAvailable &&
!this.isVisionModel
) {
this.modelOptions.model = this.defaultVisionModel;
this.isVisionModel = true;
}
}
/**
* Calculates the token cost for an image based on its dimensions and detail level.
*
* For reference, see: https://docs.anthropic.com/claude/docs/vision#image-costs
*
* @param {Object} image - The image object.
* @param {number} image.width - The width of the image.
* @param {number} image.height - The height of the image.
* @returns {number} The calculated token cost in tokens.
*
*/
calculateImageTokenCost({ width, height }) {
return Math.ceil((width * height) / 750);
}
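// Example: a 1024x768 image costs Math.ceil((1024 * 768) / 750) = 1049 tokens,
// which buildMessages later adds to the owning message's tokenCount.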
async addImageURLs(message, attachments) {
const { files, image_urls } = await encodeAndFormat(
this.options.req,
attachments,
EModelEndpoint.anthropic,
);
message.image_urls = image_urls.length ? image_urls : undefined;
return files;
}
async recordTokenUsage({ promptTokens, completionTokens, model, context = 'message' }) {
await spendTokens(
{
context,
user: this.user,
conversationId: this.conversationId,
model: model ?? this.modelOptions.model,
endpointTokenConfig: this.options.endpointTokenConfig,
},
{ promptTokens, completionTokens },
);
}
async buildMessages(messages, parentMessageId) {
const orderedMessages = this.constructor.getMessagesForConversation({
messages,
@@ -208,145 +107,28 @@ class AnthropicClient extends BaseClient {
logger.debug('[AnthropicClient] orderedMessages', { orderedMessages, parentMessageId });
if (this.options.attachments) {
const attachments = await this.options.attachments;
const images = attachments.filter((file) => file.type.includes('image'));
if (images.length && !this.isVisionModel) {
throw new Error('Images are only supported with the Claude 3 family of models');
}
const latestMessage = orderedMessages[orderedMessages.length - 1];
if (this.message_file_map) {
this.message_file_map[latestMessage.messageId] = attachments;
} else {
this.message_file_map = {
[latestMessage.messageId]: attachments,
};
}
const files = await this.addImageURLs(latestMessage, attachments);
this.options.attachments = files;
}
if (this.message_file_map) {
this.contextHandlers = createContextHandlers(
this.options.req,
orderedMessages[orderedMessages.length - 1].text,
);
}
const formattedMessages = orderedMessages.map((message, i) => {
const formattedMessage = this.useMessages
? formatMessage({
message,
endpoint: EModelEndpoint.anthropic,
})
: {
author: message.isCreatedByUser ? this.userLabel : this.assistantLabel,
content: message?.content ?? message.text,
};
const needsTokenCount = this.contextStrategy && !orderedMessages[i].tokenCount;
/* If tokens were never counted, or this is a vision request and the message has files, count again */
if (needsTokenCount || (this.isVisionModel && (message.image_urls || message.files))) {
orderedMessages[i].tokenCount = this.getTokenCountForMessage(formattedMessage);
}
/* If message has files, calculate image token cost */
if (this.message_file_map && this.message_file_map[message.messageId]) {
const attachments = this.message_file_map[message.messageId];
for (const file of attachments) {
if (file.embedded) {
this.contextHandlers?.processFile(file);
continue;
}
orderedMessages[i].tokenCount += this.calculateImageTokenCost({
width: file.width,
height: file.height,
});
}
}
formattedMessage.tokenCount = orderedMessages[i].tokenCount;
return formattedMessage;
});
if (this.contextHandlers) {
this.augmentedPrompt = await this.contextHandlers.createContext();
this.options.promptPrefix = this.augmentedPrompt + (this.options.promptPrefix ?? '');
}
let { context: messagesInWindow, remainingContextTokens } =
await this.getMessagesWithinTokenLimit(formattedMessages);
const tokenCountMap = orderedMessages
.slice(orderedMessages.length - messagesInWindow.length)
.reduce((map, message, index) => {
const { messageId } = message;
if (!messageId) {
return map;
}
map[messageId] = orderedMessages[index].tokenCount;
return map;
}, {});
logger.debug('[AnthropicClient]', {
messagesInWindow: messagesInWindow.length,
remainingContextTokens,
});
const formattedMessages = orderedMessages.map((message) => ({
author: message.isCreatedByUser ? this.userLabel : this.assistantLabel,
content: message?.content ?? message.text,
}));
let lastAuthor = '';
let groupedMessages = [];
for (let i = 0; i < messagesInWindow.length; i++) {
const message = messagesInWindow[i];
const author = message.role ?? message.author;
for (let message of formattedMessages) {
// If last author is not same as current author, add to new group
if (lastAuthor !== author) {
const newMessage = {
if (lastAuthor !== message.author) {
groupedMessages.push({
author: message.author,
content: [message.content],
};
if (message.role) {
newMessage.role = message.role;
} else {
newMessage.author = message.author;
}
groupedMessages.push(newMessage);
lastAuthor = author;
});
lastAuthor = message.author;
// If same author, append content to the last group
} else {
groupedMessages[groupedMessages.length - 1].content.push(message.content);
}
}
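// Example: messages authored [Human, Human, Assistant] collapse into two groups:
// [{ author: Human, content: [c1, c2] }, { author: Assistant, content: [c3] }].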
groupedMessages = groupedMessages.map((msg, i) => {
const isLast = i === groupedMessages.length - 1;
if (msg.content.length === 1) {
const content = msg.content[0];
return {
...msg,
// reason: final assistant content cannot end with trailing whitespace
content:
isLast && this.useMessages && msg.role === 'assistant' && typeof content === 'string'
? content?.trim()
: content,
};
}
if (!this.useMessages && msg.tokenCount) {
delete msg.tokenCount;
}
return msg;
});
let identityPrefix = '';
if (this.options.userLabel) {
identityPrefix = `\nHuman's name: ${this.options.userLabel}`;
@@ -372,10 +154,9 @@ class AnthropicClient extends BaseClient {
// Prompt AI to respond, empty if last message was from AI
let isEdited = lastAuthor === this.assistantLabel;
const promptSuffix = isEdited ? '' : `${promptPrefix}${this.assistantLabel}\n`;
let currentTokenCount =
isEdited || this.useMessages
? this.getTokenCount(promptPrefix)
: this.getTokenCount(promptSuffix);
let currentTokenCount = isEdited
? this.getTokenCount(promptPrefix)
: this.getTokenCount(promptSuffix);
let promptBody = '';
const maxTokenCount = this.maxPromptTokens;
@@ -443,69 +224,7 @@ class AnthropicClient extends BaseClient {
return true;
};
const messagesPayload = [];
const buildMessagesPayload = async () => {
let canContinue = true;
if (promptPrefix) {
this.systemMessage = promptPrefix;
}
while (currentTokenCount < maxTokenCount && groupedMessages.length > 0 && canContinue) {
const message = groupedMessages.pop();
let tokenCountForMessage = message.tokenCount ?? this.getTokenCountForMessage(message);
const newTokenCount = currentTokenCount + tokenCountForMessage;
const exceededMaxCount = newTokenCount > maxTokenCount;
if (exceededMaxCount && messagesPayload.length === 0) {
throw new Error(
`Prompt is too long. Max token count is ${maxTokenCount}, but prompt is ${newTokenCount} tokens long.`,
);
} else if (exceededMaxCount) {
canContinue = false;
break;
}
delete message.tokenCount;
messagesPayload.unshift(message);
currentTokenCount = newTokenCount;
// Switch off isEdited after using it once
if (isEdited && message.role === 'assistant') {
isEdited = false;
}
// Wait for next tick to avoid blocking the event loop
await new Promise((resolve) => setImmediate(resolve));
}
};
const processTokens = () => {
// Add 2 tokens for metadata after all messages have been counted.
currentTokenCount += 2;
// Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
this.modelOptions.maxOutputTokens = Math.min(
this.maxContextTokens - currentTokenCount,
this.maxResponseTokens,
);
};
if (this.modelOptions.model.startsWith('claude-3')) {
await buildMessagesPayload();
processTokens();
return {
prompt: messagesPayload,
context: messagesInWindow,
promptTokens: currentTokenCount,
tokenCountMap,
};
} else {
await buildPromptBody();
processTokens();
}
await buildPromptBody();
if (nextMessage.remove) {
promptBody = promptBody.replace(nextMessage.messageString, '');
@@ -515,26 +234,22 @@ class AnthropicClient extends BaseClient {
let prompt = `${promptBody}${promptSuffix}`;
return { prompt, context, promptTokens: currentTokenCount, tokenCountMap };
// Add 2 tokens for metadata after all messages have been counted.
currentTokenCount += 2;
// Use up to `this.maxContextTokens` tokens (prompt + response), but try to leave `this.maxTokens` tokens for the response.
this.modelOptions.maxOutputTokens = Math.min(
this.maxContextTokens - currentTokenCount,
this.maxResponseTokens,
);
return { prompt, context };
}
getCompletion() {
logger.debug('AnthropicClient doesn\'t use getCompletion (all handled in sendCompletion)');
}
/**
* Creates a message or completion response using the Anthropic client.
* @param {Anthropic} client - The Anthropic client instance.
* @param {Anthropic.default.MessageCreateParams | Anthropic.default.CompletionCreateParams} options - The options for the message or completion.
* @param {boolean} useMessages - Whether to use messages or completions. Defaults to `this.useMessages`.
* @returns {Promise<Anthropic.default.Message | Anthropic.default.Completion>} The response from the Anthropic client.
*/
async createResponse(client, options, useMessages) {
return useMessages ?? this.useMessages
? await client.messages.create(options)
: await client.completions.create(options);
}
async sendCompletion(payload, { onProgress, abortController }) {
if (!abortController) {
abortController = new AbortController();
@@ -564,88 +279,36 @@ class AnthropicClient extends BaseClient {
topP: top_p,
topK: top_k,
} = this.modelOptions;
const requestOptions = {
prompt: payload,
model,
stream: stream || true,
max_tokens_to_sample: maxOutputTokens || 1500,
stop_sequences,
temperature,
metadata,
top_p,
top_k,
};
if (this.useMessages) {
requestOptions.messages = payload;
requestOptions.max_tokens = maxOutputTokens || 1500;
} else {
requestOptions.prompt = payload;
requestOptions.max_tokens_to_sample = maxOutputTokens || 1500;
}
if (this.systemMessage) {
requestOptions.system = this.systemMessage;
}
logger.debug('[AnthropicClient]', { ...requestOptions });
const response = await client.completions.create(requestOptions);
const handleChunk = (currentChunk) => {
if (currentChunk) {
text += currentChunk;
onProgress(currentChunk);
}
};
signal.addEventListener('abort', () => {
logger.debug('[AnthropicClient] message aborted!');
response.controller.abort();
});
const maxRetries = 3;
async function processResponse() {
let attempts = 0;
while (attempts < maxRetries) {
let response;
try {
response = await this.createResponse(client, requestOptions);
signal.addEventListener('abort', () => {
logger.debug('[AnthropicClient] message aborted!');
if (response.controller?.abort) {
response.controller.abort();
}
});
for await (const completion of response) {
// Handle each completion as before
if (completion?.delta?.text) {
handleChunk(completion.delta.text);
} else if (completion.completion) {
handleChunk(completion.completion);
}
}
// Successful processing, exit loop
break;
} catch (error) {
attempts += 1;
logger.warn(
`User: ${this.user} | Anthropic Request ${attempts} failed: ${error.message}`,
);
if (attempts < maxRetries) {
await delayBeforeRetry(attempts, 350);
} else {
throw new Error(`Operation failed after ${maxRetries} attempts: ${error.message}`);
}
} finally {
signal.removeEventListener('abort', () => {
logger.debug('[AnthropicClient] message aborted!');
if (response.controller?.abort) {
response.controller.abort();
}
});
}
}
for await (const completion of response) {
// Uncomment to debug message stream
// logger.debug(completion);
text += completion.completion;
onProgress(completion.completion);
}
await processResponse.bind(this)();
signal.removeEventListener('abort', () => {
logger.debug('[AnthropicClient] message aborted!');
response.controller.abort();
});
return text.trim();
}
@@ -654,7 +317,6 @@ class AnthropicClient extends BaseClient {
return {
promptPrefix: this.options.promptPrefix,
modelLabel: this.options.modelLabel,
resendFiles: this.options.resendFiles,
...this.modelOptions,
};
}
@@ -680,78 +342,6 @@ class AnthropicClient extends BaseClient {
getTokenCount(text) {
return this.gptEncoder.encode(text, 'all').length;
}
/**
* Generates a concise title for a conversation based on the user's input text and response.
* Involves sending a chat completion request with specific instructions for title generation.
*
* This function capitalizes on [Anthropic's function calling training](https://docs.anthropic.com/claude/docs/functions-external-tools).
*
* @param {Object} params - The parameters for the conversation title generation.
* @param {string} params.text - The user's input.
* @param {string} [params.responseText=''] - The AI's immediate response to the user.
*
* @returns {Promise<string | 'New Chat'>} A promise that resolves to the generated conversation title.
* In case of failure, it will return the default title, "New Chat".
*/
async titleConvo({ text, responseText = '' }) {
let title = 'New Chat';
const convo = `<initial_message>
${truncateText(text)}
</initial_message>
<response>
${JSON.stringify(truncateText(responseText))}
</response>`;
const { ANTHROPIC_TITLE_MODEL } = process.env ?? {};
const model = this.options.titleModel ?? ANTHROPIC_TITLE_MODEL ?? 'claude-3-haiku-20240307';
const system = titleFunctionPrompt;
const titleChatCompletion = async () => {
const content = `<conversation_context>
${convo}
</conversation_context>
Please generate a title for this conversation.`;
const titleMessage = { role: 'user', content };
const requestOptions = {
model,
temperature: 0.3,
max_tokens: 1024,
system,
stop_sequences: ['\n\nHuman:', '\n\nAssistant', '</function_calls>'],
messages: [titleMessage],
};
try {
const response = await this.createResponse(this.getClient(), requestOptions, true);
let promptTokens = response?.usage?.input_tokens;
let completionTokens = response?.usage?.output_tokens;
if (!promptTokens) {
promptTokens = this.getTokenCountForMessage(titleMessage);
promptTokens += this.getTokenCountForMessage({ role: 'system', content: system });
}
if (!completionTokens) {
completionTokens = this.getTokenCountForMessage(response.content[0]);
}
await this.recordTokenUsage({
model,
promptTokens,
completionTokens,
context: 'title',
});
const text = response.content[0].text;
title = parseTitleFromPrompt(text);
} catch (e) {
logger.error('[AnthropicClient] There was an issue generating the title', e);
}
};
await titleChatCompletion();
logger.debug('[AnthropicClient] Convo Title: ' + title);
return title;
}
}
module.exports = AnthropicClient;
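
The streaming path above retries failed Anthropic requests up to maxRetries times, waiting delayBeforeRetry(attempts, 350) between attempts. A standalone sketch of that linear-backoff pattern, generic over any async operation:

// Standalone sketch of the linear-backoff retry used above, assuming
// `operation` is any async function that may transiently fail.
function delayBeforeRetry(attempts, baseDelay = 1000) {
  return new Promise((resolve) => setTimeout(resolve, baseDelay * attempts));
}

async function withRetries(operation, maxRetries = 3, baseDelay = 350) {
  let attempts = 0;
  while (true) {
    try {
      return await operation();
    } catch (error) {
      attempts += 1;
      if (attempts >= maxRetries) {
        throw new Error(`Operation failed after ${maxRetries} attempts: ${error.message}`);
      }
      await delayBeforeRetry(attempts, baseDelay);
    }
  }
}

// Usage: const message = await withRetries(() => client.messages.create(requestOptions));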

View File

@@ -3,7 +3,6 @@ const { supportsBalanceCheck, Constants } = require('librechat-data-provider');
const { getConvo, getMessages, saveMessage, updateMessage, saveConvo } = require('~/models');
const { addSpaceIfNeeded, isEnabled } = require('~/server/utils');
const checkBalance = require('~/models/checkBalance');
const { getFiles } = require('~/models/File');
const TextStream = require('./TextStream');
const { logger } = require('~/config');
@@ -23,7 +22,7 @@ class BaseClient {
throw new Error('Method \'setOptions\' must be implemented.');
}
async getCompletion() {
getCompletion() {
throw new Error('Method \'getCompletion\' must be implemented.');
}
@@ -47,6 +46,10 @@ class BaseClient {
logger.debug('`[BaseClient] recordTokenUsage` not implemented.', response);
}
async addPreviousAttachments(messages) {
return messages;
}
async recordTokenUsage({ promptTokens, completionTokens }) {
logger.debug('`[BaseClient] recordTokenUsage` not implemented.', {
promptTokens,
@@ -444,8 +447,6 @@ class BaseClient {
}
const completion = await this.sendCompletion(payload, opts);
this.abortController.requestCompleted = true;
const responseMessage = {
messageId: responseMessageId,
conversationId,
@@ -456,7 +457,6 @@ class BaseClient {
sender: this.sender,
text: addSpaceIfNeeded(generation) + completion,
promptTokens,
...(this.metadata ?? {}),
};
if (
@@ -681,54 +681,6 @@ class BaseClient {
return await this.sendCompletion(payload, opts);
}
/**
*
* @param {TMessage[]} _messages
* @returns {Promise<TMessage[]>}
*/
async addPreviousAttachments(_messages) {
if (!this.options.resendFiles) {
return _messages;
}
/**
*
* @param {TMessage} message
*/
const processMessage = async (message) => {
if (!this.message_file_map) {
/** @type {Record<string, MongoFile[]>} */
this.message_file_map = {};
}
const fileIds = message.files.map((file) => file.file_id);
const files = await getFiles({
file_id: { $in: fileIds },
});
await this.addImageURLs(message, files);
this.message_file_map[message.messageId] = files;
return message;
};
const promises = [];
for (const message of _messages) {
if (!message.files) {
promises.push(message);
continue;
}
promises.push(processMessage(message));
}
const messages = await Promise.all(promises);
this.checkVisionRequest(Object.values(this.message_file_map ?? {}).flat());
return messages;
}
}
module.exports = BaseClient;

View File

@@ -1,19 +1,9 @@
const Keyv = require('keyv');
const crypto = require('crypto');
const {
EModelEndpoint,
resolveHeaders,
CohereConstants,
mapModelToAzureConfig,
} = require('librechat-data-provider');
const { CohereClient } = require('cohere-ai');
const Keyv = require('keyv');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const { fetchEventSource } = require('@waylaidwanderer/fetch-event-source');
const { createCoherePayload } = require('./llm');
const { Agent, ProxyAgent } = require('undici');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
const { extractBaseURL, constructAzureURL, genAzureChatCompletion } = require('~/utils');
const CHATGPT_MODEL = 'gpt-3.5-turbo';
const tokenizersCache = {};
@@ -150,13 +140,11 @@ class ChatGPTClient extends BaseClient {
return tokenizer;
}
/** @type {getCompletion} */
async getCompletion(input, onProgress, onTokenProgress, abortController = null) {
async getCompletion(input, onProgress, abortController = null) {
if (!abortController) {
abortController = new AbortController();
}
let modelOptions = { ...this.modelOptions };
const modelOptions = { ...this.modelOptions };
if (typeof onProgress === 'function') {
modelOptions.stream = true;
}
@@ -171,176 +159,56 @@ class ChatGPTClient extends BaseClient {
}
const { debug } = this.options;
let baseURL = this.completionsUrl;
const url = this.completionsUrl;
if (debug) {
console.debug();
console.debug(baseURL);
console.debug(url);
console.debug(modelOptions);
console.debug();
}
if (this.azure || this.options.azure) {
// Azure does not accept `model` in the body, so we need to remove it.
delete modelOptions.model;
}
const opts = {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(modelOptions),
dispatcher: new Agent({
bodyTimeout: 0,
headersTimeout: 0,
}),
};
if (this.isVisionModel) {
modelOptions.max_tokens = 4000;
}
/** @type {TAzureConfig | undefined} */
const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];
const isAzure = this.azure || this.options.azure;
if (
(isAzure && this.isVisionModel && azureConfig) ||
(azureConfig && this.isVisionModel && this.options.endpoint === EModelEndpoint.azureOpenAI)
) {
const { modelGroupMap, groupMap } = azureConfig;
const {
azureOptions,
baseURL,
headers = {},
serverless,
} = mapModelToAzureConfig({
modelName: modelOptions.model,
modelGroupMap,
groupMap,
});
opts.headers = resolveHeaders(headers);
this.langchainProxy = extractBaseURL(baseURL);
this.apiKey = azureOptions.azureOpenAIApiKey;
const groupName = modelGroupMap[modelOptions.model].group;
this.options.addParams = azureConfig.groupMap[groupName].addParams;
this.options.dropParams = azureConfig.groupMap[groupName].dropParams;
// Note: `forcePrompt` not re-assigned as only chat models are vision models
this.azure = !serverless && azureOptions;
this.azureEndpoint =
!serverless && genAzureChatCompletion(this.azure, modelOptions.model, this);
}
if (this.options.headers) {
opts.headers = { ...opts.headers, ...this.options.headers };
}
if (isAzure) {
// Azure does not accept `model` in the body, so we need to remove it.
delete modelOptions.model;
baseURL = this.langchainProxy
? constructAzureURL({
baseURL: this.langchainProxy,
azureOptions: this.azure,
})
: this.azureEndpoint.split(/(?<!\/)\/(chat|completion)\//)[0];
if (this.options.forcePrompt) {
baseURL += '/completions';
} else {
baseURL += '/chat/completions';
}
opts.defaultQuery = { 'api-version': this.azure.azureOpenAIApiVersion };
opts.headers = { ...opts.headers, 'api-key': this.apiKey };
if (this.apiKey && this.options.azure) {
opts.headers['api-key'] = this.apiKey;
} else if (this.apiKey) {
opts.headers.Authorization = `Bearer ${this.apiKey}`;
}
if (process.env.OPENAI_ORGANIZATION) {
opts.headers['OpenAI-Organization'] = process.env.OPENAI_ORGANIZATION;
}
if (this.useOpenRouter) {
opts.headers['HTTP-Referer'] = 'https://librechat.ai';
opts.headers['X-Title'] = 'LibreChat';
}
if (this.options.headers) {
opts.headers = { ...opts.headers, ...this.options.headers };
}
if (this.options.proxy) {
opts.dispatcher = new ProxyAgent(this.options.proxy);
}
/* hacky fixes for Mistral AI API:
- Re-orders system message to the top of the messages payload, as not allowed anywhere else
- If there is only one message and it's a system message, change the role to user
*/
if (baseURL.includes('https://api.mistral.ai/v1') && modelOptions.messages) {
const { messages } = modelOptions;
const systemMessageIndex = messages.findIndex((msg) => msg.role === 'system');
if (systemMessageIndex > 0) {
const [systemMessage] = messages.splice(systemMessageIndex, 1);
messages.unshift(systemMessage);
}
modelOptions.messages = messages;
if (messages.length === 1 && messages[0].role === 'system') {
modelOptions.messages[0].role = 'user';
}
}
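Note: the same reordering as a standalone sketch, for clarity (`reorderForMistral` is a hypothetical helper, not part of this diff):
const reorderForMistral = (messages) => {
  const idx = messages.findIndex((msg) => msg.role === 'system');
  if (idx > 0) {
    // Move the system message to the front; Mistral rejects it anywhere else
    const [systemMessage] = messages.splice(idx, 1);
    messages.unshift(systemMessage);
  }
  if (messages.length === 1 && messages[0].role === 'system') {
    // A lone system message is downgraded to a user message
    messages[0].role = 'user';
  }
  return messages;
};
// reorderForMistral([{ role: 'user', content: 'Hi' }, { role: 'system', content: 'Be terse' }])
// -> [{ role: 'system', content: 'Be terse' }, { role: 'user', content: 'Hi' }]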
if (this.options.addParams && typeof this.options.addParams === 'object') {
modelOptions = {
...modelOptions,
...this.options.addParams,
};
logger.debug('[ChatGPTClient] chatCompletion: added params', {
addParams: this.options.addParams,
modelOptions,
});
}
if (this.options.dropParams && Array.isArray(this.options.dropParams)) {
this.options.dropParams.forEach((param) => {
delete modelOptions[param];
});
logger.debug('[ChatGPTClient] chatCompletion: dropped params', {
dropParams: this.options.dropParams,
modelOptions,
});
}
if (baseURL.startsWith(CohereConstants.API_URL)) {
const payload = createCoherePayload({ modelOptions });
return await this.cohereChatCompletion({ payload, onTokenProgress });
}
if (baseURL.includes('v1') && !baseURL.includes('/completions') && !this.isChatCompletion) {
baseURL = baseURL.split('v1')[0] + 'v1/completions';
} else if (
baseURL.includes('v1') &&
!baseURL.includes('/chat/completions') &&
this.isChatCompletion
) {
baseURL = baseURL.split('v1')[0] + 'v1/chat/completions';
}
const BASE_URL = new URL(baseURL);
if (opts.defaultQuery) {
Object.entries(opts.defaultQuery).forEach(([key, value]) => {
BASE_URL.searchParams.append(key, value);
});
delete opts.defaultQuery;
}
const completionsURL = BASE_URL.toString();
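Note: how the `defaultQuery` entries land on the final URL (instance and API-version values here are hypothetical):
const base = new URL('https://my-instance.openai.azure.com/openai/deployments/gpt-4/chat/completions');
base.searchParams.append('api-version', '2023-12-01-preview');
console.log(base.toString());
// https://my-instance.openai.azure.com/openai/deployments/gpt-4/chat/completions?api-version=2023-12-01-preview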
opts.body = JSON.stringify(modelOptions);
if (modelOptions.stream) {
// eslint-disable-next-line no-async-promise-executor
return new Promise(async (resolve, reject) => {
try {
let done = false;
await fetchEventSource(completionsURL, {
await fetchEventSource(url, {
...opts,
signal: abortController.signal,
async onopen(response) {
@@ -368,6 +236,7 @@ class ChatGPTClient extends BaseClient {
// workaround for private API not sending [DONE] event
if (!done) {
onProgress('[DONE]');
abortController.abort();
resolve();
}
},
@@ -380,13 +249,14 @@ class ChatGPTClient extends BaseClient {
},
onmessage(message) {
if (debug) {
console.debug(message);
// console.debug(message);
}
if (!message.data || message.event === 'ping') {
return;
}
if (message.data === '[DONE]') {
onProgress('[DONE]');
abortController.abort();
resolve();
done = true;
return;
@@ -399,7 +269,7 @@ class ChatGPTClient extends BaseClient {
}
});
}
const response = await fetch(completionsURL, {
const response = await fetch(url, {
...opts,
signal: abortController.signal,
});
@@ -417,35 +287,6 @@ class ChatGPTClient extends BaseClient {
return response.json();
}
/** @type {cohereChatCompletion} */
async cohereChatCompletion({ payload, onTokenProgress }) {
const cohere = new CohereClient({
token: this.apiKey,
environment: this.completionsUrl,
});
if (!payload.stream) {
const chatResponse = await cohere.chat(payload);
return chatResponse.text;
}
const chatStream = await cohere.chatStream(payload);
let reply = '';
for await (const message of chatStream) {
if (!message) {
continue;
}
if (message.eventType === 'text-generation' && message.text) {
onTokenProgress(message.text);
} else if (message.eventType === 'stream-end' && message.response) {
reply = message.response.text;
}
}
return reply;
}
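Note: a rough consumption sketch for this method, assuming `client` is an instance exposing it; token deltas stream through `onTokenProgress` and the final text arrives at `stream-end` (payload values hypothetical):
const text = await client.cohereChatCompletion({
  payload: { model: 'command', message: 'Hello', stream: true },
  onTokenProgress: (token) => process.stdout.write(token),
});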
async generateTitle(userMessage, botMessage) {
const instructionsPayload = {
role: 'system',

View File

@@ -1,23 +1,20 @@
const { google } = require('googleapis');
const { Agent, ProxyAgent } = require('undici');
const { ChatVertexAI } = require('@langchain/google-vertexai');
const { GoogleVertexAI } = require('langchain/llms/googlevertexai');
const { ChatGoogleGenerativeAI } = require('@langchain/google-genai');
const { GoogleGenerativeAI: GenAI } = require('@google/generative-ai');
const { GoogleVertexAI } = require('@langchain/community/llms/googlevertexai');
const { ChatGoogleVertexAI } = require('langchain/chat_models/googlevertexai');
const { AIMessage, HumanMessage, SystemMessage } = require('langchain/schema');
const { encodeAndFormat } = require('~/server/services/Files/images');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
validateVisionModel,
getResponseSender,
endpointSettings,
EModelEndpoint,
VisionModes,
AuthKeys,
} = require('librechat-data-provider');
const { encodeAndFormat } = require('~/server/services/Files/images');
const { formatMessage, createContextHandlers } = require('./prompts');
const { getModelMaxTokens } = require('~/utils');
const { formatMessage } = require('./prompts');
const BaseClient = require('./BaseClient');
const { logger } = require('~/config');
@@ -127,11 +124,18 @@ class GoogleClient extends BaseClient {
// stop: modelOptions.stop // no stop method for now
};
this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));
if (this.options.attachments) {
this.modelOptions.model = 'gemini-pro-vision';
}
/** @type {boolean} Whether using a "GenerativeAI" Model */
// TODO: as of 12/14/23, only gemini models are "Generative AI" models provided by Google
this.isGenerativeModel = this.modelOptions.model.includes('gemini');
this.isVisionModel = validateVisionModel(this.modelOptions.model);
const { isGenerativeModel } = this;
if (this.isVisionModel && !this.options.attachments) {
this.modelOptions.model = 'gemini-pro';
this.isVisionModel = false;
}
this.isChatModel = !isGenerativeModel && this.modelOptions.model.includes('chat');
const { isChatModel } = this;
this.isTextModel =
@@ -216,33 +220,6 @@ class GoogleClient extends BaseClient {
return this;
}
/**
*
* Checks if the model is a vision model based on request attachments and sets the appropriate options.
* @param {MongoFile[]} attachments
*/
checkVisionRequest(attachments) {
/* Validate vision request */
this.defaultVisionModel = this.options.visionModel ?? 'gemini-pro-vision';
const availableModels = this.options.modelsConfig?.[EModelEndpoint.google];
this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
if (
attachments &&
attachments.some((file) => file?.type && file?.type?.includes('image')) &&
availableModels?.includes(this.defaultVisionModel) &&
!this.isVisionModel
) {
this.modelOptions.model = this.defaultVisionModel;
this.isVisionModel = true;
}
if (this.isVisionModel && !attachments && this.modelOptions.model.includes('gemini-pro')) {
this.modelOptions.model = 'gemini-pro';
this.isVisionModel = false;
}
}
formatMessages() {
return ((message) => ({
author: message?.author ?? (message.isCreatedByUser ? this.userLabel : this.modelLabel),
@@ -250,91 +227,18 @@ class GoogleClient extends BaseClient {
})).bind(this);
}
/**
* Formats messages for generative AI
* @param {TMessage[]} messages
* @returns {Promise<Array<{ role: string, parts: Array<Object> }>>} The formatted messages
*/
async formatGenerativeMessages(messages) {
const formattedMessages = [];
async buildVisionMessages(messages = [], parentMessageId) {
const { prompt } = await this.buildMessagesPrompt(messages, parentMessageId);
const attachments = await this.options.attachments;
const latestMessage = { ...messages[messages.length - 1] };
const files = await this.addImageURLs(latestMessage, attachments, VisionModes.generative);
this.options.attachments = files;
messages[messages.length - 1] = latestMessage;
for (const _message of messages) {
const role = _message.isCreatedByUser ? this.userLabel : this.modelLabel;
const parts = [];
parts.push({ text: _message.text });
if (!_message.image_urls?.length) {
formattedMessages.push({ role, parts });
continue;
}
for (const images of _message.image_urls) {
if (images.inlineData) {
parts.push({ inlineData: images.inlineData });
}
}
formattedMessages.push({ role, parts });
}
return formattedMessages;
}
/**
*
* Adds image URLs to the message object and returns the files
*
* @param {TMessage} message
* @param {MongoFile[]} attachments
* @param {string} [mode=''] - Optional vision mode passed to `encodeAndFormat`
* @returns {Promise<MongoFile[]>}
*/
async addImageURLs(message, attachments, mode = '') {
const { files, image_urls } = await encodeAndFormat(
this.options.req,
attachments,
attachments.filter((file) => file.type.includes('image')),
EModelEndpoint.google,
mode,
);
message.image_urls = image_urls.length ? image_urls : undefined;
return files;
}
/**
* Builds the augmented prompt for attachments
* TODO: Add File API Support
* @param {TMessage[]} messages
*/
async buildAugmentedPrompt(messages = []) {
const attachments = await this.options.attachments;
const latestMessage = { ...messages[messages.length - 1] };
this.contextHandlers = createContextHandlers(this.options.req, latestMessage.text);
if (this.contextHandlers) {
for (const file of attachments) {
if (file.embedded) {
this.contextHandlers?.processFile(file);
continue;
}
}
this.augmentedPrompt = await this.contextHandlers.createContext();
this.options.promptPrefix = this.augmentedPrompt + this.options.promptPrefix;
}
}
async buildVisionMessages(messages = [], parentMessageId) {
const attachments = await this.options.attachments;
const latestMessage = { ...messages[messages.length - 1] };
await this.buildAugmentedPrompt(messages);
const { prompt } = await this.buildMessagesPrompt(messages, parentMessageId);
const files = await this.addImageURLs(latestMessage, attachments);
latestMessage.image_urls = image_urls;
this.options.attachments = files;
latestMessage.text = prompt;
@@ -350,29 +254,18 @@ class GoogleClient extends BaseClient {
return { prompt: payload };
}
/** @param {TMessage[]} [messages=[]] */
async buildGenerativeMessages(messages = []) {
this.userLabel = 'user';
this.modelLabel = 'model';
const promises = [];
promises.push(await this.formatGenerativeMessages(messages));
promises.push(this.buildAugmentedPrompt(messages));
const [formattedMessages] = await Promise.all(promises);
return { prompt: formattedMessages };
}
async buildMessages(messages = [], parentMessageId) {
if (!this.isGenerativeModel && !this.project_id) {
throw new Error(
'[GoogleClient] a Service Account JSON Key is required for PaLM 2 and Codey models (Vertex AI)',
);
} else if (this.isGenerativeModel && (!this.apiKey || this.apiKey === 'user_provided')) {
throw new Error(
'[GoogleClient] an API Key is required for Gemini models (Generative Language API)',
);
}
if (!this.project_id && this.modelOptions.model.includes('1.5')) {
return await this.buildGenerativeMessages(messages);
}
if (this.options.attachments && this.isGenerativeModel) {
if (this.options.attachments) {
return this.buildVisionMessages(messages, parentMessageId);
}
@@ -586,24 +479,13 @@ class GoogleClient extends BaseClient {
}
createLLM(clientOptions) {
const model = clientOptions.modelName ?? clientOptions.model;
if (this.project_id && this.isTextModel) {
return new GoogleVertexAI(clientOptions);
} else if (this.project_id && this.isChatModel) {
return new ChatGoogleVertexAI(clientOptions);
} else if (this.project_id) {
return new ChatVertexAI(clientOptions);
} else if (model.includes('1.5')) {
return new GenAI(this.apiKey).getGenerativeModel(
{
...clientOptions,
model,
},
{ apiVersion: 'v1beta' },
);
if (this.isGenerativeModel) {
return new ChatGoogleGenerativeAI({ ...clientOptions, apiKey: this.apiKey });
}
return new ChatGoogleGenerativeAI({ ...clientOptions, apiKey: this.apiKey });
return this.isTextModel
? new GoogleVertexAI(clientOptions)
: new ChatGoogleVertexAI(clientOptions);
}
async getCompletion(_payload, options = {}) {
@@ -615,7 +497,7 @@ class GoogleClient extends BaseClient {
let clientOptions = { ...parameters, maxRetries: 2 };
if (this.project_id) {
if (!this.isGenerativeModel) {
clientOptions['authOptions'] = {
credentials: {
...this.serviceKey,
@@ -628,7 +510,7 @@ class GoogleClient extends BaseClient {
clientOptions = { ...clientOptions, ...this.modelOptions };
}
if (this.isGenerativeModel && !this.project_id) {
if (this.isGenerativeModel) {
clientOptions.modelName = clientOptions.model;
delete clientOptions.model;
}
@@ -659,46 +541,16 @@ class GoogleClient extends BaseClient {
messages.unshift(new SystemMessage(context));
}
const modelName = clientOptions.modelName ?? clientOptions.model ?? '';
if (modelName?.includes('1.5') && !this.project_id) {
/** @type {GenerativeModel} */
const client = model;
const requestOptions = {
contents: _payload,
};
if (this.options?.promptPrefix?.length) {
requestOptions.systemInstruction = {
parts: [
{
text: this.options.promptPrefix,
},
],
};
}
const result = await client.generateContentStream(requestOptions);
for await (const chunk of result.stream) {
const chunkText = chunk.text();
this.generateTextStream(chunkText, onProgress, {
delay: 12,
});
reply += chunkText;
}
return reply;
}
const stream = await model.stream(messages, {
signal: abortController.signal,
timeout: 7000,
});
for await (const chunk of stream) {
const chunkText = chunk?.content ?? chunk;
this.generateTextStream(chunkText, onProgress, {
await this.generateTextStream(chunk?.content ?? chunk, onProgress, {
delay: this.isGenerativeModel ? 12 : 8,
});
reply += chunkText;
reply += chunk?.content ?? chunk;
}
return reply;

View File

@@ -1,14 +1,10 @@
const OpenAI = require('openai');
const { HttpsProxyAgent } = require('https-proxy-agent');
const {
ImageDetail,
EModelEndpoint,
resolveHeaders,
ImageDetailCost,
CohereConstants,
getResponseSender,
validateVisionModel,
mapModelToAzureConfig,
ImageDetailCost,
ImageDetail,
} = require('librechat-data-provider');
const { encoding_for_model: encodingForModel, get_encoding: getEncoding } = require('tiktoken');
const {
@@ -17,19 +13,14 @@ const {
getModelMaxTokens,
genAzureChatCompletion,
} = require('~/utils');
const {
truncateText,
formatMessage,
createContextHandlers,
CUT_OFF_PROMPT,
titleInstruction,
} = require('./prompts');
const { encodeAndFormat } = require('~/server/services/Files/images/encode');
const { truncateText, formatMessage, CUT_OFF_PROMPT } = require('./prompts');
const { handleOpenAIErrors } = require('./tools/util');
const spendTokens = require('~/models/spendTokens');
const { createLLM, RunManager } = require('./llm');
const ChatGPTClient = require('./ChatGPTClient');
const { isEnabled } = require('~/server/utils');
const { getFiles } = require('~/models/File');
const { summaryBuffer } = require('./memory');
const { runTitleChain } = require('./chains');
const { tokenSplit } = require('./document');
@@ -46,10 +37,7 @@ class OpenAIClient extends BaseClient {
super(apiKey, options);
this.ChatGPTClient = new ChatGPTClient();
this.buildPrompt = this.ChatGPTClient.buildPrompt.bind(this);
/** @type {getCompletion} */
this.getCompletion = this.ChatGPTClient.getCompletion.bind(this);
/** @type {cohereChatCompletion} */
this.cohereChatCompletion = this.ChatGPTClient.cohereChatCompletion.bind(this);
this.contextStrategy = options.contextStrategy
? options.contextStrategy.toLowerCase()
: 'discard';
@@ -57,10 +45,6 @@ class OpenAIClient extends BaseClient {
/** @type {AzureOptions} */
this.azure = options.azure || false;
this.setOptions(options);
this.metadata = {};
/** @type {string | undefined} - The API Completions URL */
this.completionsUrl;
}
// TODO: PluginsClient calls this 3x, unneeded
@@ -104,12 +88,7 @@ class OpenAIClient extends BaseClient {
};
}
this.defaultVisionModel = this.options.visionModel ?? 'gpt-4-vision-preview';
if (typeof this.options.attachments?.then === 'function') {
this.options.attachments.then((attachments) => this.checkVisionRequest(attachments));
} else {
this.checkVisionRequest(this.options.attachments);
}
this.checkVisionRequest(this.options.attachments);
const { OPENROUTER_API_KEY, OPENAI_FORCE_PROMPT } = process.env ?? {};
if (OPENROUTER_API_KEY && !this.azure) {
@@ -240,20 +219,13 @@ class OpenAIClient extends BaseClient {
* - Sets `this.modelOptions.model` to `gpt-4-vision-preview` if the request is a vision request.
* - Sets `this.isVisionModel` to `true` if vision request.
* - Deletes `this.modelOptions.stop` if vision request.
* @param {MongoFile[]} attachments
* @param {Array<Promise<MongoFile[]> | MongoFile[]> | Record<string, MongoFile[]>} attachments
*/
checkVisionRequest(attachments) {
const availableModels = this.options.modelsConfig?.[this.options.endpoint];
this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
this.isVisionModel = validateVisionModel(this.modelOptions.model);
const visionModelAvailable = availableModels?.includes(this.defaultVisionModel);
if (
attachments &&
attachments.some((file) => file?.type && file?.type?.includes('image')) &&
visionModelAvailable &&
!this.isVisionModel
) {
this.modelOptions.model = this.defaultVisionModel;
if (attachments && !this.isVisionModel) {
this.modelOptions.model = 'gpt-4-vision-preview';
this.isVisionModel = true;
}
@@ -388,7 +360,7 @@ class OpenAIClient extends BaseClient {
return {
chatGptLabel: this.options.chatGptLabel,
promptPrefix: this.options.promptPrefix,
resendFiles: this.options.resendFiles,
resendImages: this.options.resendImages,
imageDetail: this.options.imageDetail,
...this.modelOptions,
};
@@ -402,6 +374,54 @@ class OpenAIClient extends BaseClient {
};
}
/**
* Re-adds previously attached files to the payload when `resendImages` is enabled.
* @param {TMessage[]} _messages
* @returns {Promise<TMessage[]>}
*/
async addPreviousAttachments(_messages) {
if (!this.options.resendImages) {
return _messages;
}
/**
*
* @param {TMessage} message
*/
const processMessage = async (message) => {
if (!this.message_file_map) {
/** @type {Record<string, MongoFile[]>} */
this.message_file_map = {};
}
const fileIds = message.files.map((file) => file.file_id);
const files = await getFiles({
file_id: { $in: fileIds },
});
await this.addImageURLs(message, files);
this.message_file_map[message.messageId] = files;
return message;
};
const promises = [];
for (const message of _messages) {
if (!message.files) {
promises.push(message);
continue;
}
promises.push(processMessage(message));
}
const messages = await Promise.all(promises);
this.checkVisionRequest(this.message_file_map);
return messages;
}
/**
*
* Adds image URLs to the message object and returns the files
@@ -412,7 +432,8 @@ class OpenAIClient extends BaseClient {
*/
async addImageURLs(message, attachments) {
const { files, image_urls } = await encodeAndFormat(this.options.req, attachments);
message.image_urls = image_urls.length ? image_urls : undefined;
message.image_urls = image_urls;
return files;
}
@@ -440,9 +461,23 @@ class OpenAIClient extends BaseClient {
let promptTokens;
promptPrefix = (promptPrefix || this.options.promptPrefix || '').trim();
if (promptPrefix) {
promptPrefix = `Instructions:\n${promptPrefix}`;
instructions = {
role: 'system',
name: 'instructions',
content: promptPrefix,
};
if (this.contextStrategy) {
instructions.tokenCount = this.getTokenCountForMessage(instructions);
}
}
if (this.options.attachments) {
const attachments = await this.options.attachments;
const attachments = (await this.options.attachments).filter((file) =>
file.type.includes('image'),
);
if (this.message_file_map) {
this.message_file_map[orderedMessages[orderedMessages.length - 1].messageId] = attachments;
@@ -460,13 +495,6 @@ class OpenAIClient extends BaseClient {
this.options.attachments = files;
}
if (this.message_file_map) {
this.contextHandlers = createContextHandlers(
this.options.req,
orderedMessages[orderedMessages.length - 1].text,
);
}
const formattedMessages = orderedMessages.map((message, i) => {
const formattedMessage = formatMessage({
message,
@@ -485,11 +513,6 @@ class OpenAIClient extends BaseClient {
if (this.message_file_map && this.message_file_map[message.messageId]) {
const attachments = this.message_file_map[message.messageId];
for (const file of attachments) {
if (file.embedded) {
this.contextHandlers?.processFile(file);
continue;
}
orderedMessages[i].tokenCount += this.calculateImageTokenCost({
width: file.width,
height: file.height,
@@ -501,24 +524,6 @@ class OpenAIClient extends BaseClient {
return formattedMessage;
});
if (this.contextHandlers) {
this.augmentedPrompt = await this.contextHandlers.createContext();
promptPrefix = this.augmentedPrompt + promptPrefix;
}
if (promptPrefix) {
promptPrefix = `Instructions:\n${promptPrefix.trim()}`;
instructions = {
role: 'system',
name: 'instructions',
content: promptPrefix,
};
if (this.contextStrategy) {
instructions.tokenCount = this.getTokenCountForMessage(instructions);
}
}
// TODO: need to handle interleaving instructions better
if (this.contextStrategy) {
({ payload, tokenCountMap, promptTokens, messages } = await this.handleContextStrategy({
@@ -546,16 +551,15 @@ class OpenAIClient extends BaseClient {
return result;
}
/** @type {sendCompletion} */
async sendCompletion(payload, opts = {}) {
let reply = '';
let result = null;
let streamResult = null;
this.modelOptions.user = this.user;
const invalidBaseUrl = this.completionsUrl && extractBaseURL(this.completionsUrl) === null;
const useOldMethod = !!(invalidBaseUrl || !this.isChatCompletion || typeof Bun !== 'undefined');
const useOldMethod = !!(invalidBaseUrl || !this.isChatCompletion);
if (typeof opts.onProgress === 'function' && useOldMethod) {
const completionResult = await this.getCompletion(
await this.getCompletion(
payload,
(progressMessage) => {
if (progressMessage === '[DONE]') {
@@ -588,16 +592,12 @@ class OpenAIClient extends BaseClient {
opts.onProgress(token);
reply += token;
},
opts.onProgress,
opts.abortController || new AbortController(),
);
if (completionResult && typeof completionResult === 'string') {
reply = completionResult;
}
} else if (typeof opts.onProgress === 'function' || this.options.useChatCompletion) {
reply = await this.chatCompletion({
payload,
clientOptions: opts,
onProgress: opts.onProgress,
abortController: opts.abortController,
});
@@ -605,14 +605,9 @@ class OpenAIClient extends BaseClient {
result = await this.getCompletion(
payload,
null,
opts.onProgress,
opts.abortController || new AbortController(),
);
if (result && typeof result === 'string') {
return result.trim();
}
logger.debug('[OpenAIClient] sendCompletion: result', result);
if (this.isChatCompletion) {
@@ -622,11 +617,11 @@ class OpenAIClient extends BaseClient {
}
}
if (streamResult) {
if (streamResult && typeof opts.addMetadata === 'function') {
const { finish_reason } = streamResult.choices[0];
this.metadata = { finish_reason };
opts.addMetadata({ finish_reason });
}
return (reply ?? '').trim();
return reply.trim();
}
initializeLLM({
@@ -670,16 +665,6 @@ class OpenAIClient extends BaseClient {
};
}
const { headers } = this.options;
if (headers && typeof headers === 'object' && !Array.isArray(headers)) {
configOptions.baseOptions = {
headers: resolveHeaders({
...headers,
...configOptions?.baseOptions?.headers,
}),
};
}
if (this.options.proxy) {
configOptions.httpAgent = new HttpsProxyAgent(this.options.proxy);
configOptions.httpsAgent = new HttpsProxyAgent(this.options.proxy);
@@ -740,39 +725,6 @@ class OpenAIClient extends BaseClient {
max_tokens: 16,
};
/** @type {TAzureConfig | undefined} */
const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];
const resetTitleOptions = !!(
(this.azure && azureConfig) ||
(azureConfig && this.options.endpoint === EModelEndpoint.azureOpenAI)
);
if (resetTitleOptions) {
const { modelGroupMap, groupMap } = azureConfig;
const {
azureOptions,
baseURL,
headers = {},
serverless,
} = mapModelToAzureConfig({
modelName: modelOptions.model,
modelGroupMap,
groupMap,
});
this.options.headers = resolveHeaders(headers);
this.options.reverseProxyUrl = baseURL ?? null;
this.langchainProxy = extractBaseURL(this.options.reverseProxyUrl);
this.apiKey = azureOptions.azureOpenAIApiKey;
const groupName = modelGroupMap[modelOptions.model].group;
this.options.addParams = azureConfig.groupMap[groupName].addParams;
this.options.dropParams = azureConfig.groupMap[groupName].dropParams;
this.options.forcePrompt = azureConfig.groupMap[groupName].forcePrompt;
this.azure = !serverless && azureOptions;
}
const titleChatCompletion = async () => {
modelOptions.model = model;
@@ -784,7 +736,8 @@ class OpenAIClient extends BaseClient {
const instructionsPayload = [
{
role: 'system',
content: `Please generate ${titleInstruction}
content: `Detect user language and write in the same language an extremely concise title for this conversation, which you must accurately detect.
Write in the detected language. Title in 5 Words or Less. No Punctuation or Quotation. Do not mention the language. All first letters of every word should be capitalized and write the title in User Language only.
${convo}
@@ -792,18 +745,10 @@ ${convo}
},
];
const promptTokens = this.getTokenCountForMessage(instructionsPayload[0]);
try {
let useChatCompletion = true;
if (this.options.reverseProxyUrl === CohereConstants.API_URL) {
useChatCompletion = false;
}
title = (
await this.sendPayload(instructionsPayload, { modelOptions, useChatCompletion })
await this.sendPayload(instructionsPayload, { modelOptions, useChatCompletion: true })
).replaceAll('"', '');
const completionTokens = this.getTokenCount(title);
this.recordTokenUsage({ promptTokens, completionTokens, context: 'title' });
} catch (e) {
logger.error(
'[OpenAIClient] There was an issue generating the title with the completion method',
@@ -955,12 +900,13 @@ ${convo}
}
}
async recordTokenUsage({ promptTokens, completionTokens, context = 'message' }) {
async recordTokenUsage({ promptTokens, completionTokens }) {
logger.debug('[OpenAIClient] recordTokenUsage:', { promptTokens, completionTokens });
await spendTokens(
{
context,
user: this.user,
model: this.modelOptions.model,
context: 'message',
conversationId: this.conversationId,
endpointTokenConfig: this.options.endpointTokenConfig,
},
@@ -975,7 +921,7 @@ ${convo}
});
}
async chatCompletion({ payload, onProgress, abortController = null }) {
async chatCompletion({ payload, onProgress, clientOptions, abortController = null }) {
let error = null;
const errorCallback = (err) => (error = err);
let intermediateReply = '';
@@ -996,6 +942,15 @@ ${convo}
}
const baseURL = extractBaseURL(this.completionsUrl);
// let { messages: _msgsToLog, ...modelOptionsToLog } = modelOptions;
// if (modelOptionsToLog.messages) {
// _msgsToLog = modelOptionsToLog.messages.map((msg) => {
// let { content, ...rest } = msg;
// if (content)
// return { ...rest, content: truncateText(content) };
// });
// }
logger.debug('[OpenAIClient] chatCompletion', { baseURL, modelOptions });
const opts = {
baseURL,
@@ -1020,38 +975,6 @@ ${convo}
modelOptions.max_tokens = 4000;
}
/** @type {TAzureConfig | undefined} */
const azureConfig = this.options?.req?.app?.locals?.[EModelEndpoint.azureOpenAI];
if (
(this.azure && this.isVisionModel && azureConfig) ||
(azureConfig && this.isVisionModel && this.options.endpoint === EModelEndpoint.azureOpenAI)
) {
const { modelGroupMap, groupMap } = azureConfig;
const {
azureOptions,
baseURL,
headers = {},
serverless,
} = mapModelToAzureConfig({
modelName: modelOptions.model,
modelGroupMap,
groupMap,
});
opts.defaultHeaders = resolveHeaders(headers);
this.langchainProxy = extractBaseURL(baseURL);
this.apiKey = azureOptions.azureOpenAIApiKey;
const groupName = modelGroupMap[modelOptions.model].group;
this.options.addParams = azureConfig.groupMap[groupName].addParams;
this.options.dropParams = azureConfig.groupMap[groupName].dropParams;
// Note: `forcePrompt` not re-assigned as only chat models are vision models
this.azure = !serverless && azureOptions;
this.azureEndpoint =
!serverless && genAzureChatCompletion(this.azure, modelOptions.model, this);
}
if (this.azure || this.options.azure) {
// Azure does not accept `model` in the body, so we need to remove it.
delete modelOptions.model;
@@ -1059,10 +982,9 @@ ${convo}
opts.baseURL = this.langchainProxy
? constructAzureURL({
baseURL: this.langchainProxy,
azureOptions: this.azure,
azure: this.azure,
})
: this.azureEndpoint.split(/(?<!\/)\/(chat|completion)\//)[0];
: this.azureEndpoint.split(/\/(chat|completion)/)[0];
opts.defaultQuery = { 'api-version': this.azure.azureOpenAIApiVersion };
opts.defaultHeaders = { ...opts.defaultHeaders, 'api-key': this.apiKey };
}
@@ -1072,7 +994,6 @@ ${convo}
}
let chatCompletion;
/** @type {OpenAI} */
const openai = new OpenAI({
apiKey: this.apiKey,
...opts,
@@ -1104,20 +1025,12 @@ ${convo}
...modelOptions,
...this.options.addParams,
};
logger.debug('[OpenAIClient] chatCompletion: added params', {
addParams: this.options.addParams,
modelOptions,
});
}
if (this.options.dropParams && Array.isArray(this.options.dropParams)) {
this.options.dropParams.forEach((param) => {
delete modelOptions[param];
});
logger.debug('[OpenAIClient] chatCompletion: dropped params', {
dropParams: this.options.dropParams,
modelOptions,
});
}
let UnexpectedRoleError = false;
@@ -1133,16 +1046,6 @@ ${convo}
.on('error', (err) => {
handleOpenAIErrors(err, errorCallback, 'stream');
})
.on('finalChatCompletion', (finalChatCompletion) => {
const finalMessage = finalChatCompletion?.choices?.[0]?.message;
if (finalMessage && finalMessage?.role !== 'assistant') {
finalChatCompletion.choices[0].message.role = 'assistant';
}
if (finalMessage && !finalMessage?.content?.trim()) {
finalChatCompletion.choices[0].message.content = intermediateReply;
}
})
.on('finalMessage', (message) => {
if (message?.role !== 'assistant') {
stream.messages.push({ role: 'assistant', content: intermediateReply });
@@ -1188,20 +1091,12 @@ ${convo}
}
const { message, finish_reason } = chatCompletion.choices[0];
if (chatCompletion) {
this.metadata = { finish_reason };
if (chatCompletion && typeof clientOptions.addMetadata === 'function') {
clientOptions.addMetadata({ finish_reason });
}
logger.debug('[OpenAIClient] chatCompletion response', chatCompletion);
if (!message?.content?.trim() && intermediateReply.length) {
logger.debug(
'[OpenAIClient] chatCompletion: using intermediateReply due to empty message.content',
{ intermediateReply },
);
return intermediateReply;
}
return message.content;
} catch (err) {
if (
@@ -1214,9 +1109,6 @@ ${convo}
err?.message?.includes(
'OpenAI error: Invalid final message: OpenAI expects final message to include role=assistant',
) ||
err?.message?.includes(
'stream ended without producing a ChatCompletionMessage with role=assistant',
) ||
err?.message?.includes('The server had an error processing your request') ||
err?.message?.includes('missing finish_reason') ||
err?.message?.includes('missing role') ||

View File

@@ -31,6 +31,10 @@ class PluginsClient extends OpenAIClient {
super.setOptions(options);
if (this.functionsAgent && this.agentOptions.model && !this.useOpenRouter && !this.azure) {
this.agentOptions.model = this.getFunctionModelName(this.agentOptions.model);
}
this.isGpt3 = this.modelOptions?.model?.includes('gpt-3');
if (this.options.reverseProxyUrl) {
@@ -244,7 +248,7 @@ class PluginsClient extends OpenAIClient {
this.setOptions(opts);
return super.sendMessage(message, opts);
}
logger.debug('[PluginsClient] sendMessage', { userMessageText: message, opts });
logger.debug('[PluginsClient] sendMessage', { message, opts });
const {
user,
isEdited,

View File

@@ -1,85 +0,0 @@
const { CohereConstants } = require('librechat-data-provider');
const { titleInstruction } = require('../prompts/titlePrompts');
// Mapping OpenAI roles to Cohere roles
const roleMap = {
user: CohereConstants.ROLE_USER,
assistant: CohereConstants.ROLE_CHATBOT,
system: CohereConstants.ROLE_SYSTEM, // Recognize and map the system role explicitly
};
/**
* Adjusts an OpenAI ChatCompletionPayload to conform with Cohere's expected chat payload format.
* Now includes explicit handling for "system" roles.
*
* @param {Object} options - Object containing the model options.
* @param {ChatCompletionPayload} options.modelOptions - The OpenAI model payload options.
* @returns {CohereChatStreamRequest} Cohere-compatible chat API payload.
*/
function createCoherePayload({ modelOptions }) {
/** @type {string | undefined} */
let preamble;
let latestUserMessageContent = '';
const {
stream,
stop,
top_p,
temperature,
frequency_penalty,
presence_penalty,
max_tokens,
messages,
model,
...rest
} = modelOptions;
// Filter out the latest user message and transform remaining messages to Cohere's chat_history format
let chatHistory = messages.reduce((acc, message, index, arr) => {
const isLastUserMessage = index === arr.length - 1 && message.role === 'user';
const messageContent =
typeof message.content === 'string'
? message.content
: message.content.map((part) => (part.type === 'text' ? part.text : '')).join(' ');
if (isLastUserMessage) {
latestUserMessageContent = messageContent;
} else {
acc.push({
role: roleMap[message.role] || CohereConstants.ROLE_USER,
message: messageContent,
});
}
return acc;
}, []);
if (
chatHistory.length === 1 &&
chatHistory[0].role === CohereConstants.ROLE_SYSTEM &&
!latestUserMessageContent.length
) {
const message = chatHistory[0].message;
latestUserMessageContent = message.includes(titleInstruction)
? CohereConstants.TITLE_MESSAGE
: '.';
preamble = message;
}
return {
message: latestUserMessageContent,
model: model,
chatHistory,
stream: stream ?? false,
temperature: temperature,
frequencyPenalty: frequency_penalty,
presencePenalty: presence_penalty,
maxTokens: max_tokens,
stopSequences: stop,
preamble,
p: top_p,
...rest,
};
}
module.exports = createCoherePayload;
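Note: a sketch of the mapping on a small hypothetical payload (assuming CohereConstants.ROLE_SYSTEM === 'SYSTEM'):
const payload = createCoherePayload({
  modelOptions: {
    model: 'command',
    stream: true,
    temperature: 0.7,
    messages: [
      { role: 'system', content: 'You are terse.' },
      { role: 'user', content: 'Hello' },
    ],
  },
});
// payload.message === 'Hello' (the latest user message)
// payload.chatHistory === [{ role: 'SYSTEM', message: 'You are terse.' }]
// payload.stream === true, payload.temperature === 0.7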

View File

@@ -55,18 +55,16 @@ function createLLM({
}
if (azure && configOptions.basePath) {
const azureURL = constructAzureURL({
configOptions.basePath = constructAzureURL({
baseURL: configOptions.basePath,
azureOptions,
azure: azureOptions,
});
azureOptions.azureOpenAIBasePath = azureURL.split(
`/${azureOptions.azureOpenAIApiDeploymentName}`,
)[0];
}
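Note: the intent of the removed base-path derivation, worked on a hypothetical URL:
const azureURL = 'https://my-instance.openai.azure.com/openai/deployments/gpt-4';
const deploymentName = 'gpt-4'; // azureOptions.azureOpenAIApiDeploymentName
const azureOpenAIBasePath = azureURL.split(`/${deploymentName}`)[0];
// -> 'https://my-instance.openai.azure.com/openai/deployments'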
return new ChatOpenAI(
{
streaming,
verbose: true,
credentials,
configuration,
...azureOptions,

View File

@@ -1,9 +1,7 @@
const createLLM = require('./createLLM');
const RunManager = require('./RunManager');
const createCoherePayload = require('./createCoherePayload');
module.exports = {
createLLM,
RunManager,
createCoherePayload,
};

View File

@@ -1,159 +0,0 @@
const axios = require('axios');
const { isEnabled } = require('~/server/utils');
const { logger } = require('~/config');
const footer = `Use the context as your learned knowledge to better answer the user.
In your response, remember to follow these guidelines:
- If you don't know the answer, simply say that you don't know.
- If you are unsure how to answer, ask for clarification.
- Avoid mentioning that you obtained the information from the context.
Answer appropriately in the user's language.
`;
function createContextHandlers(req, userMessageContent) {
if (!process.env.RAG_API_URL) {
return;
}
const queryPromises = [];
const processedFiles = [];
const processedIds = new Set();
const jwtToken = req.headers.authorization.split(' ')[1];
const useFullContext = isEnabled(process.env.RAG_USE_FULL_CONTEXT);
const query = async (file) => {
if (useFullContext) {
return axios.get(`${process.env.RAG_API_URL}/documents/${file.file_id}/context`, {
headers: {
Authorization: `Bearer ${jwtToken}`,
},
});
}
return axios.post(
`${process.env.RAG_API_URL}/query`,
{
file_id: file.file_id,
query: userMessageContent,
k: 4,
},
{
headers: {
Authorization: `Bearer ${jwtToken}`,
'Content-Type': 'application/json',
},
},
);
};
const processFile = async (file) => {
if (file.embedded && !processedIds.has(file.file_id)) {
try {
const promise = query(file);
queryPromises.push(promise);
processedFiles.push(file);
processedIds.add(file.file_id);
} catch (error) {
logger.error(`Error processing file ${file.filename}:`, error);
}
}
};
const createContext = async () => {
try {
if (!queryPromises.length || !processedFiles.length) {
return '';
}
const oneFile = processedFiles.length === 1;
const header = `The user has attached ${oneFile ? 'a' : processedFiles.length} file${
!oneFile ? 's' : ''
} to the conversation:`;
const files = `${
oneFile
? ''
: `
<files>`
}${processedFiles
.map(
(file) => `
<file>
<filename>${file.filename}</filename>
<type>${file.type}</type>
</file>`,
)
.join('')}${
oneFile
? ''
: `
</files>`
}`;
const resolvedQueries = await Promise.all(queryPromises);
const context = resolvedQueries
.map((queryResult, index) => {
const file = processedFiles[index];
let contextItems = queryResult.data;
const generateContext = (currentContext) =>
`
<file>
<filename>${file.filename}</filename>
<context>${currentContext}
</context>
</file>`;
if (useFullContext) {
return generateContext(`\n${contextItems}`);
}
contextItems = queryResult.data
.map((item) => {
const pageContent = item[0].page_content;
return `
<contextItem>
<![CDATA[${pageContent?.trim()}]]>
</contextItem>`;
})
.join('');
return generateContext(contextItems);
})
.join('');
if (useFullContext) {
const prompt = `${header}
${context}
${footer}`;
return prompt;
}
const prompt = `${header}
${files}
A semantic search was executed with the user's message as the query, retrieving the following context inside <context></context> XML tags.
<context>${context}
</context>
${footer}`;
return prompt;
} catch (error) {
logger.error('Error creating context:', error);
throw error;
}
};
return {
processFile,
createContext,
};
}
module.exports = createContextHandlers;
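Note: a usage sketch for these handlers (requires RAG_API_URL; the `req` and `attachments` shapes are assumptions based on the code above):
const handlers = createContextHandlers(req, 'What does the report conclude?');
if (handlers) {
  for (const file of attachments) {
    if (file.embedded) {
      handlers.processFile(file); // queues one RAG query per embedded file
    }
  }
  const context = await handlers.createContext(); // header + <context> XML + footer guidelines
}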

View File

@@ -1,34 +0,0 @@
/**
* Generates a prompt instructing the model to describe an image in detail, tailored to different types of visual content.
* @param {boolean} pluralized - Whether to pluralize the prompt for multiple images.
* @returns {string} - The generated vision prompt.
*/
const createVisionPrompt = (pluralized = false) => {
return `Please describe the image${
pluralized ? 's' : ''
} in detail, covering relevant aspects such as:
For photographs, illustrations, or artwork:
- The main subject(s) and their appearance, positioning, and actions
- The setting, background, and any notable objects or elements
- Colors, lighting, and overall mood or atmosphere
- Any interesting details, textures, or patterns
- The style, technique, or medium used (if discernible)
For screenshots or images containing text:
- The content and purpose of the text
- The layout, formatting, and organization of the information
- Any notable visual elements, such as logos, icons, or graphics
- The overall context or message conveyed by the screenshot
For graphs, charts, or data visualizations:
- The type of graph or chart (e.g., bar graph, line chart, pie chart)
- The variables being compared or analyzed
- Any trends, patterns, or outliers in the data
- The axis labels, scales, and units of measurement
- The title, legend, and any additional context provided
Be as specific and descriptive as possible while maintaining clarity and concision.`;
};
module.exports = createVisionPrompt;
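Note: usage is a single call; the flag only toggles pluralization:
createVisionPrompt(false); // 'Please describe the image in detail, covering relevant aspects such as: ...'
createVisionPrompt(true);  // 'Please describe the images in detail, covering relevant aspects such as: ...'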

View File

@@ -1,4 +1,3 @@
const { EModelEndpoint } = require('librechat-data-provider');
const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema');
/**
@@ -8,16 +7,10 @@ const { HumanMessage, AIMessage, SystemMessage } = require('langchain/schema');
* @param {Object} params.message - The message object to format.
* @param {string} [params.message.role] - The role of the message sender (must be 'user').
* @param {string} [params.message.content] - The text content of the message.
* @param {EModelEndpoint} [params.endpoint] - Identifier for specific endpoint handling
* @param {Array<string>} [params.image_urls] - The image_urls to attach to the message.
* @returns {(Object)} - The formatted message.
*/
const formatVisionMessage = ({ message, image_urls, endpoint }) => {
if (endpoint === EModelEndpoint.anthropic) {
message.content = [...image_urls, { type: 'text', text: message.content }];
return message;
}
const formatVisionMessage = ({ message, image_urls }) => {
message.content = [{ type: 'text', text: message.content }, ...image_urls];
return message;
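Note: the endpoint check only changes part ordering; a sketch for a hypothetical message (assuming EModelEndpoint.anthropic === 'anthropic'):
const image_urls = [{ type: 'image_url', image_url: { url: 'https://example.com/cat.png' } }];
formatVisionMessage({ message: { role: 'user', content: 'What is this?' }, image_urls, endpoint: 'anthropic' }).content;
// -> [imagePart, { type: 'text', text: 'What is this?' }]  (images first)
formatVisionMessage({ message: { role: 'user', content: 'What is this?' }, image_urls }).content;
// -> [{ type: 'text', text: 'What is this?' }, imagePart]  (text first)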
@@ -36,11 +29,10 @@ const formatVisionMessage = ({ message, image_urls, endpoint }) => {
* @param {Array<string>} [params.message.image_urls] - The image_urls attached to the message for Vision API.
* @param {string} [params.userName] - The name of the user.
* @param {string} [params.assistantName] - The name of the assistant.
* @param {string} [params.endpoint] - Identifier for specific endpoint handling
* @param {boolean} [params.langChain=false] - Whether to return a LangChain message object.
* @returns {(Object|HumanMessage|AIMessage|SystemMessage)} - The formatted message.
*/
const formatMessage = ({ message, userName, assistantName, endpoint, langChain = false }) => {
const formatMessage = ({ message, userName, assistantName, langChain = false }) => {
let { role: _role, _name, sender, text, content: _content, lc_id } = message;
if (lc_id && lc_id[2] && !langChain) {
const roleMapping = {
@@ -59,11 +51,7 @@ const formatMessage = ({ message, userName, assistantName, endpoint, langChain =
const { image_urls } = message;
if (Array.isArray(image_urls) && image_urls.length > 0 && role === 'user') {
return formatVisionMessage({
message: formattedMessage,
image_urls: message.image_urls,
endpoint,
});
return formatVisionMessage({ message: formattedMessage, image_urls: message.image_urls });
}
if (_name) {

View File

@@ -4,8 +4,6 @@ const handleInputs = require('./handleInputs');
const instructions = require('./instructions');
const titlePrompts = require('./titlePrompts');
const truncateText = require('./truncateText');
const createVisionPrompt = require('./createVisionPrompt');
const createContextHandlers = require('./createContextHandlers');
module.exports = {
...formatMessages,
@@ -13,7 +11,5 @@ module.exports = {
...handleInputs,
...instructions,
...titlePrompts,
...truncateText,
createVisionPrompt,
createContextHandlers,
truncateText,
};

View File

@@ -27,63 +27,7 @@ ${convo}`,
return titlePrompt;
};
const titleInstruction =
'a concise, 5-word-or-less title for the conversation, using its same language, with no punctuation. Apply title case conventions appropriate for the language. For English, use AP Stylebook Title Case. Never directly mention the language name or the word "title"';
const titleFunctionPrompt = `In this environment you have access to a set of tools you can use to generate the conversation title.
You may call them like this:
<function_calls>
<invoke>
<tool_name>$TOOL_NAME</tool_name>
<parameters>
<$PARAMETER_NAME>$PARAMETER_VALUE</$PARAMETER_NAME>
...
</parameters>
</invoke>
</function_calls>
Here are the tools available:
<tools>
<tool_description>
<tool_name>submit_title</tool_name>
<description>
Submit a brief title in the conversation's language, following the parameter description closely.
</description>
<parameters>
<parameter>
<name>title</name>
<type>string</type>
<description>${titleInstruction}</description>
</parameter>
</parameters>
</tool_description>
</tools>`;
/**
* Parses titles from title functions based on the provided prompt.
* @param {string} prompt - The prompt containing the title function.
* @returns {string} The parsed title. "New Chat" if no title is found.
*/
function parseTitleFromPrompt(prompt) {
const titleRegex = /<title>(.+?)<\/title>/;
const titleMatch = prompt.match(titleRegex);
if (titleMatch && titleMatch[1]) {
const title = titleMatch[1].trim();
// // Capitalize the first letter of each word; Note: unnecessary due to title case prompting
// const capitalizedTitle = title.replace(/\b\w/g, (char) => char.toUpperCase());
return title;
}
return 'New Chat';
}
module.exports = {
langPrompt,
titleInstruction,
createTitlePrompt,
titleFunctionPrompt,
parseTitleFromPrompt,
};
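Note: for example (values hypothetical):
parseTitleFromPrompt('<invoke>...<title>Weekly Budget Review</title>...</invoke>');
// -> 'Weekly Budget Review'
parseTitleFromPrompt('no title tags here');
// -> 'New Chat'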

View File

@@ -1,40 +1,10 @@
const MAX_CHAR = 255;
/**
* Truncates a given text to a specified maximum length, appending ellipsis and a notification
* if the original text exceeds the maximum length.
*
* @param {string} text - The text to be truncated.
* @param {number} [maxLength=MAX_CHAR] - The maximum length of the text after truncation. Defaults to MAX_CHAR.
* @returns {string} The truncated text if the original text length exceeds maxLength, otherwise returns the original text.
*/
function truncateText(text, maxLength = MAX_CHAR) {
if (text.length > maxLength) {
return `${text.slice(0, maxLength)}... [text truncated for brevity]`;
function truncateText(text) {
if (text.length > MAX_CHAR) {
return `${text.slice(0, MAX_CHAR)}... [text truncated for brevity]`;
}
return text;
}
/**
* Truncates a given text to a specified maximum length by showing the first half and the last half of the text,
* separated by ellipsis. This method ensures the output does not exceed the maximum length, including the addition
* of ellipsis and notification if the original text exceeds the maximum length.
*
* @param {string} text - The text to be truncated.
* @param {number} [maxLength=MAX_CHAR] - The maximum length of the output text after truncation. Defaults to MAX_CHAR.
* @returns {string} The truncated text showing the first half and the last half, or the original text if it does not exceed maxLength.
*/
function smartTruncateText(text, maxLength = MAX_CHAR) {
const ellipsis = '...';
const notification = ' [text truncated for brevity]';
const halfMaxLength = Math.floor((maxLength - ellipsis.length - notification.length) / 2);
if (text.length > maxLength) {
const startLastHalf = text.length - halfMaxLength;
return `${text.slice(0, halfMaxLength)}${ellipsis}${text.slice(startLastHalf)}${notification}`;
}
return text;
}
module.exports = { truncateText, smartTruncateText };
module.exports = truncateText;
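Note: the arithmetic in `smartTruncateText`, worked for the default MAX_CHAR = 255: the ellipsis takes 3 characters and the notification 29, leaving 223, so each half is Math.floor(223 / 2) = 111 characters:
// assuming: const { smartTruncateText } = require('./truncateText');
const long = 'x'.repeat(500);
smartTruncateText(long).length;
// 111 + 3 + 111 + 29 === 254, always within maxLength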

View File

@@ -0,0 +1,121 @@
const { google } = require('googleapis');
const { Tool } = require('langchain/tools');
const { logger } = require('~/config');
/**
* Represents a tool that allows an agent to use the Google Custom Search API.
* @extends Tool
*/
class GoogleSearchAPI extends Tool {
constructor(fields = {}) {
super();
this.cx = fields.GOOGLE_CSE_ID || this.getCx();
this.apiKey = fields.GOOGLE_API_KEY || this.getApiKey();
this.customSearch = undefined;
}
/**
* The name of the tool.
* @type {string}
*/
name = 'google';
/**
* A description for the agent to use
* @type {string}
*/
description =
'Use the \'google\' tool to retrieve internet search results relevant to your input. The results will return links and snippets of text from the webpages';
description_for_model =
'Use the \'google\' tool to retrieve internet search results relevant to your input. The results will return links and snippets of text from the webpages';
getCx() {
const cx = process.env.GOOGLE_CSE_ID || '';
if (!cx) {
throw new Error('Missing GOOGLE_CSE_ID environment variable.');
}
return cx;
}
getApiKey() {
const apiKey = process.env.GOOGLE_API_KEY || '';
if (!apiKey) {
throw new Error('Missing GOOGLE_API_KEY environment variable.');
}
return apiKey;
}
getCustomSearch() {
if (!this.customSearch) {
const version = 'v1';
this.customSearch = google.customsearch(version);
}
return this.customSearch;
}
resultsToReadableFormat(results) {
let output = 'Results:\n';
results.forEach((resultObj, index) => {
output += `Title: ${resultObj.title}\n`;
output += `Link: ${resultObj.link}\n`;
if (resultObj.snippet) {
output += `Snippet: ${resultObj.snippet}\n`;
}
if (index < results.length - 1) {
output += '\n';
}
});
return output;
}
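Note: the readable format this produces, on hypothetical results:
const tool = new GoogleSearchAPI({ GOOGLE_CSE_ID: 'cse-id', GOOGLE_API_KEY: 'api-key' });
tool.resultsToReadableFormat([
  { title: 'LibreChat', link: 'https://librechat.ai', snippet: 'Enhanced ChatGPT clone' },
  { title: 'LibreChat Docs', link: 'https://docs.librechat.ai' },
]);
// Results:
// Title: LibreChat
// Link: https://librechat.ai
// Snippet: Enhanced ChatGPT clone
//
// Title: LibreChat Docs
// Link: https://docs.librechat.ai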
/**
* Calls the tool with the provided input and returns a promise that resolves with a response from the Google Custom Search API.
* @param {string} input - The input to provide to the API.
* @returns {Promise<String>} A promise that resolves with a response from the Google Custom Search API.
*/
async _call(input) {
try {
const metadataResults = [];
const response = await this.getCustomSearch().cse.list({
q: input,
cx: this.cx,
auth: this.apiKey,
num: 5, // Limit the number of results to 5
});
// return response.data;
// logger.debug(response.data);
if (!response.data.items || response.data.items.length === 0) {
return this.resultsToReadableFormat([
{ title: 'No good Google Search Result was found', link: '' },
]);
}
// const results = response.items.slice(0, numResults);
const results = response.data.items;
for (const result of results) {
const metadataResult = {
title: result.title || '',
link: result.link || '',
};
if (result.snippet) {
metadataResult.snippet = result.snippet;
}
metadataResults.push(metadataResult);
}
return this.resultsToReadableFormat(metadataResults);
} catch (error) {
logger.error('[GoogleSearchAPI]', error);
// throw error;
return 'There was an error searching Google.';
}
}
}
module.exports = GoogleSearchAPI;

View File

@@ -1,6 +1,7 @@
const availableTools = require('./manifest.json');
// Basic Tools
const CodeBrew = require('./CodeBrew');
const GoogleSearchAPI = require('./GoogleSearch');
const WolframAlphaAPI = require('./Wolfram');
const AzureAiSearch = require('./AzureAiSearch');
const OpenAICreateImage = require('./DALL-E');
@@ -15,10 +16,8 @@ const CodeSherpa = require('./structured/CodeSherpa');
const StructuredSD = require('./structured/StableDiffusion');
const StructuredACS = require('./structured/AzureAISearch');
const CodeSherpaTools = require('./structured/CodeSherpaTools');
const GoogleSearchAPI = require('./structured/GoogleSearch');
const StructuredWolfram = require('./structured/Wolfram');
const TavilySearchResults = require('./structured/TavilySearchResults');
const TraversaalSearch = require('./structured/TraversaalSearch');
module.exports = {
availableTools,
@@ -40,5 +39,4 @@ module.exports = {
CodeSherpaTools,
StructuredWolfram,
TavilySearchResults,
TraversaalSearch,
};

View File

@@ -1,17 +1,4 @@
[
{
"name": "Traversaal",
"pluginKey": "traversaal_search",
"description": "Traversaal is a robust search API tailored for LLM Agents. Get an API key here: https://api.traversaal.ai",
"icon": "https://traversaal.ai/favicon.ico",
"authConfig": [
{
"authField": "TRAVERSAAL_API_KEY",
"label": "Traversaal API Key",
"description": "Get your API key here: <a href=\"https://api.traversaal.ai\" target=\"_blank\">https://api.traversaal.ai</a>"
}
]
},
{
"name": "Google",
"pluginKey": "google",
@@ -24,7 +11,7 @@
"description": "This is your Google Custom Search Engine ID. For instructions on how to obtain this, see <a href='https://github.com/danny-avila/LibreChat/blob/main/docs/features/plugins/google_search.md'>Our Docs</a>."
},
{
"authField": "GOOGLE_SEARCH_API_KEY",
"authField": "GOOGLE_API_KEY",
"label": "Google API Key",
"description": "This is your Google Custom Search API Key. For instructions on how to obtain this, see <a href='https://github.com/danny-avila/LibreChat/blob/main/docs/features/plugins/google_search.md'>Our Docs</a>."
}
@@ -60,7 +47,7 @@
"name": "CodeSherpa",
"pluginKey": "codesherpa_tools",
"description": "[Experimental] A REPL for your chat. Requires https://github.com/iamgreggarcia/codesherpa",
"icon": "https://raw.githubusercontent.com/iamgreggarcia/codesherpa/main/localserver/_logo.png",
"icon": "https://github.com/iamgreggarcia/codesherpa/blob/main/localserver/_logo.png",
"authConfig": [
{
"authField": "CODESHERPA_SERVER_URL",
@@ -124,7 +111,7 @@
{
"name": "Tavily Search",
"pluginKey": "tavily_search_results_json",
"description": "Tavily Search is a robust search API tailored for LLM Agents. It seamlessly integrates with diverse data sources to ensure a superior, relevant search experience.",
"description": "Tavily Search is a robust search API tailored specifically for LLM Agents. It seamlessly integrates with diverse data sources to ensure a superior, relevant search experience.",
"icon": "https://tavily.com/favicon.ico",
"authConfig": [
{

View File

@@ -12,15 +12,14 @@ const { logger } = require('~/config');
class DALLE3 extends Tool {
constructor(fields = {}) {
super();
/** @type {boolean} Used to initialize the Tool without necessary variables. */
/* Used to initialize the Tool without necessary variables. */
this.override = fields.override ?? false;
/** @type {boolean} Necessary for output to contain all image metadata. */
/* Necessary for output to contain all image metadata. */
this.returnMetadata = fields.returnMetadata ?? false;
this.userId = fields.userId;
this.fileStrategy = fields.fileStrategy;
if (fields.processFileURL) {
/** @type {processFileURL} Necessary for output to contain all image metadata. */
this.processFileURL = fields.processFileURL.bind(this);
}
@@ -44,7 +43,6 @@ class DALLE3 extends Tool {
config.httpAgent = new HttpsProxyAgent(process.env.PROXY);
}
/** @type {OpenAI} */
this.openai = new OpenAI(config);
this.name = 'dalle';
this.description = `Use DALLE to create images from text descriptions.
@@ -166,7 +164,13 @@ Error Message: ${error.message}`;
});
if (this.returnMetadata) {
this.result = result;
this.result = {
file_id: result.file_id,
filename: result.filename,
filepath: result.filepath,
height: result.height,
width: result.width,
};
} else {
this.result = this.wrapInMarkdown(result.filepath);
}

View File

@@ -1,65 +0,0 @@
const { z } = require('zod');
const { Tool } = require('@langchain/core/tools');
const { getEnvironmentVariable } = require('@langchain/core/utils/env');
class GoogleSearchResults extends Tool {
static lc_name() {
return 'GoogleSearchResults';
}
constructor(fields = {}) {
super(fields);
this.envVarApiKey = 'GOOGLE_SEARCH_API_KEY';
this.envVarSearchEngineId = 'GOOGLE_CSE_ID';
this.override = fields.override ?? false;
this.apiKey = fields.apiKey ?? getEnvironmentVariable(this.envVarApiKey);
this.searchEngineId =
fields.searchEngineId ?? getEnvironmentVariable(this.envVarSearchEngineId);
this.kwargs = fields?.kwargs ?? {};
this.name = 'google';
this.description =
'A search engine optimized for comprehensive, accurate, and trusted results. Useful for when you need to answer questions about current events.';
this.schema = z.object({
query: z.string().min(1).describe('The search query string.'),
max_results: z
.number()
.min(1)
.max(10)
.optional()
.describe('The maximum number of search results to return. Defaults to 10.'),
// Note: Google API has its own parameters for search customization, adjust as needed.
});
}
async _call(input) {
const validationResult = this.schema.safeParse(input);
if (!validationResult.success) {
throw new Error(`Validation failed: ${JSON.stringify(validationResult.error.issues)}`);
}
const { query, max_results = 5 } = validationResult.data;
const response = await fetch(
`https://www.googleapis.com/customsearch/v1?key=${this.apiKey}&cx=${
this.searchEngineId
}&q=${encodeURIComponent(query)}&num=${max_results}`,
{
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
},
);
const json = await response.json();
if (!response.ok) {
throw new Error(`Request failed with status ${response.status}: ${json.error.message}`);
}
return JSON.stringify(json);
}
}
module.exports = GoogleSearchResults;
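Note: a call sketch (keys hypothetical; `_call` validates its input against the zod schema before fetching):
const search = new GoogleSearchResults({ apiKey: 'api-key', searchEngineId: 'cse-id' });
const json = await search._call({ query: 'librechat rag api', max_results: 5 });
// -> the raw Custom Search JSON response, stringified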

View File

@@ -4,27 +4,14 @@ const { z } = require('zod');
const path = require('path');
const axios = require('axios');
const sharp = require('sharp');
const { v4: uuidv4 } = require('uuid');
const { StructuredTool } = require('langchain/tools');
const { FileContext } = require('librechat-data-provider');
const paths = require('~/config/paths');
const { logger } = require('~/config');
class StableDiffusionAPI extends StructuredTool {
constructor(fields) {
super();
/** @type {string} User ID */
this.userId = fields.userId;
/** @type {Express.Request | undefined} Express Request object, only provided by ToolService */
this.req = fields.req;
/** @type {boolean} Used to initialize the Tool without necessary variables. */
/* Used to initialize the Tool without necessary variables. */
this.override = fields.override ?? false;
/** @type {boolean} Necessary for output to contain all image metadata. */
this.returnMetadata = fields.returnMetadata ?? false;
if (fields.uploadImageBuffer) {
/** @type {uploadImageBuffer} Necessary for output to contain all image metadata. */
this.uploadImageBuffer = fields.uploadImageBuffer.bind(this);
}
this.name = 'stable-diffusion';
this.url = fields.SD_WEBUI_URL || this.getServerURL();
@@ -60,7 +47,7 @@ class StableDiffusionAPI extends StructuredTool {
getMarkdownImageUrl(imageName) {
const imageUrl = path
.join(this.relativePath, this.userId, imageName)
.join(this.relativeImageUrl, imageName)
.replace(/\\/g, '/')
.replace('public/', '');
return `![generated image](/${imageUrl})`;
@@ -86,67 +73,46 @@ class StableDiffusionAPI extends StructuredTool {
width: 1024,
height: 1024,
};
const generationResponse = await axios.post(`${url}/sdapi/v1/txt2img`, payload);
const image = generationResponse.data.images[0];
const response = await axios.post(`${url}/sdapi/v1/txt2img`, payload);
const image = response.data.images[0];
const pngPayload = { image: `data:image/png;base64,${image}` };
const response2 = await axios.post(`${url}/sdapi/v1/png-info`, pngPayload);
const info = response2.data.info;
/** @type {{ height: number, width: number, seed: number, infotexts: string[] }} */
let info = {};
try {
info = JSON.parse(generationResponse.data.info);
} catch (error) {
logger.error('[StableDiffusion] Error while getting image metadata:', error);
}
// Generate unique name
const imageName = `${Date.now()}.png`;
this.outputPath = path.resolve(
__dirname,
'..',
'..',
'..',
'..',
'..',
'client',
'public',
'images',
);
const appRoot = path.resolve(__dirname, '..', '..', '..', '..', '..', 'client');
this.relativeImageUrl = path.relative(appRoot, this.outputPath);
const file_id = uuidv4();
const imageName = `${file_id}.png`;
const { imageOutput: imageOutputPath, clientPath } = paths;
const filepath = path.join(imageOutputPath, this.userId, imageName);
this.relativePath = path.relative(clientPath, imageOutputPath);
if (!fs.existsSync(path.join(imageOutputPath, this.userId))) {
fs.mkdirSync(path.join(imageOutputPath, this.userId), { recursive: true });
// Check if directory exists, if not create it
if (!fs.existsSync(this.outputPath)) {
fs.mkdirSync(this.outputPath, { recursive: true });
}
try {
const buffer = Buffer.from(image.split(',', 1)[0], 'base64');
if (this.returnMetadata && this.uploadImageBuffer && this.req) {
const file = await this.uploadImageBuffer({
req: this.req,
context: FileContext.image_generation,
resize: false,
metadata: {
buffer,
height: info.height,
width: info.width,
bytes: Buffer.byteLength(buffer),
filename: imageName,
type: 'image/png',
file_id,
},
});
const generationInfo = info.infotexts[0].split('\n').pop();
return {
...file,
prompt,
metadata: {
negative_prompt,
seed: info.seed,
info: generationInfo,
},
};
}
await sharp(buffer)
.withMetadata({
iptcpng: {
parameters: info.infotexts[0],
parameters: info,
},
})
.toFile(filepath);
.toFile(this.outputPath + '/' + imageName);
this.result = this.getMarkdownImageUrl(imageName);
} catch (error) {
logger.error('[StableDiffusion] Error while saving the image:', error);
// this.result = theImageUrl;
}
return this.result;
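
The txt2img round trip above, reduced to a standalone sketch. The endpoint paths and response shape come from the code; the WebUI address is an assumption (a local Automatic1111 instance started with --api).

const axios = require('axios');

async function txt2img(prompt) {
  const url = 'http://127.0.0.1:7860'; // assumed local WebUI address
  const { data } = await axios.post(`${url}/sdapi/v1/txt2img`, {
    prompt,
    width: 1024,
    height: 1024,
  });
  // data.images[0] is a base64-encoded PNG; data.info is a JSON string
  // carrying seed, infotexts, and dimensions, as parsed above.
  const buffer = Buffer.from(data.images[0], 'base64');
  const info = JSON.parse(data.info);
  return { buffer, seed: info.seed, infotext: info.infotexts?.[0] };
}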

View File

@@ -1,89 +0,0 @@
const { z } = require('zod');
const { Tool } = require('@langchain/core/tools');
const { getEnvironmentVariable } = require('@langchain/core/utils/env');
const { logger } = require('~/config');
/**
* Tool for the Traversaal AI search API, Ares.
*/
class TraversaalSearch extends Tool {
static lc_name() {
return 'TraversaalSearch';
}
constructor(fields) {
super(fields);
this.name = 'traversaal_search';
this.description = `An AI search engine optimized for comprehensive, accurate, and trusted results.
Useful for when you need to answer questions about current events. Input should be a search query.`;
this.description_for_model =
'\'Please create a specific sentence for the AI to understand and use as a query to search the web based on the user\'s request. For example, "Find information about the highest mountains in the world." or "Show me the latest news articles about climate change and its impact on polar ice caps."\'';
this.schema = z.object({
query: z
.string()
.describe(
'A properly written sentence to be interpreted by an AI to search the web according to the user\'s request.',
),
});
this.override = fields.override ?? false;
this.apiKey = fields?.TRAVERSAAL_API_KEY ?? this.getApiKey();
}
getApiKey() {
const apiKey = getEnvironmentVariable('TRAVERSAAL_API_KEY');
if (!apiKey && !this.override) {
throw new Error(
'No Traversaal API key found. Either set an environment variable named "TRAVERSAAL_API_KEY" or pass an API key as "apiKey".',
);
}
return apiKey;
}
// eslint-disable-next-line no-unused-vars
async _call({ query }, _runManager) {
const body = {
query: [query],
};
try {
const response = await fetch('https://api-ares.traversaal.ai/live/predict', {
method: 'POST',
headers: {
'content-type': 'application/json',
'x-api-key': this.apiKey,
},
body: JSON.stringify({ ...body }),
});
const json = await response.json();
if (!response.ok) {
throw new Error(
`Request failed with status code ${response.status}: ${json.error ?? json.message}`,
);
}
if (!json.data) {
throw new Error('Could not parse Traversaal API results. Please try again.');
}
const baseText = json.data?.response_text ?? '';
const sources = json.data?.web_url;
const noResponse = 'No response found in Traversaal API results';
if (!baseText && !sources) {
return noResponse;
}
const sourcesText = sources?.length ? '\n\nSources:\n - ' + sources.join('\n - ') : '';
const result = baseText + sourcesText;
if (!result) {
return noResponse;
}
return result;
} catch (error) {
logger.error('Traversaal API request failed', error);
return `Traversaal API request failed: ${error.message}`;
}
}
}
module.exports = TraversaalSearch;
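
The Ares request above as a self-contained sketch; the endpoint, headers, body shape, and response fields are all taken from _call.

async function aresSearch(query, apiKey) {
  const response = await fetch('https://api-ares.traversaal.ai/live/predict', {
    method: 'POST',
    headers: { 'content-type': 'application/json', 'x-api-key': apiKey },
    body: JSON.stringify({ query: [query] }), // the API expects an array of queries
  });
  const json = await response.json();
  if (!response.ok) {
    throw new Error(`Request failed with status code ${response.status}`);
  }
  // data.response_text holds the answer text; data.web_url lists source URLs
  return { text: json.data?.response_text ?? '', sources: json.data?.web_url ?? [] };
}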

View File

@@ -20,7 +20,6 @@ const {
StructuredSD,
StructuredACS,
CodeSherpaTools,
TraversaalSearch,
StructuredWolfram,
TavilySearchResults,
} = require('../');
@@ -166,7 +165,6 @@ const loadTools = async ({
'stable-diffusion': functions ? StructuredSD : StableDiffusionAPI,
'azure-ai-search': functions ? StructuredACS : AzureAISearch,
CodeBrew: CodeBrew,
traversaal_search: TraversaalSearch,
};
const openAIApiKey = await getOpenAIKey(options, user);
@@ -237,11 +235,9 @@ const loadTools = async ({
}
const imageGenOptions = {
req: options.req,
fileStrategy: options.fileStrategy,
processFileURL: options.processFileURL,
returnMetadata: options.returnMetadata,
uploadImageBuffer: options.uploadImageBuffer,
};
const toolOptions = {

View File

@@ -1,6 +1,5 @@
const { getUserPluginAuthValue } = require('~/server/services/PluginService');
const { availableTools } = require('../');
const { logger } = require('~/config');
/**
* Loads a suite of tools with authentication values for a given user, supporting alternate authentication fields.
@@ -31,7 +30,7 @@ const loadToolSuite = async ({ pluginKey, tools, user, options = {} }) => {
return value;
}
} catch (err) {
logger.error(`Error fetching plugin auth value for ${field}: ${err.message}`);
console.error(`Error fetching plugin auth value for ${field}: ${err.message}`);
}
}
return null;
@@ -42,7 +41,7 @@ const loadToolSuite = async ({ pluginKey, tools, user, options = {} }) => {
if (authValue !== null) {
authValues[auth.authField] = authValue;
} else {
logger.warn(`[loadToolSuite] No auth value found for ${auth.authField}`);
console.warn(`No auth value found for ${auth.authField}`);
}
}

View File

@@ -1,7 +1,6 @@
const { ViolationTypes } = require('librechat-data-provider');
const { isEnabled, math, removePorts } = require('~/server/utils');
const getLogStores = require('./getLogStores');
const Session = require('~/models/Session');
const getLogStores = require('./getLogStores');
const { isEnabled, math, removePorts } = require('~/server/utils');
const { logger } = require('~/config');
const { BAN_VIOLATIONS, BAN_INTERVAL } = process.env ?? {};
@@ -49,7 +48,7 @@ const banViolation = async (req, res, errorMessage) => {
await Session.deleteAllUserSessions(user_id);
res.clearCookie('refreshToken');
const banLogs = getLogStores(ViolationTypes.BAN);
const banLogs = getLogStores('ban');
const duration = errorMessage.duration || banLogs.opts.ttl;
if (duration <= 0) {

View File

@@ -6,7 +6,6 @@ jest.mock('../models/Session');
jest.mock('./getLogStores', () => {
return jest.fn().mockImplementation(() => {
const EventEmitter = require('events');
const { CacheKeys } = require('librechat-data-provider');
const math = require('../server/utils/math');
const mockGet = jest.fn();
const mockSet = jest.fn();
@@ -34,7 +33,7 @@ jest.mock('./getLogStores', () => {
}
return new KeyvMongo('', {
namespace: CacheKeys.BANS,
namespace: 'bans',
ttl: math(process.env.BAN_DURATION, 7200000),
});
});

View File

@@ -1,12 +1,11 @@
const Keyv = require('keyv');
const { CacheKeys, ViolationTypes } = require('librechat-data-provider');
const { CacheKeys } = require('librechat-data-provider');
const { logFile, violationFile } = require('./keyvFiles');
const { math, isEnabled } = require('~/server/utils');
const keyvRedis = require('./keyvRedis');
const keyvMongo = require('./keyvMongo');
const { BAN_DURATION, USE_REDIS } = process.env ?? {};
const THIRTY_MINUTES = 1800000;
const duration = math(BAN_DURATION, 7200000);
@@ -25,8 +24,8 @@ const config = isEnabled(USE_REDIS)
: new Keyv({ namespace: CacheKeys.CONFIG_STORE });
const tokenConfig = isEnabled(USE_REDIS) // ttl: 30 minutes
? new Keyv({ store: keyvRedis, ttl: THIRTY_MINUTES })
: new Keyv({ namespace: CacheKeys.TOKEN_CONFIG, ttl: THIRTY_MINUTES });
? new Keyv({ store: keyvRedis, ttl: 1800000 })
: new Keyv({ namespace: CacheKeys.TOKEN_CONFIG, ttl: 1800000 });
const genTitle = isEnabled(USE_REDIS) // ttl: 2 minutes
? new Keyv({ store: keyvRedis, ttl: 120000 })
@@ -38,27 +37,19 @@ const modelQueries = isEnabled(process.env.USE_REDIS)
const abortKeys = isEnabled(USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: CacheKeys.ABORT_KEYS, ttl: 600000 });
: new Keyv({ namespace: CacheKeys.ABORT_KEYS });
const namespaces = {
[CacheKeys.CONFIG_STORE]: config,
pending_req,
[ViolationTypes.BAN]: new Keyv({ store: keyvMongo, namespace: CacheKeys.BANS, ttl: duration }),
[CacheKeys.ENCODED_DOMAINS]: new Keyv({
store: keyvMongo,
namespace: CacheKeys.ENCODED_DOMAINS,
ttl: 0,
}),
ban: new Keyv({ store: keyvMongo, namespace: 'bans', ttl: duration }),
general: new Keyv({ store: logFile, namespace: 'violations' }),
concurrent: createViolationInstance('concurrent'),
non_browser: createViolationInstance('non_browser'),
message_limit: createViolationInstance('message_limit'),
token_balance: createViolationInstance(ViolationTypes.TOKEN_BALANCE),
token_balance: createViolationInstance('token_balance'),
registrations: createViolationInstance('registrations'),
[ViolationTypes.FILE_UPLOAD_LIMIT]: createViolationInstance(ViolationTypes.FILE_UPLOAD_LIMIT),
[ViolationTypes.ILLEGAL_MODEL_REQUEST]: createViolationInstance(
ViolationTypes.ILLEGAL_MODEL_REQUEST,
),
[CacheKeys.FILE_UPLOAD_LIMIT]: createViolationInstance(CacheKeys.FILE_UPLOAD_LIMIT),
logins: createViolationInstance('logins'),
[CacheKeys.ABORT_KEYS]: abortKeys,
[CacheKeys.TOKEN_CONFIG]: tokenConfig,
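
Every store above shares the same minimal Keyv surface, so callers are unaffected by whether Redis, Mongo, a log file, or memory backs a namespace. A sketch of the pattern (namespace and key are illustrative):

const Keyv = require('keyv');

// In-memory store with a 30-minute TTL; swapping in Redis or Mongo only
// changes the `store` option, not the get/set API used by callers.
const cache = new Keyv({ namespace: 'token_config', ttl: 1800000 });

(async () => {
  await cache.set('openAI', { fetched: Date.now() });
  console.log(await cache.get('openAI'));
})();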

View File

@@ -1,13 +1,9 @@
const path = require('path');
module.exports = {
root: path.resolve(__dirname, '..', '..'),
uploads: path.resolve(__dirname, '..', '..', 'uploads'),
clientPath: path.resolve(__dirname, '..', '..', 'client'),
dist: path.resolve(__dirname, '..', '..', 'client', 'dist'),
publicPath: path.resolve(__dirname, '..', '..', 'client', 'public'),
fonts: path.resolve(__dirname, '..', '..', 'client', 'public', 'fonts'),
assets: path.resolve(__dirname, '..', '..', 'client', 'public', 'assets'),
imageOutput: path.resolve(__dirname, '..', '..', 'client', 'public', 'images'),
structuredTools: path.resolve(__dirname, '..', 'app', 'clients', 'tools', 'structured'),
pluginManifest: path.resolve(__dirname, '..', 'app', 'clients', 'tools', 'manifest.json'),
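
A usage sketch for the path map above; it relies on the repo's `~` module alias (registered via module-alias elsewhere in this diff), and the user id is illustrative. This mirrors how the Stable Diffusion tool composes its per-user output directory and public URL.

const path = require('path');
const paths = require('~/config/paths');

const userImageDir = path.join(paths.imageOutput, 'some-user-id');
const publicUrl = path.relative(paths.clientPath, userImageDir).replace(/\\/g, '/');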

View File

@@ -5,15 +5,7 @@ const { redactFormat, redactMessage, debugTraverse } = require('./parsers');
const logDir = path.join(__dirname, '..', 'logs');
const { NODE_ENV, DEBUG_LOGGING = true, DEBUG_CONSOLE = false, CONSOLE_JSON = false } = process.env;
const useConsoleJson =
(typeof CONSOLE_JSON === 'string' && CONSOLE_JSON?.toLowerCase() === 'true') ||
CONSOLE_JSON === true;
const useDebugConsole =
(typeof DEBUG_CONSOLE === 'string' && DEBUG_CONSOLE?.toLowerCase() === 'true') ||
DEBUG_CONSOLE === true;
const { NODE_ENV, DEBUG_LOGGING = true, DEBUG_CONSOLE = false } = process.env;
const levels = {
error: 0,
@@ -41,7 +33,7 @@ const level = () => {
const fileFormat = winston.format.combine(
redactFormat(),
winston.format.timestamp({ format: () => new Date().toISOString() }),
winston.format.timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }),
winston.format.errors({ stack: true }),
winston.format.splat(),
// redactErrors(),
@@ -107,20 +99,14 @@ const consoleFormat = winston.format.combine(
}),
);
if (useDebugConsole) {
if (
(typeof DEBUG_CONSOLE === 'string' && DEBUG_CONSOLE?.toLowerCase() === 'true') ||
DEBUG_CONSOLE === true
) {
transports.push(
new winston.transports.Console({
level: 'debug',
format: useConsoleJson
? winston.format.combine(fileFormat, debugTraverse, winston.format.json())
: winston.format.combine(fileFormat, debugTraverse),
}),
);
} else if (useConsoleJson) {
transports.push(
new winston.transports.Console({
level: 'info',
format: winston.format.combine(fileFormat, winston.format.json()),
format: winston.format.combine(consoleFormat, debugTraverse),
}),
);
} else {

View File

@@ -5,18 +5,19 @@ const Action = mongoose.model('action', actionSchema);
/**
* Update an action with new data without overwriting existing properties,
* or create a new action if it doesn't exist, within a transaction session if provided.
* or create a new action if it doesn't exist.
*
* @param {Object} searchParams - The search parameters to find the action to update.
* @param {string} searchParams.action_id - The ID of the action to update.
* @param {string} searchParams.user - The user ID of the action's author.
* @param {Object} updateData - An object containing the properties to update.
* @param {mongoose.ClientSession} [session] - The transaction session to use.
* @returns {Promise<Object>} The updated or newly created action document as a plain object.
*/
const updateAction = async (searchParams, updateData, session = null) => {
const options = { new: true, upsert: true, session };
return await Action.findOneAndUpdate(searchParams, updateData, options).lean();
const updateAction = async (searchParams, updateData) => {
return await Action.findOneAndUpdate(searchParams, updateData, {
new: true,
upsert: true,
}).lean();
};
/**
@@ -49,17 +50,15 @@ const getActions = async (searchParams, includeSensitive = false) => {
};
/**
* Deletes an action by params, within a transaction session if provided.
* Deletes an action by its ID.
*
* @param {Object} searchParams - The search parameters to find the action to delete.
* @param {string} searchParams.action_id - The ID of the action to delete.
* @param {Object} searchParams - The search parameters to find the action to update.
* @param {string} searchParams.action_id - The ID of the action to update.
* @param {string} searchParams.user - The user ID of the action's author.
* @param {mongoose.ClientSession} [session] - The transaction session to use (optional).
* @returns {Promise<Object>} A promise that resolves to the deleted action document as a plain object, or null if no document was found.
*/
const deleteAction = async (searchParams, session = null) => {
const options = session ? { session } : {};
return await Action.findOneAndDelete(searchParams, options).lean();
const deleteAction = async (searchParams) => {
return await Action.findOneAndDelete(searchParams).lean();
};
module.exports = {

View File

@@ -5,18 +5,19 @@ const Assistant = mongoose.model('assistant', assistantSchema);
/**
* Update an assistant with new data without overwriting existing properties,
* or create a new assistant if it doesn't exist, within a transaction session if provided.
* or create a new assistant if it doesn't exist.
*
* @param {Object} searchParams - The search parameters to find the assistant to update.
* @param {string} searchParams.assistant_id - The ID of the assistant to update.
* @param {string} searchParams.user - The user ID of the assistant's author.
* @param {Object} updateData - An object containing the properties to update.
* @param {mongoose.ClientSession} [session] - The transaction session to use (optional).
* @returns {Promise<Object>} The updated or newly created assistant document as a plain object.
*/
const updateAssistant = async (searchParams, updateData, session = null) => {
const options = { new: true, upsert: true, session };
return await Assistant.findOneAndUpdate(searchParams, updateData, options).lean();
const updateAssistant = async (searchParams, updateData) => {
return await Assistant.findOneAndUpdate(searchParams, updateData, {
new: true,
upsert: true,
}).lean();
};
/**

View File

@@ -69,7 +69,7 @@ const updateFileUsage = async (data) => {
const { file_id, inc = 1 } = data;
const updateOperation = {
$inc: { usage: inc },
$unset: { expiresAt: '', temp_file_id: '' },
$unset: { expiresAt: '' },
};
return await File.findOneAndUpdate({ file_id }, updateOperation, { new: true }).lean();
};
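
In effect, a sketch of what the update does to a document (field values illustrative):

// before: { file_id: 'f1', usage: 0, expiresAt: '2024-03-01T00:00:00Z', temp_file_id: 't1' }
// after:  { file_id: 'f1', usage: 1 }
// Unsetting expiresAt (and, on the newer side, temp_file_id) keeps a file
// from being expired once it has actually been used, assuming expiresAt
// backs a TTL index.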

View File

@@ -2,7 +2,6 @@ const mongoose = require('mongoose');
const { isEnabled } = require('../server/utils/handleText');
const transactionSchema = require('./schema/transaction');
const { getMultiplier } = require('./tx');
const { logger } = require('~/config');
const Balance = require('./Balance');
const cancelRate = 1.15;
@@ -12,7 +11,7 @@ transactionSchema.methods.calculateTokenValue = function () {
this.tokenValue = this.rawAmount;
}
const { valueKey, tokenType, model, endpointTokenConfig } = this;
const multiplier = Math.abs(getMultiplier({ valueKey, tokenType, model, endpointTokenConfig }));
const multiplier = getMultiplier({ valueKey, tokenType, model, endpointTokenConfig });
this.rate = multiplier;
this.tokenValue = this.rawAmount * multiplier;
if (this.context && this.tokenType === 'completion' && this.context === 'incomplete') {
@@ -36,44 +35,12 @@ transactionSchema.statics.create = async function (transactionData) {
return;
}
let balance = await Balance.findOne({ user: transaction.user }).lean();
let incrementValue = transaction.tokenValue;
if (balance && balance?.tokenCredits + incrementValue < 0) {
incrementValue = -balance.tokenCredits;
}
balance = await Balance.findOneAndUpdate(
// Adjust the user's balance
return await Balance.findOneAndUpdate(
{ user: transaction.user },
{ $inc: { tokenCredits: incrementValue } },
{ $inc: { tokenCredits: transaction.tokenValue } },
{ upsert: true, new: true },
).lean();
return {
rate: transaction.rate,
user: transaction.user.toString(),
balance: balance.tokenCredits,
[transaction.tokenType]: incrementValue,
};
};
const Transaction = mongoose.model('Transaction', transactionSchema);
/**
* Queries and retrieves transactions based on a given filter.
* @async
* @function getTransactions
* @param {Object} filter - MongoDB filter object to apply when querying transactions.
* @returns {Promise<Array>} A promise that resolves to an array of matched transactions.
* @throws {Error} Throws an error if querying the database fails.
*/
async function getTransactions(filter) {
try {
return await Transaction.find(filter).lean();
} catch (error) {
logger.error('Error querying transactions:', error);
throw error;
}
}
module.exports = { Transaction, getTransactions };
module.exports = mongoose.model('Transaction', transactionSchema);
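
A worked example of the multiplier arithmetic in calculateTokenValue above. The model rate and token count are illustrative; rates are USD per 1M tokens per the tx.js table later in this diff, and the credits-per-USD convention is inferred from that table.

const rawAmount = -1500;                   // token spend is recorded as negative
const multiplier = 10;                     // e.g. a prompt rate from the pricing table
const tokenValue = rawAmount * multiplier; // -15000 credits debited from the balance
// At 1M credits per USD, that is a $0.015 debit.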

View File

@@ -1,6 +1,5 @@
const { ViolationTypes } = require('librechat-data-provider');
const { logViolation } = require('~/cache');
const Balance = require('./Balance');
const { logViolation } = require('../cache');
/**
* Checks the balance for a user and determines if they can spend a certain amount.
* If the user cannot spend the amount, it logs a violation and denies the request.
@@ -26,7 +25,7 @@ const checkBalance = async ({ req, res, txData }) => {
return true;
}
const type = ViolationTypes.TOKEN_BALANCE;
const type = 'token_balance';
const errorMessage = {
type,
balance,

View File

@@ -22,12 +22,14 @@ const Key = require('./Key');
const User = require('./User');
const Session = require('./Session');
const Balance = require('./Balance');
const Transaction = require('./Transaction');
module.exports = {
User,
Key,
Session,
Balance,
Transaction,
hashPassword,
updateUser,

View File

@@ -45,6 +45,7 @@ const actionSchema = new Schema({
auth: AuthSchema,
domain: {
type: String,
unique: true,
required: true,
},
// json_schema: Schema.Types.Mixed,

View File

@@ -9,6 +9,7 @@ const assistantSchema = mongoose.Schema(
},
assistant_id: {
type: String,
unique: true,
index: true,
required: true,
},

View File

@@ -70,14 +70,10 @@ const conversationPreset = {
type: String,
},
file_ids: { type: [{ type: String }], default: undefined },
// deprecated
// vision
resendImages: {
type: Boolean,
},
// files
resendFiles: {
type: Boolean,
},
imageDetail: {
type: String,
},

View File

@@ -15,9 +15,6 @@ const mongoose = require('mongoose');
* @property {'file'} object - Type of object, always 'file'
* @property {string} type - Type of file
* @property {number} usage - Number of uses of the file
* @property {string} [context] - Context of the file origin
* @property {boolean} [embedded] - Whether or not the file is embedded in vector db
* @property {string} [model] - The model to identify the group region of the file (for Azure OpenAI hosting)
* @property {string} [source] - The source of the file
* @property {number} [width] - Optional width of the file
* @property {number} [height] - Optional height of the file
@@ -64,9 +61,6 @@ const fileSchema = mongoose.Schema(
required: true,
default: 'file',
},
embedded: {
type: Boolean,
},
type: {
type: String,
required: true,
@@ -84,9 +78,6 @@ const fileSchema = mongoose.Schema(
type: String,
default: FileSources.local,
},
model: {
type: String,
},
width: Number,
height: Number,
expiresAt: {
@@ -99,6 +90,4 @@ const fileSchema = mongoose.Schema(
},
);
fileSchema.index({ createdAt: 1, updatedAt: 1 });
module.exports = fileSchema;

View File

@@ -1,4 +1,4 @@
const { Transaction } = require('./Transaction');
const Transaction = require('./Transaction');
const { logger } = require('~/config');
/**
@@ -21,15 +21,10 @@ const { logger } = require('~/config');
*/
const spendTokens = async (txData, tokenUsage) => {
const { promptTokens, completionTokens } = tokenUsage;
logger.debug(
`[spendTokens] conversationId: ${txData.conversationId}${
txData?.context ? ` | Context: ${txData?.context}` : ''
} | Token usage: `,
{
promptTokens,
completionTokens,
},
);
logger.debug(`[spendTokens] conversationId: ${txData.conversationId} | Token usage: `, {
promptTokens,
completionTokens,
});
let prompt, completion;
try {
if (promptTokens >= 0) {
@@ -54,12 +49,8 @@ const spendTokens = async (txData, tokenUsage) => {
prompt &&
completion &&
logger.debug('[spendTokens] Transaction data record against balance:', {
user: txData.user,
prompt: prompt.prompt,
promptRate: prompt.rate,
completion: completion.completion,
completionRate: completion.rate,
balance: completion.balance,
prompt,
completion,
});
} catch (err) {
logger.error('[spendTokens]', err);
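
A hypothetical call shape for spendTokens, inferred from the destructuring and log fields above; any field not visible in this hunk (such as model) is an assumption.

await spendTokens(
  { user, conversationId, context: 'message', model: 'gpt-4-1106' }, // model field assumed
  { promptTokens: 520, completionTokens: 180 },
);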

View File

@@ -3,7 +3,6 @@ const defaultRate = 6;
/**
* Mapping of model token sizes to their respective multipliers for prompt and completion.
* The rates are 1 USD per 1M tokens.
* @type {Object.<string, {prompt: number, completion: number}>}
*/
const tokenValues = {
@@ -14,21 +13,6 @@ const tokenValues = {
'gpt-3.5-turbo-1106': { prompt: 1, completion: 2 },
'gpt-4-1106': { prompt: 10, completion: 30 },
'gpt-3.5-turbo-0125': { prompt: 0.5, completion: 1.5 },
'claude-3-opus': { prompt: 15, completion: 75 },
'claude-3-sonnet': { prompt: 3, completion: 15 },
'claude-3-haiku': { prompt: 0.25, completion: 1.25 },
'claude-2.1': { prompt: 8, completion: 24 },
'claude-2': { prompt: 8, completion: 24 },
'claude-': { prompt: 0.8, completion: 2.4 },
'command-r-plus': { prompt: 3, completion: 15 },
'command-r': { prompt: 0.5, completion: 1.5 },
/* cohere doesn't have rates for the older command models,
so this was from https://artificialanalysis.ai/models/command-light/providers */
command: { prompt: 0.38, completion: 0.38 },
// 'gemini-1.5': { prompt: 7, completion: 21 }, // May 2nd, 2024 pricing
// 'gemini': { prompt: 0.5, completion: 1.5 }, // May 2nd, 2024 pricing
'gemini-1.5': { prompt: 0, completion: 0 }, // currently free
gemini: { prompt: 0, completion: 0 }, // currently free
};
/**
@@ -52,8 +36,6 @@ const getValueKey = (model, endpoint) => {
return 'gpt-3.5-turbo-1106';
} else if (modelName.includes('gpt-3.5')) {
return '4k';
} else if (modelName.includes('gpt-4-vision')) {
return 'gpt-4-1106';
} else if (modelName.includes('gpt-4-1106')) {
return 'gpt-4-1106';
} else if (modelName.includes('gpt-4-0125')) {
@@ -64,8 +46,6 @@ const getValueKey = (model, endpoint) => {
return '32k';
} else if (modelName.includes('gpt-4')) {
return '8k';
} else if (tokenValues[modelName]) {
return modelName;
}
return undefined;
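
A worked example against the pricing table above (token counts illustrative):

const { prompt, completion } = { prompt: 10, completion: 30 }; // the 'gpt-4-1106' row
const promptCost = (2000 * prompt) / 1e6;        // 2,000 prompt tokens -> $0.02
const completionCost = (500 * completion) / 1e6; // 500 completion tokens -> $0.015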

View File

@@ -34,13 +34,6 @@ describe('getValueKey', () => {
expect(getValueKey('openai/gpt-4-1106')).toBe('gpt-4-1106');
expect(getValueKey('gpt-4-1106/openai/')).toBe('gpt-4-1106');
});
it('should return "gpt-4-1106" for model type of "gpt-4-1106"', () => {
expect(getValueKey('gpt-4-vision-preview')).toBe('gpt-4-1106');
expect(getValueKey('openai/gpt-4-1106')).toBe('gpt-4-1106');
expect(getValueKey('gpt-4-turbo')).toBe('gpt-4-1106');
expect(getValueKey('gpt-4-0125')).toBe('gpt-4-1106');
});
});
describe('getMultiplier', () => {

View File

@@ -1,19 +1,13 @@
{
"name": "@librechat/backend",
"version": "0.7.1",
"version": "0.6.10",
"description": "",
"scripts": {
"start": "echo 'please run this from the root directory'",
"server-dev": "echo 'please run this from the root directory'",
"test": "cross-env NODE_ENV=test jest",
"b:test": "NODE_ENV=test bun jest",
"test:ci": "jest --ci",
"add-balance": "node ./add-balance.js",
"list-balances": "node ./list-balances.js",
"user-stats": "node ./user-stats.js",
"create-user": "node ./create-user.js",
"ban-user": "node ./ban-user.js",
"delete-user": "node ./delete-user.js"
"test:ci": "jest --ci"
},
"repository": {
"type": "git",
@@ -31,20 +25,18 @@
"bugs": {
"url": "https://github.com/danny-avila/LibreChat/issues"
},
"homepage": "https://librechat.ai",
"homepage": "https://github.com/danny-avila/LibreChat#readme",
"dependencies": {
"@anthropic-ai/sdk": "^0.16.1",
"@anthropic-ai/sdk": "^0.5.4",
"@azure/search-documents": "^12.0.0",
"@google/generative-ai": "^0.5.0",
"@keyv/mongo": "^2.1.8",
"@keyv/redis": "^2.8.1",
"@langchain/community": "^0.0.46",
"@langchain/google-genai": "^0.0.11",
"@langchain/google-vertexai": "^0.0.5",
"@langchain/community": "^0.0.17",
"@langchain/google-genai": "^0.0.8",
"axios": "^1.3.4",
"bcryptjs": "^2.4.3",
"cheerio": "^1.0.0-rc.12",
"cohere-ai": "^7.9.1",
"cohere-ai": "^6.0.0",
"connect-redis": "^7.1.0",
"cookie": "^0.5.0",
"cors": "^2.8.5",
@@ -67,14 +59,14 @@
"langchain": "^0.0.214",
"librechat-data-provider": "*",
"lodash": "^4.17.21",
"meilisearch": "^0.38.0",
"meilisearch": "^0.33.0",
"mime": "^3.0.0",
"module-alias": "^2.2.3",
"mongoose": "^7.1.1",
"multer": "^1.4.5-lts.1",
"nodejs-gpt": "^1.37.4",
"nodemailer": "^6.9.4",
"openai": "4.36.0",
"openai": "^4.20.1",
"openai-chat-tokens": "^0.2.8",
"openid-client": "^5.4.2",
"passport": "^0.6.0",

View File

@@ -1,8 +1,7 @@
const throttle = require('lodash/throttle');
const { getResponseSender, Constants, EModelEndpoint } = require('librechat-data-provider');
const { createAbortController, handleAbortError } = require('~/server/middleware');
const { getResponseSender, Constants } = require('librechat-data-provider');
const { sendMessage, createOnProgress } = require('~/server/utils');
const { saveMessage, getConvo } = require('~/models');
const { saveMessage, getConvoTitle, getConvo } = require('~/models');
const { createAbortController, handleAbortError } = require('~/server/middleware');
const { logger } = require('~/config');
const AskController = async (req, res, next, initializeClient, addTitle) => {
@@ -17,10 +16,13 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
logger.debug('[AskController]', { text, conversationId, ...endpointOption });
let metadata;
let userMessage;
let promptTokens;
let userMessageId;
let responseMessageId;
let lastSavedTimestamp = 0;
let saveDelay = 100;
const sender = getResponseSender({
...endpointOption,
model: endpointOption.modelOptions.model,
@@ -29,6 +31,8 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
const newConvo = !conversationId;
const user = req.user.id;
const addMetadata = (data) => (metadata = data);
const getReqData = (data = {}) => {
for (let key in data) {
if (key === 'userMessage') {
@@ -48,10 +52,13 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
try {
const { client } = await initializeClient({ req, res, endpointOption });
const unfinished = endpointOption.endpoint === EModelEndpoint.google ? false : true;
const { onProgress: progressCallback, getPartialText } = createOnProgress({
onProgress: throttle(
({ text: partialText }) => {
onProgress: ({ text: partialText }) => {
const currentTimestamp = Date.now();
if (currentTimestamp - lastSavedTimestamp > saveDelay) {
lastSavedTimestamp = currentTimestamp;
saveMessage({
messageId: responseMessageId,
sender,
@@ -59,14 +66,16 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
parentMessageId: overrideParentMessageId ?? userMessageId,
text: partialText,
model: client.modelOptions.model,
unfinished,
unfinished: true,
error: false,
user,
});
},
3000,
{ trailing: false },
),
}
if (saveDelay < 500) {
saveDelay = 500;
}
},
});
getText = getPartialText;
@@ -83,20 +92,6 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
const { abortController, onStart } = createAbortController(req, res, getAbortData);
res.on('close', () => {
logger.debug('[AskController] Request closed');
if (!abortController) {
return;
} else if (abortController.signal.aborted) {
return;
} else if (abortController.requestCompleted) {
return;
}
abortController.abort();
logger.debug('[AskController] Request aborted on close');
});
const messageOptions = {
user,
parentMessageId,
@@ -104,6 +99,7 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
overrideParentMessageId,
getReqData,
onStart,
addMetadata,
abortController,
onProgress: progressCallback.call(null, {
res,
@@ -118,23 +114,22 @@ const AskController = async (req, res, next, initializeClient, addTitle) => {
response.parentMessageId = overrideParentMessageId;
}
response.endpoint = endpointOption.endpoint;
if (metadata) {
response = { ...response, ...metadata };
}
const conversation = await getConvo(user, conversationId);
conversation.title =
conversation && !conversation.title ? null : conversation?.title || 'New Chat';
response.endpoint = endpointOption.endpoint;
if (client.options.attachments) {
userMessage.files = client.options.attachments;
conversation.model = endpointOption.modelOptions.model;
delete userMessage.image_urls;
}
if (!abortController.signal.aborted) {
sendMessage(res, {
title: await getConvoTitle(user, conversationId),
final: true,
conversation,
title: conversation.title,
conversation: await getConvo(user, conversationId),
requestMessage: userMessage,
responseMessage: response,
});

View File

@@ -76,14 +76,14 @@ const refreshController = async (req, res) => {
}
try {
const payload = jwt.verify(refreshToken, process.env.JWT_REFRESH_SECRET);
const user = await User.findOne({ _id: payload.id });
let payload;
payload = jwt.verify(refreshToken, process.env.JWT_REFRESH_SECRET);
const userId = payload.id;
const user = await User.findOne({ _id: userId });
if (!user) {
return res.status(401).redirect('/login');
}
const userId = payload.id;
if (process.env.NODE_ENV === 'CI') {
const token = await setAuthTokens(userId, res);
const userObj = user.toJSON();
@@ -118,6 +118,6 @@ module.exports = {
getUserController,
refreshController,
registrationController,
resetPasswordController,
resetPasswordRequestController,
resetPasswordController,
};
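
A minimal sketch of the token check at the heart of refreshController; jwt.verify validates both the signature and the expiry in one call and throws on failure.

const jwt = require('jsonwebtoken');

function getUserIdFromRefreshToken(refreshToken) {
  // Throws TokenExpiredError / JsonWebTokenError on a stale or bad token
  const payload = jwt.verify(refreshToken, process.env.JWT_REFRESH_SECRET);
  return payload.id; // user id embedded when the token was signed
}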

View File

@@ -1,8 +1,7 @@
const throttle = require('lodash/throttle');
const { getResponseSender, EModelEndpoint } = require('librechat-data-provider');
const { createAbortController, handleAbortError } = require('~/server/middleware');
const { getResponseSender } = require('librechat-data-provider');
const { sendMessage, createOnProgress } = require('~/server/utils');
const { saveMessage, getConvo } = require('~/models');
const { saveMessage, getConvoTitle, getConvo } = require('~/models');
const { createAbortController, handleAbortError } = require('~/server/middleware');
const { logger } = require('~/config');
const EditController = async (req, res, next, initializeClient) => {
@@ -26,8 +25,11 @@ const EditController = async (req, res, next, initializeClient) => {
...endpointOption,
});
let metadata;
let userMessage;
let promptTokens;
let lastSavedTimestamp = 0;
let saveDelay = 100;
const sender = getResponseSender({
...endpointOption,
model: endpointOption.modelOptions.model,
@@ -36,6 +38,7 @@ const EditController = async (req, res, next, initializeClient) => {
const userMessageId = parentMessageId;
const user = req.user.id;
const addMetadata = (data) => (metadata = data);
const getReqData = (data = {}) => {
for (let key in data) {
if (key === 'userMessage') {
@@ -48,11 +51,13 @@ const EditController = async (req, res, next, initializeClient) => {
}
};
const unfinished = endpointOption.endpoint === EModelEndpoint.google ? false : true;
const { onProgress: progressCallback, getPartialText } = createOnProgress({
generation,
onProgress: throttle(
({ text: partialText }) => {
onProgress: ({ text: partialText }) => {
const currentTimestamp = Date.now();
if (currentTimestamp - lastSavedTimestamp > saveDelay) {
lastSavedTimestamp = currentTimestamp;
saveMessage({
messageId: responseMessageId,
sender,
@@ -60,15 +65,17 @@ const EditController = async (req, res, next, initializeClient) => {
parentMessageId: overrideParentMessageId ?? userMessageId,
text: partialText,
model: endpointOption.modelOptions.model,
unfinished,
unfinished: true,
isEdited: true,
error: false,
user,
});
},
3000,
{ trailing: false },
),
}
if (saveDelay < 500) {
saveDelay = 500;
}
},
});
const getAbortData = () => ({
@@ -83,20 +90,6 @@ const EditController = async (req, res, next, initializeClient) => {
const { abortController, onStart } = createAbortController(req, res, getAbortData);
res.on('close', () => {
logger.debug('[EditController] Request closed');
if (!abortController) {
return;
} else if (abortController.signal.aborted) {
return;
} else if (abortController.requestCompleted) {
return;
}
abortController.abort();
logger.debug('[EditController] Request aborted on close');
});
try {
const { client } = await initializeClient({ req, res, endpointOption });
@@ -111,6 +104,7 @@ const EditController = async (req, res, next, initializeClient) => {
overrideParentMessageId,
getReqData,
onStart,
addMetadata,
abortController,
onProgress: progressCallback.call(null, {
res,
@@ -119,19 +113,15 @@ const EditController = async (req, res, next, initializeClient) => {
}),
});
const conversation = await getConvo(user, conversationId);
conversation.title =
conversation && !conversation.title ? null : conversation?.title || 'New Chat';
if (client.options.attachments) {
conversation.model = endpointOption.modelOptions.model;
if (metadata) {
response = { ...response, ...metadata };
}
if (!abortController.signal.aborted) {
sendMessage(res, {
title: await getConvoTitle(user, conversationId),
final: true,
conversation,
title: conversation.title,
conversation: await getConvo(user, conversationId),
requestMessage: userMessage,
responseMessage: response,
});

View File

@@ -1,4 +1,4 @@
const { CacheKeys, EModelEndpoint, orderEndpointsConfig } = require('librechat-data-provider');
const { CacheKeys, EModelEndpoint } = require('librechat-data-provider');
const { loadDefaultEndpointsConfig, loadConfigEndpoints } = require('~/server/services/Config');
const { getLogStores } = require('~/cache');
@@ -10,24 +10,15 @@ async function endpointController(req, res) {
return;
}
const defaultEndpointsConfig = await loadDefaultEndpointsConfig(req);
const customConfigEndpoints = await loadConfigEndpoints(req);
const defaultEndpointsConfig = await loadDefaultEndpointsConfig();
const customConfigEndpoints = await loadConfigEndpoints();
/** @type {TEndpointsConfig} */
const mergedConfig = { ...defaultEndpointsConfig, ...customConfigEndpoints };
if (mergedConfig[EModelEndpoint.assistants] && req.app.locals?.[EModelEndpoint.assistants]) {
const { disableBuilder, retrievalModels, capabilities, ..._rest } =
req.app.locals[EModelEndpoint.assistants];
mergedConfig[EModelEndpoint.assistants] = {
...mergedConfig[EModelEndpoint.assistants],
retrievalModels,
disableBuilder,
capabilities,
};
const endpointsConfig = { ...defaultEndpointsConfig, ...customConfigEndpoints };
if (endpointsConfig[EModelEndpoint.assistants] && req.app.locals?.[EModelEndpoint.assistants]) {
endpointsConfig[EModelEndpoint.assistants].disableBuilder =
req.app.locals[EModelEndpoint.assistants].disableBuilder;
}
const endpointsConfig = orderEndpointsConfig(mergedConfig);
await cache.set(CacheKeys.ENDPOINT_CONFIG, endpointsConfig);
res.send(JSON.stringify(endpointsConfig));
}

View File

@@ -2,26 +2,12 @@ const { CacheKeys } = require('librechat-data-provider');
const { loadDefaultModels, loadConfigModels } = require('~/server/services/Config');
const { getLogStores } = require('~/cache');
const getModelsConfig = async (req) => {
const cache = getLogStores(CacheKeys.CONFIG_STORE);
let modelsConfig = await cache.get(CacheKeys.MODELS_CONFIG);
if (!modelsConfig) {
modelsConfig = await loadModels(req);
}
return modelsConfig;
};
/**
* Loads the models from the config.
* @param {Express.Request} req - The Express request object.
* @returns {Promise<TModelsConfig>} The models config.
*/
async function loadModels(req) {
async function modelController(req, res) {
const cache = getLogStores(CacheKeys.CONFIG_STORE);
const cachedModelsConfig = await cache.get(CacheKeys.MODELS_CONFIG);
if (cachedModelsConfig) {
return cachedModelsConfig;
res.send(cachedModelsConfig);
return;
}
const defaultModelsConfig = await loadDefaultModels(req);
const customModelsConfig = await loadConfigModels(req);
@@ -29,12 +15,7 @@ async function loadModels(req) {
const modelConfig = { ...defaultModelsConfig, ...customModelsConfig };
await cache.set(CacheKeys.MODELS_CONFIG, modelConfig);
return modelConfig;
}
async function modelController(req, res) {
const modelConfig = await loadModels(req);
res.send(modelConfig);
}
module.exports = { modelController, loadModels, getModelsConfig };
module.exports = modelController;
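
Both versions of the controller implement the same cache-or-load pattern; in miniature:

async function getOrLoad(cache, key, load) {
  const cached = await cache.get(key);
  if (cached) {
    return cached;
  }
  const value = await load();
  await cache.set(key, value);
  return value;
}

// e.g. getOrLoad(cache, CacheKeys.MODELS_CONFIG, () => loadModels(req))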

View File

@@ -2,11 +2,9 @@ require('dotenv').config();
const path = require('path');
require('module-alias')({ base: path.resolve(__dirname, '..') });
const cors = require('cors');
const axios = require('axios');
const express = require('express');
const passport = require('passport');
const mongoSanitize = require('express-mongo-sanitize');
const validateImageRequest = require('./middleware/validateImageRequest');
const errorController = require('./controllers/ErrorController');
const { jwtLogin, passportLogin } = require('~/strategies');
const configureSocialLogins = require('./socialLogins');
@@ -24,9 +22,6 @@ const port = Number(PORT) || 3080;
const host = HOST || 'localhost';
const startServer = async () => {
if (typeof Bun !== 'undefined') {
axios.defaults.headers.common['Accept-Encoding'] = 'gzip';
}
await connectDb();
logger.info('Connected to MongoDB');
await indexSync();
@@ -44,8 +39,7 @@ const startServer = async () => {
app.use(mongoSanitize());
app.use(express.urlencoded({ extended: true, limit: '3mb' }));
app.use(express.static(app.locals.paths.dist));
app.use(express.static(app.locals.paths.fonts));
app.use(express.static(app.locals.paths.assets));
app.use(express.static(app.locals.paths.publicPath));
app.set('trust proxy', 1); // trust first proxy
app.use(cors());
@@ -84,7 +78,6 @@ const startServer = async () => {
app.use('/api/config', routes.config);
app.use('/api/assistants', routes.assistants);
app.use('/api/files', await routes.files.initialize());
app.use('/images/', validateImageRequest, routes.staticRoute);
app.use((req, res) => {
res.status(404).sendFile(path.join(app.locals.paths.dist, 'index.html'));

View File

@@ -1,9 +1,9 @@
const { EModelEndpoint } = require('librechat-data-provider');
const { sendMessage, sendError, countTokens, isEnabled } = require('~/server/utils');
const { truncateText, smartTruncateText } = require('~/app/clients/prompts');
const { saveMessage, getConvo, getConvoTitle } = require('~/models');
const clearPendingReq = require('~/cache/clearPendingReq');
const abortControllers = require('./abortControllers');
const { redactMessage } = require('~/config/parsers');
const spendTokens = require('~/models/spendTokens');
const { abortRun } = require('./abortRun');
const { logger } = require('~/config');
@@ -100,15 +100,7 @@ const createAbortController = (req, res, getAbortData) => {
};
const handleAbortError = async (res, req, error, data) => {
if (error?.message?.includes('base64')) {
logger.error('[handleAbortError] Error in base64 encoding', {
...error,
stack: smartTruncateText(error?.stack, 1000),
message: truncateText(error.message, 350),
});
} else {
logger.error('[handleAbortError] AI response error; aborting request:', error);
}
logger.error('[handleAbortError] AI response error; aborting request:', error);
const { sender, conversationId, messageId, parentMessageId, partialText } = data;
if (error.stack && error.stack.includes('google')) {
@@ -117,24 +109,19 @@ const handleAbortError = async (res, req, error, data) => {
);
}
const errorText = error?.message?.includes('"type"')
? error.message
: 'An error occurred while processing your request. Please contact the Admin.';
const respondWithError = async (partialText) => {
let options = {
const options = {
sender,
messageId,
conversationId,
parentMessageId,
text: errorText,
text: redactMessage(error.message),
shouldSaveMessage: true,
user: req.user.id,
};
if (partialText) {
options = {
...options,
options.overrideProps = {
error: false,
unfinished: true,
text: partialText,

View File

@@ -1,22 +1,16 @@
const { CacheKeys, RunStatus, isUUID } = require('librechat-data-provider');
const { initializeClient } = require('~/server/services/Endpoints/assistants');
const { initializeClient } = require('~/server/services/Endpoints/assistant');
const { checkMessageGaps, recordUsage } = require('~/server/services/Threads');
const { getConvo } = require('~/models/Conversation');
const getLogStores = require('~/cache/getLogStores');
const { sendMessage } = require('~/server/utils');
// const spendTokens = require('~/models/spendTokens');
const { logger } = require('~/config');
const three_minutes = 1000 * 60 * 3;
async function abortRun(req, res) {
res.setHeader('Content-Type', 'application/json');
const { abortKey } = req.body;
const [conversationId, latestMessageId] = abortKey.split(':');
const conversation = await getConvo(req.user.id, conversationId);
if (conversation?.model) {
req.body.model = conversation.model;
}
if (!isUUID.safeParse(conversationId).success) {
logger.error('[abortRun] Invalid conversationId', { conversationId });
@@ -41,9 +35,9 @@ async function abortRun(req, res) {
const { openai } = await initializeClient({ req, res });
try {
await cache.set(cacheKey, 'cancelled', three_minutes);
await cache.set(cacheKey, 'cancelled');
const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
logger.debug('[abortRun] Cancelled run:', cancelledRun);
logger.debug('Cancelled run:', cancelledRun);
} catch (error) {
logger.error('[abortRun] Error cancelling run', error);
if (
@@ -75,8 +69,9 @@ async function abortRun(req, res) {
});
const finalEvent = {
title: 'New Chat',
final: true,
conversation,
conversation: await getConvo(req.user.id, conversationId),
runMessages,
};

View File

@@ -1,12 +1,11 @@
const { parseConvo, EModelEndpoint } = require('librechat-data-provider');
const { getModelsConfig } = require('~/server/controllers/ModelController');
const assistants = require('~/server/services/Endpoints/assistants');
const gptPlugins = require('~/server/services/Endpoints/gptPlugins');
const { processFiles } = require('~/server/services/Files/process');
const gptPlugins = require('~/server/services/Endpoints/gptPlugins');
const anthropic = require('~/server/services/Endpoints/anthropic');
const openAI = require('~/server/services/Endpoints/openAI');
const custom = require('~/server/services/Endpoints/custom');
const google = require('~/server/services/Endpoints/google');
const assistant = require('~/server/services/Endpoints/assistant');
const buildFunction = {
[EModelEndpoint.openAI]: openAI.buildOptions,
@@ -15,10 +14,10 @@ const buildFunction = {
[EModelEndpoint.azureOpenAI]: openAI.buildOptions,
[EModelEndpoint.anthropic]: anthropic.buildOptions,
[EModelEndpoint.gptPlugins]: gptPlugins.buildOptions,
[EModelEndpoint.assistants]: assistants.buildOptions,
[EModelEndpoint.assistants]: assistant.buildOptions,
};
async function buildEndpointOption(req, res, next) {
function buildEndpointOption(req, res, next) {
const { endpoint, endpointType } = req.body;
const parsedBody = parseConvo({ endpoint, endpointType, conversation: req.body });
req.body.endpointOption = buildFunction[endpointType ?? endpoint](
@@ -26,10 +25,6 @@ async function buildEndpointOption(req, res, next) {
parsedBody,
endpointType,
);
const modelsConfig = await getModelsConfig(req);
req.body.endpointOption.modelsConfig = modelsConfig;
if (req.body.files) {
// hold the promise
req.body.endpointOption.attachments = processFiles(req.body.files);

View File

@@ -1,15 +1,14 @@
const Keyv = require('keyv');
const uap = require('ua-parser-js');
const { ViolationTypes } = require('librechat-data-provider');
const { isEnabled, removePorts } = require('../utils');
const keyvRedis = require('~/cache/keyvRedis');
const denyRequest = require('./denyRequest');
const { getLogStores } = require('~/cache');
const User = require('~/models/User');
const { getLogStores } = require('../../cache');
const { isEnabled, removePorts } = require('../utils');
const keyvRedis = require('../../cache/keyvRedis');
const User = require('../../models/User');
const banCache = isEnabled(process.env.USE_REDIS)
? new Keyv({ store: keyvRedis })
: new Keyv({ namespace: ViolationTypes.BAN, ttl: 0 });
: new Keyv({ namespace: 'bans', ttl: 0 });
const message = 'Your account has been temporarily banned due to violations of our service.';
/**
@@ -29,7 +28,7 @@ const banResponse = async (req, res) => {
if (!ua.browser.name) {
return res.status(403).json({ message });
} else if (baseUrl === '/api/ask' || baseUrl === '/api/edit') {
return await denyRequest(req, res, { type: ViolationTypes.BAN });
return await denyRequest(req, res, { type: 'ban' });
}
return res.status(403).json({ message });
@@ -88,7 +87,7 @@ const checkBan = async (req, res, next = () => {}) => {
return await banResponse(req, res);
}
const banLogs = getLogStores(ViolationTypes.BAN);
const banLogs = getLogStores('ban');
const duration = banLogs.opts.ttl;
if (duration <= 0) {

View File

@@ -3,7 +3,6 @@ const checkBan = require('./checkBan');
const uaParser = require('./uaParser');
const setHeaders = require('./setHeaders');
const loginLimiter = require('./loginLimiter');
const validateModel = require('./validateModel');
const requireJwtAuth = require('./requireJwtAuth');
const uploadLimiters = require('./uploadLimiters');
const registerLimiter = require('./registerLimiter');
@@ -14,7 +13,6 @@ const concurrentLimiter = require('./concurrentLimiter');
const validateMessageReq = require('./validateMessageReq');
const buildEndpointOption = require('./buildEndpointOption');
const validateRegistration = require('./validateRegistration');
const validateImageRequest = require('./validateImageRequest');
const moderateText = require('./moderateText');
const noIndex = require('./noIndex');
@@ -34,8 +32,6 @@ module.exports = {
validateMessageReq,
buildEndpointOption,
validateRegistration,
validateImageRequest,
validateModel,
moderateText,
noIndex,
};

View File

@@ -1,7 +1,5 @@
const axios = require('axios');
const { ErrorTypes } = require('librechat-data-provider');
const denyRequest = require('./denyRequest');
const { logger } = require('~/config');
async function moderateText(req, res, next) {
if (process.env.OPENAI_MODERATION === 'true') {
@@ -25,12 +23,12 @@ async function moderateText(req, res, next) {
const flagged = results.some((result) => result.flagged);
if (flagged) {
const type = ErrorTypes.MODERATION;
const type = 'moderation';
const errorMessage = { type };
return await denyRequest(req, res, errorMessage);
}
} catch (error) {
logger.error('Error in moderateText:', error);
console.error('Error in moderateText:', error);
const errorMessage = 'error in moderation check';
return await denyRequest(req, res, errorMessage);
}
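
For reference, the moderation call this middleware wraps, reduced to a sketch; the endpoint and response shape follow OpenAI's moderations API, and the flagged check matches the one above.

const axios = require('axios');

async function isFlagged(input) {
  const { data } = await axios.post(
    'https://api.openai.com/v1/moderations',
    { input },
    { headers: { Authorization: `Bearer ${process.env.OPENAI_API_KEY}` } },
  );
  return data.results.some((result) => result.flagged);
}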

View File

@@ -1,5 +1,5 @@
const rateLimit = require('express-rate-limit');
const { ViolationTypes } = require('librechat-data-provider');
const { CacheKeys } = require('librechat-data-provider');
const logViolation = require('~/cache/logViolation');
const getEnvironmentVariables = () => {
@@ -35,7 +35,7 @@ const createFileUploadHandler = (ip = true) => {
} = getEnvironmentVariables();
return async (req, res) => {
const type = ViolationTypes.FILE_UPLOAD_LIMIT;
const type = CacheKeys.FILE_UPLOAD_LIMIT;
const errorMessage = {
type,
max: ip ? fileUploadIpMax : fileUploadUserMax,

View File

@@ -1,42 +0,0 @@
const cookies = require('cookie');
const jwt = require('jsonwebtoken');
const { logger } = require('~/config');
/**
* Middleware to validate image requests.
* Only active when `secureImageLinks` is enabled via the custom config file.
*/
function validateImageRequest(req, res, next) {
if (!req.app.locals.secureImageLinks) {
return next();
}
const refreshToken = req.headers.cookie ? cookies.parse(req.headers.cookie).refreshToken : null;
if (!refreshToken) {
logger.warn('[validateImageRequest] Refresh token not provided');
return res.status(401).send('Unauthorized');
}
let payload;
try {
payload = jwt.verify(refreshToken, process.env.JWT_REFRESH_SECRET);
} catch (err) {
logger.warn('[validateImageRequest]', err);
return res.status(403).send('Access Denied');
}
const currentTimeInSeconds = Math.floor(Date.now() / 1000);
if (payload.exp < currentTimeInSeconds) {
logger.warn('[validateImageRequest] Refresh token expired');
return res.status(403).send('Access Denied');
}
if (req.path.includes(payload.id)) {
logger.debug('[validateImageRequest] Image request validated');
next();
} else {
res.status(403).send('Access Denied');
}
}
module.exports = validateImageRequest;
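
This middleware sits in front of the static image route, as shown in the server setup earlier in this diff:

app.use('/images/', validateImageRequest, routes.staticRoute);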

View File

@@ -1,47 +0,0 @@
const { ViolationTypes } = require('librechat-data-provider');
const { getModelsConfig } = require('~/server/controllers/ModelController');
const { handleError } = require('~/server/utils');
const { logViolation } = require('~/cache');
/**
* Validates the model of the request.
*
* @async
* @param {Express.Request} req - The Express request object.
* @param {Express.Response} res - The Express response object.
* @param {Function} next - The Express next function.
*/
const validateModel = async (req, res, next) => {
const { model, endpoint } = req.body;
if (!model) {
return handleError(res, { text: 'Model not provided' });
}
const modelsConfig = await getModelsConfig(req);
if (!modelsConfig) {
return handleError(res, { text: 'Models not loaded' });
}
const availableModels = modelsConfig[endpoint];
if (!availableModels) {
return handleError(res, { text: 'Endpoint models not loaded' });
}
let validModel = !!availableModels.find((availableModel) => availableModel === model);
if (validModel) {
return next();
}
const { ILLEGAL_MODEL_REQ_SCORE: score = 5 } = process.env ?? {};
const type = ViolationTypes.ILLEGAL_MODEL_REQUEST;
const errorMessage = {
type,
};
await logViolation(req, res, type, errorMessage, score);
return handleError(res, { text: 'Illegal model request' });
};
module.exports = validateModel;
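
validateModel slots into the ask routes between endpoint validation and option building, per the route files that follow (handler stands in for the controller call):

router.post('/', validateEndpoint, validateModel, buildEndpointOption, setHeaders, handler);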

View File

@@ -1,10 +1,9 @@
const express = require('express');
const AskController = require('~/server/controllers/AskController');
const { addTitle, initializeClient } = require('~/server/services/Endpoints/anthropic');
const { initializeClient } = require('~/server/services/Endpoints/anthropic');
const {
setHeaders,
handleAbort,
validateModel,
validateEndpoint,
buildEndpointOption,
} = require('~/server/middleware');
@@ -13,15 +12,8 @@ const router = express.Router();
router.post('/abort', handleAbort());
router.post(
'/',
validateEndpoint,
validateModel,
buildEndpointOption,
setHeaders,
async (req, res, next) => {
await AskController(req, res, next, initializeClient, addTitle);
},
);
router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req, res, next) => {
await AskController(req, res, next, initializeClient);
});
module.exports = router;

View File

@@ -5,7 +5,6 @@ const { addTitle } = require('~/server/services/Endpoints/openAI');
const {
handleAbort,
setHeaders,
validateModel,
validateEndpoint,
buildEndpointOption,
} = require('~/server/middleware');
@@ -14,15 +13,8 @@ const router = express.Router();
router.post('/abort', handleAbort());
router.post(
'/',
validateEndpoint,
validateModel,
buildEndpointOption,
setHeaders,
async (req, res, next) => {
await AskController(req, res, next, initializeClient, addTitle);
},
);
router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req, res, next) => {
await AskController(req, res, next, initializeClient, addTitle);
});
module.exports = router;

View File

@@ -4,7 +4,6 @@ const { initializeClient } = require('~/server/services/Endpoints/google');
const {
setHeaders,
handleAbort,
validateModel,
validateEndpoint,
buildEndpointOption,
} = require('~/server/middleware');
@@ -13,15 +12,8 @@ const router = express.Router();
router.post('/abort', handleAbort());
router.post(
'/',
validateEndpoint,
validateModel,
buildEndpointOption,
setHeaders,
async (req, res, next) => {
await AskController(req, res, next, initializeClient);
},
);
router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req, res, next) => {
await AskController(req, res, next, initializeClient);
});
module.exports = router;

View File

@@ -1,88 +1,81 @@
const express = require('express');
const throttle = require('lodash/throttle');
const router = express.Router();
const { getResponseSender, Constants } = require('librechat-data-provider');
const { validateTools } = require('~/app');
const { addTitle } = require('~/server/services/Endpoints/openAI');
const { initializeClient } = require('~/server/services/Endpoints/gptPlugins');
const { saveMessage, getConvoTitle, getConvo } = require('~/models');
const { sendMessage, createOnProgress } = require('~/server/utils');
const { addTitle } = require('~/server/services/Endpoints/openAI');
const {
handleAbort,
createAbortController,
handleAbortError,
setHeaders,
validateModel,
validateEndpoint,
buildEndpointOption,
moderateText,
} = require('~/server/middleware');
const { validateTools } = require('~/app');
const { logger } = require('~/config');
const router = express.Router();
router.use(moderateText);
router.post('/abort', handleAbort());
router.post(
'/',
validateEndpoint,
validateModel,
buildEndpointOption,
setHeaders,
async (req, res) => {
let {
text,
endpointOption,
conversationId,
parentMessageId = null,
overrideParentMessageId = null,
} = req.body;
router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req, res) => {
let {
text,
endpointOption,
conversationId,
parentMessageId = null,
overrideParentMessageId = null,
} = req.body;
logger.debug('[/ask/gptPlugins]', { text, conversationId, ...endpointOption });
let metadata;
let userMessage;
let promptTokens;
let userMessageId;
let responseMessageId;
let lastSavedTimestamp = 0;
let saveDelay = 100;
const sender = getResponseSender({ ...endpointOption, model: endpointOption.modelOptions.model });
const newConvo = !conversationId;
const user = req.user.id;
logger.debug('[/ask/gptPlugins]', { text, conversationId, ...endpointOption });
const plugins = [];
let userMessage;
let promptTokens;
let userMessageId;
let responseMessageId;
const sender = getResponseSender({
...endpointOption,
model: endpointOption.modelOptions.model,
});
const newConvo = !conversationId;
const user = req.user.id;
const plugins = [];
const getReqData = (data = {}) => {
for (let key in data) {
if (key === 'userMessage') {
userMessage = data[key];
userMessageId = data[key].messageId;
} else if (key === 'responseMessageId') {
responseMessageId = data[key];
} else if (key === 'promptTokens') {
promptTokens = data[key];
} else if (!conversationId && key === 'conversationId') {
conversationId = data[key];
}
const addMetadata = (data) => (metadata = data);
const getReqData = (data = {}) => {
for (let key in data) {
if (key === 'userMessage') {
userMessage = data[key];
userMessageId = data[key].messageId;
} else if (key === 'responseMessageId') {
responseMessageId = data[key];
} else if (key === 'promptTokens') {
promptTokens = data[key];
} else if (!conversationId && key === 'conversationId') {
conversationId = data[key];
}
};
}
};
const throttledSaveMessage = throttle(saveMessage, 3000, { trailing: false });
let streaming = null;
let timer = null;
let streaming = null;
let timer = null;
const {
onProgress: progressCallback,
sendIntermediateMessage,
getPartialText,
} = createOnProgress({
onProgress: ({ text: partialText }) => {
if (timer) {
clearTimeout(timer);
}
const {
onProgress: progressCallback,
sendIntermediateMessage,
getPartialText,
} = createOnProgress({
onProgress: ({ text: partialText }) => {
const currentTimestamp = Date.now();
throttledSaveMessage({
if (timer) {
clearTimeout(timer);
}
if (currentTimestamp - lastSavedTimestamp > saveDelay) {
lastSavedTimestamp = currentTimestamp;
saveMessage({
messageId: responseMessageId,
sender,
conversationId,
@@ -94,131 +87,140 @@ router.post(
          plugins,
          user,
        });
      }

      if (saveDelay < 500) {
        saveDelay = 500;
      }

      streaming = new Promise((resolve) => {
        timer = setTimeout(() => {
          resolve();
        }, 250);
      });
    },
  });

  const pluginMap = new Map();
  const onAgentAction = async (action, runId) => {
    pluginMap.set(runId, action.tool);
    sendIntermediateMessage(res, { plugins });
  };

  const onToolStart = async (tool, input, runId, parentRunId) => {
    const pluginName = pluginMap.get(parentRunId);
    const latestPlugin = {
      runId,
      loading: true,
      inputs: [input],
      latest: pluginName,
      outputs: null,
    };

    if (streaming) {
      await streaming;
    }
    const extraTokens = ':::plugin:::\n';
    plugins.push(latestPlugin);
    sendIntermediateMessage(res, { plugins }, extraTokens);
  };

  const onToolEnd = async (output, runId) => {
    if (streaming) {
      await streaming;
    }

    const pluginIndex = plugins.findIndex((plugin) => plugin.runId === runId);

    if (pluginIndex !== -1) {
      plugins[pluginIndex].loading = false;
      plugins[pluginIndex].outputs = output;
    }
  };

  const onChainEnd = () => {
    saveMessage({ ...userMessage, user });
    sendIntermediateMessage(res, { plugins });
  };

  const getAbortData = () => ({
    sender,
    conversationId,
    messageId: responseMessageId,
    parentMessageId: overrideParentMessageId ?? userMessageId,
    text: getPartialText(),
    plugins: plugins.map((p) => ({ ...p, loading: false })),
    userMessage,
    promptTokens,
  });

  const { abortController, onStart } = createAbortController(req, res, getAbortData);

  try {
    endpointOption.tools = await validateTools(user, endpointOption.tools);
    const { client } = await initializeClient({ req, res, endpointOption });

    let response = await client.sendMessage(text, {
      user,
      conversationId,
      parentMessageId,
      overrideParentMessageId,
      getReqData,
      onAgentAction,
      onChainEnd,
      onToolStart,
      onToolEnd,
      onStart,
      addMetadata,
      getPartialText,
      ...endpointOption,
      onProgress: progressCallback.call(null, {
        res,
        text,
        parentMessageId: overrideParentMessageId || userMessageId,
        plugins,
      }),
      abortController,
    });

    if (metadata) {
      response = { ...response, ...metadata };
    }

    if (overrideParentMessageId) {
      response.parentMessageId = overrideParentMessageId;
    }

    logger.debug('[/ask/gptPlugins]', response);

    response.plugins = plugins.map((p) => ({ ...p, loading: false }));
    await saveMessage({ ...response, user });

    sendMessage(res, {
      title: await getConvoTitle(user, conversationId),
      final: true,
      conversation: await getConvo(user, conversationId),
      requestMessage: userMessage,
      responseMessage: response,
    });
    res.end();

    if (parentMessageId === Constants.NO_PARENT && newConvo) {
      addTitle(req, {
        text,
        response,
        client,
      });
    }
  } catch (error) {
    const partialText = getPartialText();
    handleAbortError(res, req, error, {
      partialText,
      conversationId,
      sender,
      messageId: responseMessageId,
      parentMessageId: userMessageId ?? parentMessageId,
    });
  }
});
module.exports = router;
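
The route above rate-limits intermediate saves by hand: it only persists a partial message when more than saveDelay milliseconds have passed since the last write, then widens the delay after the first save. A minimal sketch of that pattern in isolation (the save callback is a stand-in for illustration, not the actual saveMessage model helper):

let lastSaved = 0;
let saveDelay = 100;

function maybeSave(partialText, save) {
  const now = Date.now();
  if (now - lastSaved > saveDelay) {
    lastSaved = now;
    save(partialText); // persist the in-progress message
  }
  // after the first write, back off to at most ~2 writes per second
  if (saveDelay < 500) {
    saveDelay = 500;
  }
}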

View File

@@ -4,7 +4,6 @@ const { addTitle, initializeClient } = require('~/server/services/Endpoints/open
const {
handleAbort,
setHeaders,
validateEndpoint,
buildEndpointOption,
moderateText,
@@ -14,15 +13,8 @@ const router = express.Router();
router.use(moderateText);
router.post('/abort', handleAbort());
router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req, res, next) => {
await AskController(req, res, next, initializeClient, addTitle);
});
module.exports = router;

View File

@@ -1,11 +1,10 @@
const { v4 } = require('uuid');
const express = require('express');
const { actionDelimiter } = require('librechat-data-provider');
const { initializeClient } = require('~/server/services/Endpoints/assistant');
const { updateAction, getActions, deleteAction } = require('~/models/Action');
const { updateAssistant, getAssistant } = require('~/models/Assistant');
const { encryptMetadata } = require('~/server/services/ActionService');
const { logger } = require('~/config');

const router = express.Router();
@@ -18,7 +17,7 @@ const router = express.Router();
*/
router.get('/', async (req, res) => {
try {
res.json(await getActions({ user: req.user.id }));
} catch (error) {
res.status(500).json({ error: error.message });
}
@@ -45,10 +44,7 @@ router.post('/:assistant_id', async (req, res) => {
let metadata = encryptMetadata(_metadata);
const { domain } = metadata;
if (!domain) {
return res.status(400).json({ message: 'No domain provided' });
}
@@ -59,9 +55,9 @@ router.post('/:assistant_id', async (req, res) => {
/** @type {{ openai: OpenAI }} */
const { openai } = await initializeClient({ req, res });
initialPromises.push(getAssistant({ assistant_id, user: req.user.id }));
initialPromises.push(openai.beta.assistants.retrieve(assistant_id));
!!_action_id && initialPromises.push(getActions({ user: req.user.id, action_id }, true));
/** @type {[AssistantDocument, Assistant, [Action|undefined]]} */
const [assistant_data, assistant, actions_result] = await Promise.all(initialPromises);
@@ -78,7 +74,14 @@ router.post('/:assistant_id', async (req, res) => {
const { actions: _actions = [] } = assistant_data ?? {};
const actions = [];
for (const action of _actions) {
const [action_domain, current_action_id] = action.split(actionDelimiter);
if (action_domain === domain && !_action_id) {
// TODO: dupe check on the frontend
return res.status(400).json({
message: `Action sets cannot have duplicate domains - ${domain} already exists on another action`,
});
}
if (current_action_id === action_id) {
continue;
}
@@ -111,19 +114,15 @@ router.post('/:assistant_id', async (req, res) => {
const promises = [];
promises.push(
updateAssistant(
{ assistant_id, user: req.user.id },
{
actions,
user: req.user.id,
},
),
);
promises.push(openai.beta.assistants.update(assistant_id, { tools }));
promises.push(updateAction({ action_id, user: req.user.id }, { metadata, assistant_id }));
/** @type {[AssistantDocument, Assistant, Action]} */
const resolved = await Promise.all(promises);
@@ -133,15 +132,6 @@ router.post('/:assistant_id', async (req, res) => {
delete resolved[2].metadata[field];
}
}
res.json(resolved);
} catch (error) {
const message = 'Trouble updating the Assistant Action';
@@ -157,22 +147,21 @@ router.post('/:assistant_id', async (req, res) => {
* @param {string} req.params.action_id - The ID of the action to delete.
* @returns {Object} 200 - success response - application/json
*/
router.delete('/:assistant_id/:action_id', async (req, res) => {
try {
const { assistant_id, action_id } = req.params;
/** @type {{ openai: OpenAI }} */
const { openai } = await initializeClient({ req, res });
const initialPromises = [];
initialPromises.push(getAssistant({ assistant_id, user: req.user.id }));
initialPromises.push(openai.beta.assistants.retrieve(assistant_id));
/** @type {[AssistantDocument, Assistant]} */
const [assistant_data, assistant] = await Promise.all(initialPromises);
const { actions } = assistant_data ?? {};
const { tools = [] } = assistant ?? {};
let domain = '';
@@ -184,25 +173,21 @@ router.delete('/:assistant_id/:action_id/:model', async (req, res) => {
return true;
});
const updatedTools = tools.filter(
(tool) => !(tool.function && tool.function.name.includes(domain)),
);
const promises = [];
promises.push(
updateAssistant(
{ assistant_id, user: req.user.id },
{
actions: updatedActions,
user: req.user.id,
},
),
);
promises.push(openai.beta.assistants.update(assistant_id, { tools: updatedTools }));
promises.push(deleteAction({ action_id, user: req.user.id }));
await Promise.all(promises);
res.status(200).json({ message: 'Action deleted successfully' });
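
For context on the duplicate-domain guard earlier in this file: each action is stored as a single string of the form domain + actionDelimiter + action_id, so splitting on the delimiter recovers both parts. A small illustration (the delimiter value here is an assumption for the example, not necessarily what librechat-data-provider exports):

const actionDelimiter = '_action_'; // assumed value, for illustration only
const key = 'example.com' + actionDelimiter + 'abc123';
const [action_domain, current_action_id] = key.split(actionDelimiter);
// action_domain === 'example.com', current_action_id === 'abc123'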

View File

@@ -1,14 +1,10 @@
const multer = require('multer');
const express = require('express');
const { FileContext, EModelEndpoint } = require('librechat-data-provider');
const { initializeClient } = require('~/server/services/Endpoints/assistant');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { uploadImageBuffer } = require('~/server/services/Files/process');
const { updateAssistant, getAssistants } = require('~/models/Assistant');
const { deleteFileByFilter } = require('~/models/File');
const { logger } = require('~/config');
const actions = require('./actions');
@@ -52,10 +48,6 @@ router.post('/', async (req, res) => {
})
.filter((tool) => tool);
const assistant = await openai.beta.assistants.create(assistantData);
logger.debug('/assistants/', assistant);
res.status(201).json(assistant);
@@ -109,10 +101,6 @@ router.patch('/:id', async (req, res) => {
})
.filter((tool) => tool);
const updatedAssistant = await openai.beta.assistants.update(assistant_id, updateData);
res.json(updatedAssistant);
} catch (error) {
@@ -149,18 +137,19 @@ router.delete('/:id', async (req, res) => {
*/
router.get('/', async (req, res) => {
try {
/** @type {{ openai: OpenAI }} */
const { openai } = await initializeClient({ req, res });

const { limit, order, after, before } = req.query;
const response = await openai.beta.assistants.list({
limit,
order,
after,
before,
});

let body = response.body;

if (req.app.locals?.[EModelEndpoint.assistants]) {
/** @type {Partial<TAssistantEndpoint>} */
@@ -176,7 +165,7 @@ router.get('/', async (req, res) => {
res.json(body);
} catch (error) {
logger.error('[/assistants] Error listing assistants', error);
res.status(500).json({ error: error.message });
}
});
@@ -213,13 +202,7 @@ router.post('/avatar/:assistant_id', upload.single('file'), async (req, res) =>
/** @type {{ openai: OpenAI }} */
const { openai } = await initializeClient({ req, res });
const image = await uploadImageBuffer({ req, context: FileContext.avatar });
try {
_metadata = JSON.parse(_metadata);
@@ -247,13 +230,12 @@ router.post('/avatar/:assistant_id', upload.single('file'), async (req, res) =>
const promises = [];
promises.push(
updateAssistant(
{ assistant_id, user: req.user.id },
{
avatar: {
filepath: image.filepath,
source: req.app.locals.fileStrategy,
},
},
),
);
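
The GET / handler above forwards cursor-pagination parameters straight to the Assistants API. A self-contained sketch of the same call with the official openai Node SDK (key handling and the page size are assumptions for the example):

const OpenAI = require('openai');
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

async function listAssistants() {
  // returns a page object whose `data` array holds assistant records
  const page = await openai.beta.assistants.list({ limit: 20, order: 'desc' });
  for (const assistant of page.data) {
    console.log(assistant.id, assistant.name);
  }
}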

View File

@@ -1,16 +1,6 @@
const { v4 } = require('uuid');
const express = require('express');
const { EModelEndpoint, Constants, RunStatus, CacheKeys } = require('librechat-data-provider');
const {
initThread,
recordUsage,
@@ -19,23 +9,18 @@ const {
addThreadMetadata,
saveAssistantMessage,
} = require('~/server/services/Threads');
const { runAssistant, createOnTextProgress } = require('~/server/services/AssistantService');
const { addTitle, initializeClient } = require('~/server/services/Endpoints/assistant');
const { createRun, sleep } = require('~/server/services/Runs');
const { getConvo } = require('~/models/Conversation');
const getLogStores = require('~/cache/getLogStores');
const { sendMessage } = require('~/server/utils');
const { logger } = require('~/config');
const router = express.Router();
const {
setHeaders,
handleAbort,
handleAbortError,
// validateEndpoint,
buildEndpointOption,
@@ -43,8 +28,6 @@ const {
router.post('/abort', handleAbort());
/**
* @route POST /
* @desc Chat with an assistant
@@ -53,9 +36,8 @@ const ten_minutes = 1000 * 60 * 10;
* @param {express.Response} res - The response object, used to send back a response.
* @returns {void}
*/
router.post('/', buildEndpointOption, setHeaders, async (req, res) => {
logger.debug('[/assistants/chat/] req.body', req.body);
const {
text,
model,
@@ -103,16 +85,6 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
let parentMessageId = _parentId;
/** @type {TMessage[]} */
let previousMessages = [];
const userMessageId = v4();
const responseMessageId = v4();
@@ -123,194 +95,15 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
const cache = getLogStores(CacheKeys.ABORT_KEYS);
const cacheKey = `${req.user.id}:${conversationId}`;
  try {
    if (convoId && !_thread_id) {
      throw new Error('Missing thread_id for existing conversation');
    }

    if (!assistant_id) {
      throw new Error('Missing assistant_id');
    }

    /** @type {{ openai: OpenAIClient }} */
    const { openai: _openai, client } = await initializeClient({
      req,
@@ -321,11 +114,15 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
openai = _openai;
// if (thread_id) {
// previousMessages = await checkMessageGaps({ openai, thread_id, conversationId });
// }
if (previousMessages.length) {
parentMessageId = previousMessages[previousMessages.length - 1].messageId;
}
const userMessage = {
role: 'user',
content: text,
metadata: {
@@ -333,7 +130,75 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
},
};
let thread_file_ids = [];
if (convoId) {
const convo = await getConvo(req.user.id, convoId);
if (convo && convo.file_ids) {
thread_file_ids = convo.file_ids;
}
}
const file_ids = files.map(({ file_id }) => file_id);
if (file_ids.length || thread_file_ids.length) {
userMessage.file_ids = file_ids;
openai.attachedFileIds = new Set([...file_ids, ...thread_file_ids]);
}
// TODO: may allow multiple messages to be created beforehand in a future update
const initThreadBody = {
messages: [userMessage],
metadata: {
user: req.user.id,
conversationId,
},
};
const result = await initThread({ openai, body: initThreadBody, thread_id });
thread_id = result.thread_id;
createOnTextProgress({
openai,
conversationId,
userMessageId,
messageId: responseMessageId,
thread_id,
});
const requestMessage = {
user: req.user.id,
text,
messageId: userMessageId,
parentMessageId,
// TODO: make sure client sends correct format for `files`, use zod
files,
file_ids,
conversationId,
isCreatedByUser: true,
assistant_id,
thread_id,
model: assistant_id,
};
previousMessages.push(requestMessage);
await saveUserMessage({ ...requestMessage, model });
const conversation = {
conversationId,
// TODO: title feature
title: 'New Chat',
endpoint: EModelEndpoint.assistants,
promptPrefix: promptPrefix,
instructions: instructions,
assistant_id,
// model,
};
if (file_ids.length) {
conversation.file_ids = file_ids;
}
/** @type {CreateRunBody} */
const body = {
assistant_id,
model,
@@ -347,255 +212,51 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
body.instructions = instructions;
}
    /* NOTE:
     * By default, a Run will use the model and tools configuration specified in Assistant object,
     * but you can override most of these when creating the Run for added flexibility:
     */
    const run = await createRun({
      openai,
      thread_id,
      body,
    });

    run_id = run.id;
    await cache.set(cacheKey, `${thread_id}:${run_id}`);

    sendMessage(res, {
      sync: true,
      conversationId,
      // messages: previousMessages,
      requestMessage,
      responseMessage: {
        user: req.user.id,
        messageId: openai.responseMessage.messageId,
        parentMessageId: userMessageId,
        conversationId,
        assistant_id,
        thread_id,
        model: assistant_id,
      },
    });

    // todo: retry logic
    let response = await runAssistant({ openai, thread_id, run_id });
    logger.debug('[/assistants/chat/] response', response);

    if (response.run.status === RunStatus.IN_PROGRESS) {
      response = await runAssistant({
        openai,
        thread_id,
        run_id,
        in_progress: openai.in_progress,
      });
    }

    /** @type {ResponseMessage} */
    const responseMessage = {
      ...openai.responseMessage,
      parentMessageId: userMessageId,
      conversationId,
      user: req.user.id,
@@ -604,7 +265,11 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
model: assistant_id,
};
// TODO: token count from usage returned in run
// TODO: parse responses, save to db, send to user
sendMessage(res, {
title: 'New Chat',
final: true,
conversation,
requestMessage: {
@@ -619,7 +284,7 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
if (parentMessageId === Constants.NO_PARENT && !_thread_id) {
addTitle(req, {
text,
responseText: openai.responseText,
conversationId,
client,
});
@@ -634,7 +299,7 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
if (!response.run.usage) {
await sleep(3000);
const completedRun = await openai.beta.threads.runs.retrieve(thread_id, run.id);
if (completedRun.usage) {
await recordUsage({
...completedRun.usage,
@@ -652,7 +317,62 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
});
}
} catch (error) {
if (error.message === 'Run cancelled') {
return res.end();
}
logger.error('[/assistants/chat/]', error);
if (!openai || !thread_id || !run_id) {
return res.status(500).json({ error: 'The Assistant run failed to initialize' });
}
try {
await cache.delete(cacheKey);
const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
logger.debug('Cancelled run:', cancelledRun);
} catch (error) {
logger.error('[abortRun] Error cancelling run', error);
}
await sleep(2000);
try {
const run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
await recordUsage({
...run.usage,
model: run.model,
user: req.user.id,
conversationId,
});
} catch (error) {
logger.error('[/assistants/chat/] Error fetching or processing run', error);
}
try {
const runMessages = await checkMessageGaps({
openai,
run_id,
thread_id,
conversationId,
latestMessageId: responseMessageId,
});
const finalEvent = {
title: 'New Chat',
final: true,
conversation: await getConvo(req.user.id, conversationId),
runMessages,
};
if (res.headersSent && finalEvent) {
return sendMessage(res, finalEvent);
}
res.json(finalEvent);
} catch (error) {
logger.error('[/assistants/chat/] Error finalizing error process', error);
return res.status(500).json({ error: 'The Assistant run failed' });
}
}
});
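
The chat route above drives the standard Assistants run lifecycle: create a run on the thread, wait until it leaves the queued/in_progress states, then read token usage from the completed run. A hedged, stand-alone sketch of that loop with the openai SDK (polling interval and status handling are simplified relative to runAssistant):

async function runToCompletion(openai, thread_id, assistant_id) {
  let run = await openai.beta.threads.runs.create(thread_id, { assistant_id });
  while (run.status === 'queued' || run.status === 'in_progress') {
    await new Promise((resolve) => setTimeout(resolve, 500)); // simple fixed-interval poll
    run = await openai.beta.threads.runs.retrieve(thread_id, run.id);
  }
  return run; // run.usage carries prompt/completion token counts when present
}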

View File

@@ -43,8 +43,6 @@ router.get('/', async function (req, res) {
isBirthday() ||
isEnabled(process.env.SHOW_BIRTHDAY_ICON) ||
process.env.SHOW_BIRTHDAY_ICON === '',
};
if (typeof process.env.CUSTOM_FOOTER === 'string') {

View File

@@ -1,10 +1,10 @@
const express = require('express');
const { CacheKeys } = require('librechat-data-provider');
const { initializeClient } = require('~/server/services/Endpoints/assistant');
const { getConvosByPage, deleteConvos, getConvo, saveConvo } = require('~/models/Conversation');
const requireJwtAuth = require('~/server/middleware/requireJwtAuth');
const getLogStores = require('~/cache/getLogStores');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');
const router = express.Router();

View File

@@ -4,7 +4,6 @@ const { initializeClient } = require('~/server/services/Endpoints/anthropic');
const {
setHeaders,
handleAbort,
validateEndpoint,
buildEndpointOption,
} = require('~/server/middleware');
@@ -13,15 +12,8 @@ const router = express.Router();
router.post('/abort', handleAbort());
router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req, res, next) => {
await EditController(req, res, next, initializeClient);
});
module.exports = router;
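
All of these edit routes share the same shape: a middleware chain that validates the endpoint, builds endpointOption from the request body, and prepares streaming headers before the controller runs. A schematic sketch of how Express executes that chain in order (the middleware bodies here are stand-ins; the real implementations live in ~/server/middleware):

const express = require('express');
const router = express.Router();

const validateEndpoint = (req, res, next) => next(); // reject unsupported endpoints
const buildEndpointOption = (req, res, next) => next(); // normalize req.body into endpointOption
const setHeaders = (req, res, next) => {
  res.setHeader('Content-Type', 'text/event-stream'); // streaming setup (assumed detail)
  next();
};

router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req, res) => {
  res.end(); // controller logic goes here
});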

View File

@@ -5,7 +5,6 @@ const { addTitle } = require('~/server/services/Endpoints/openAI');
const {
handleAbort,
setHeaders,
validateEndpoint,
buildEndpointOption,
} = require('~/server/middleware');
@@ -14,15 +13,8 @@ const router = express.Router();
router.post('/abort', handleAbort());
router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req, res, next) => {
await EditController(req, res, next, initializeClient, addTitle);
});
module.exports = router;

View File

@@ -4,7 +4,6 @@ const { initializeClient } = require('~/server/services/Endpoints/google');
const {
setHeaders,
handleAbort,
validateEndpoint,
buildEndpointOption,
} = require('~/server/middleware');
@@ -13,15 +12,8 @@ const router = express.Router();
router.post('/abort', handleAbort());
router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req, res, next) => {
await EditController(req, res, next, initializeClient);
});
module.exports = router;

View File

@@ -1,94 +1,88 @@
const express = require('express');
const { getResponseSender } = require('librechat-data-provider');
const { sendMessage, createOnProgress, formatSteps, formatAction } = require('~/server/utils');
const { initializeClient } = require('~/server/services/Endpoints/gptPlugins');
const { saveMessage, getConvoTitle, getConvo } = require('~/models');
const {
  handleAbort,
  createAbortController,
  handleAbortError,
  setHeaders,
  validateEndpoint,
  buildEndpointOption,
  moderateText,
} = require('~/server/middleware');
const { validateTools } = require('~/app');
const { logger } = require('~/config');

const router = express.Router();

router.use(moderateText);
router.post('/abort', handleAbort());

router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req, res) => {
  let {
    text,
    generation,
    endpointOption,
    conversationId,
    responseMessageId,
    isContinued = false,
    parentMessageId = null,
    overrideParentMessageId = null,
  } = req.body;

  logger.debug('[/edit/gptPlugins]', {
    text,
    generation,
    isContinued,
    conversationId,
    ...endpointOption,
  });

  let metadata;
  let userMessage;
  let promptTokens;
  let lastSavedTimestamp = 0;
  let saveDelay = 100;
  const sender = getResponseSender({ ...endpointOption, model: endpointOption.modelOptions.model });
  const userMessageId = parentMessageId;
  const user = req.user.id;

  const plugin = {
    loading: true,
    inputs: [],
    latest: null,
    outputs: null,
  };

  const addMetadata = (data) => (metadata = data);
  const getReqData = (data = {}) => {
    for (let key in data) {
      if (key === 'userMessage') {
        userMessage = data[key];
      } else if (key === 'responseMessageId') {
        responseMessageId = data[key];
      } else if (key === 'promptTokens') {
        promptTokens = data[key];
      }
    }
  };

  const {
    onProgress: progressCallback,
    sendIntermediateMessage,
    getPartialText,
  } = createOnProgress({
    generation,
    onProgress: ({ text: partialText }) => {
      const currentTimestamp = Date.now();

      if (plugin.loading === true) {
        plugin.loading = false;
      }

      if (currentTimestamp - lastSavedTimestamp > saveDelay) {
        lastSavedTimestamp = currentTimestamp;
        saveMessage({
          messageId: responseMessageId,
          sender,
          conversationId,
@@ -100,95 +94,104 @@ router.post(
          error: false,
          user,
        });
      }

      if (saveDelay < 500) {
        saveDelay = 500;
      }
    },
  });

  const onAgentAction = (action, start = false) => {
    const formattedAction = formatAction(action);
    plugin.inputs.push(formattedAction);
    plugin.latest = formattedAction.plugin;
    if (!start) {
      saveMessage({ ...userMessage, user });
    }
    sendIntermediateMessage(res, { plugin });
    // logger.debug('PLUGIN ACTION', formattedAction);
  };

  const onChainEnd = (data) => {
    let { intermediateSteps: steps } = data;
    plugin.outputs = steps && steps[0].action ? formatSteps(steps) : 'An error occurred.';
    plugin.loading = false;
    saveMessage({ ...userMessage, user });
    sendIntermediateMessage(res, { plugin });
    // logger.debug('CHAIN END', plugin.outputs);
  };

  const getAbortData = () => ({
    sender,
    conversationId,
    messageId: responseMessageId,
    parentMessageId: overrideParentMessageId ?? userMessageId,
    text: getPartialText(),
    plugin: { ...plugin, loading: false },
    userMessage,
    promptTokens,
  });

  const { abortController, onStart } = createAbortController(req, res, getAbortData);

  try {
    endpointOption.tools = await validateTools(user, endpointOption.tools);
    const { client } = await initializeClient({ req, res, endpointOption });

    let response = await client.sendMessage(text, {
      user,
      generation,
      isContinued,
      isEdited: true,
      conversationId,
      parentMessageId,
      responseMessageId,
      overrideParentMessageId,
      getReqData,
      onAgentAction,
      onChainEnd,
      onStart,
      addMetadata,
      ...endpointOption,
      onProgress: progressCallback.call(null, {
        res,
        text,
        plugin,
        parentMessageId: overrideParentMessageId || userMessageId,
      }),
      abortController,
    });

    if (overrideParentMessageId) {
      response.parentMessageId = overrideParentMessageId;
    }

    if (metadata) {
      response = { ...response, ...metadata };
    }

    logger.debug('[/edit/gptPlugins] CLIENT RESPONSE', response);
    response.plugin = { ...plugin, loading: false };
    await saveMessage({ ...response, user });

    sendMessage(res, {
      title: await getConvoTitle(user, conversationId),
      final: true,
      conversation: await getConvo(user, conversationId),
      requestMessage: userMessage,
      responseMessage: response,
    });
    res.end();
  } catch (error) {
    const partialText = getPartialText();
    handleAbortError(res, req, error, {
      partialText,
      conversationId,
      sender,
      messageId: responseMessageId,
      parentMessageId: userMessageId ?? parentMessageId,
    });
  }
});
module.exports = router;
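
A note on the abort flow above: getAbortData is a closure over the handler's local state, so when the request is aborted the middleware can snapshot the partial text and plugin state as they stood mid-stream. A simplified stand-in that shows the idea (this is not the actual createAbortController middleware):

function createAbortController(req, res, getAbortData) {
  const abortController = new AbortController();
  res.on('close', () => {
    const snapshot = getAbortData(); // e.g. { text, plugin, conversationId, ... }
    abortController.abort();
    // ...persist the snapshot as the final, unfinished message...
  });
  return { abortController, onStart: () => {} };
}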

View File

@@ -4,7 +4,6 @@ const { initializeClient } = require('~/server/services/Endpoints/openAI');
const {
handleAbort,
setHeaders,
validateEndpoint,
buildEndpointOption,
moderateText,
@@ -14,15 +13,8 @@ const router = express.Router();
router.use(moderateText);
router.post('/abort', handleAbort());
router.post('/', validateEndpoint, buildEndpointOption, setHeaders, async (req, res, next) => {
await EditController(req, res, next, initializeClient);
});
module.exports = router;

View File

@@ -18,15 +18,13 @@ router.post('/', upload.single('input'), async (req, res) => {
}
const fileStrategy = req.app.locals.fileStrategy;
const webPBuffer = await resizeAvatar({
userId,
input,
});
const { processAvatar } = getStrategyFunctions(fileStrategy);
const url = await processAvatar({ buffer: webPBuffer, userId, manual });
res.json({ url });
} catch (error) {

View File

@@ -1,13 +1,12 @@
const axios = require('axios');
const fs = require('fs').promises;
const express = require('express');
const { isUUID } = require('librechat-data-provider');
const {
filterFile,
processFileUpload,
processDeleteRequest,
} = require('~/server/services/Files/process');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { getFiles } = require('~/models/File');
const { logger } = require('~/config');
@@ -45,7 +44,7 @@ router.delete('/', async (req, res) => {
return false;
}
if (/^file-/.test(file.file_id)) {
return true;
}
@@ -66,65 +65,28 @@ router.delete('/', async (req, res) => {
}
});
router.get('/download/:fileId', async (req, res) => {
  try {
    const { fileId } = req.params;

    const options = {
      headers: {
        // TODO: Client initialization for OpenAI API Authentication
        Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
      },
      responseType: 'stream',
    };

    const fileResponse = await axios.get(`https://api.openai.com/v1/files/${fileId}`, {
      headers: options.headers,
    });
    const { filename } = fileResponse.data;

    const response = await axios.get(`https://api.openai.com/v1/files/${fileId}/content`, options);
    res.setHeader('Content-Disposition', `attachment; filename="${filename}"`);
    response.data.pipe(res);
  } catch (error) {
    console.error('Error downloading file:', error);
res.status(500).send('Error downloading file');
}
});
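
The rewritten download handler relies on axios stream support: with responseType: 'stream', axios resolves with a readable stream that can be piped to the Express response without buffering the whole file in memory. A minimal sketch of that technique (the URL and token here are illustrative):

const axios = require('axios');

async function proxyDownload(url, token, res, filename) {
  const response = await axios.get(url, {
    headers: { Authorization: 'Bearer ' + token },
    responseType: 'stream',
  });
  res.setHeader('Content-Disposition', 'attachment; filename="' + filename + '"');
  response.data.pipe(res); // stream the upstream body straight to the client
}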

View File

@@ -15,7 +15,6 @@ const storage = multer.diskStorage({
},
filename: function (req, file, cb) {
req.file_id = crypto.randomUUID();
cb(null, `${file.originalname}`);
},
});
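
For context, this storage object plugs into multer's diskStorage API, which takes destination and filename callbacks; the hunk above drops the decodeURIComponent step from the filename callback. A runnable sketch of the same setup (the upload directory is an assumption):

const crypto = require('crypto');
const multer = require('multer');

const storage = multer.diskStorage({
  destination: (req, file, cb) => cb(null, '/tmp/uploads'), // assumed path
  filename: (req, file, cb) => {
    req.file_id = crypto.randomUUID(); // tag the request with a fresh file id
    cb(null, file.originalname);
  },
});
const upload = multer({ storage });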

View File

@@ -17,7 +17,6 @@ const user = require('./user');
const config = require('./config');
const assistants = require('./assistants');
const files = require('./files');
module.exports = {
search,
@@ -39,5 +38,4 @@ module.exports = {
config,
assistants,
files,
};

View File

@@ -1,8 +1,8 @@
const express = require('express');
const router = express.Router();
const controller = require('../controllers/ModelController');
const { requireJwtAuth } = require('../middleware/');

router.get('/', requireJwtAuth, controller);
module.exports = router;

View File

@@ -1,7 +0,0 @@
const express = require('express');
const paths = require('~/config/paths');
const router = express.Router();
router.use(express.static(paths.imageOutput));
module.exports = router;

Some files were not shown because too many files have changed in this diff.