Compare commits
v0.7.1...v2-assista (80 commits)

1c5e827ce9
84f68f9a15
fb6e87c36d
bce0584c67
5c15b60601
e55a89270e
3eabbd572e
6082e26716
bc46ccdcad
2bdbff5141
f0e8cca5df
38ad36c1c5
53fe2f6453
31479d6a48
612a58737d
8a7f36f581
4a5d06a774
fc9368e0e7
64bf0800a0
94eeec354e
e42709bd1f
638ac5bba6
5920672a8c
a0d1e2a5f8
4ffc1414a8
df6183db0f
3816219936
6fc664e4a3
89899164ed
c83d9d61d4
bcdddaed72
a4de635719
4a32d7466a
2ec821ea4c
978009787c
27e7621b6a
2b37a44b8d
98c96cd020
8f20fb28e5
d73ea8e1f2
83bae9e9d9
6ba7f60eec
5293b73b6d
b6d1f5fa53
c94278be85
3c5fa40435
b6d6343f54
89b1e33be0
436f7195b5
2aec4a6250
b77bd19092
446ffe0417
b9bcaee656
110c0535fb
25fceb78b7
c8baceac76
a0288f1c5c
5d3c90be26
ab6fbe48f1
3b44741cf9
d21a05606e
0e50c07e3f
a5cac03fa4
ba4fa6150e
463ca5d613
039c7ae880
63ef15ab63
8a78500fe2
144fd5f6aa
2720327aa1
4d0806d3e8
5b5f9b950b
3ccff19821
11d5e232b3
099aa9dead
4121818124
ca9a0fe629
bde6bb0152
667f5f91fe
75da75be08
.env.example (57 changes)

@@ -2,11 +2,9 @@
 # LibreChat Configuration #
 #=====================================================================#
 # Please refer to the reference documentation for assistance #
-# with configuring your LibreChat environment. The guide is #
-# available both online and within your local LibreChat #
-# directory: #
-# Online: https://docs.librechat.ai/install/configuration/dotenv.html #
-# Locally: ./docs/install/configuration/dotenv.md #
+# with configuring your LibreChat environment. #
+# #
+# https://www.librechat.ai/docs/configuration/dotenv #
 #=====================================================================#

 #==================================================#
@@ -62,15 +60,17 @@ PROXY=
 #===================================#
 # Known Endpoints - librechat.yaml #
 #===================================#
-# https://docs.librechat.ai/install/configuration/ai_endpoints.html
+# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints

-# GROQ_API_KEY=
-# SHUTTLEAI_KEY=
-# OPENROUTER_KEY=
-# MISTRAL_API_KEY=
 # ANYSCALE_API_KEY=
 # APIPIE_API_KEY=
 # FIREWORKS_API_KEY=
+# GROQ_API_KEY=
 # HUGGINGFACE_TOKEN=
+# MISTRAL_API_KEY=
+# OPENROUTER_KEY=
+# PERPLEXITY_API_KEY=
+# SHUTTLEAI_API_KEY=
 # TOGETHERAI_API_KEY=

 #============#
@@ -113,15 +113,34 @@ BINGAI_TOKEN=user_provided
 #============#

 GOOGLE_KEY=user_provided
-# GOOGLE_MODELS=gemini-pro,gemini-pro-vision,chat-bison,chat-bison-32k,codechat-bison,codechat-bison-32k,text-bison,text-bison-32k,text-unicorn,code-gecko,code-bison,code-bison-32k
 # GOOGLE_REVERSE_PROXY=

+# Gemini API
+# GOOGLE_MODELS=gemini-1.5-flash-latest,gemini-1.0-pro,gemini-1.0-pro-001,gemini-1.0-pro-latest,gemini-1.0-pro-vision-latest,gemini-1.5-pro-latest,gemini-pro,gemini-pro-vision
+
+# Vertex AI
+# GOOGLE_MODELS=gemini-1.5-flash-preview-0514,gemini-1.5-pro-preview-0409,gemini-1.0-pro-vision-001,gemini-pro,gemini-pro-vision,chat-bison,chat-bison-32k,codechat-bison,codechat-bison-32k,text-bison,text-bison-32k,text-unicorn,code-gecko,code-bison,code-bison-32k
+
+# Google Gemini Safety Settings
+# NOTE (Vertex AI): You do not have access to the BLOCK_NONE setting by default.
+# To use this restricted HarmBlockThreshold setting, you will need to either:
+#
+# (a) Get access through an allowlist via your Google account team
+# (b) Switch your account type to monthly invoiced billing following this instruction:
+# https://cloud.google.com/billing/docs/how-to/invoiced-billing
+#
+# GOOGLE_SAFETY_SEXUALLY_EXPLICIT=BLOCK_ONLY_HIGH
+# GOOGLE_SAFETY_HATE_SPEECH=BLOCK_ONLY_HIGH
+# GOOGLE_SAFETY_HARASSMENT=BLOCK_ONLY_HIGH
+# GOOGLE_SAFETY_DANGEROUS_CONTENT=BLOCK_ONLY_HIGH

 #============#
 # OpenAI #
 #============#

 OPENAI_API_KEY=user_provided
-# OPENAI_MODELS=gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k
+# OPENAI_MODELS=gpt-4o,gpt-3.5-turbo-0125,gpt-3.5-turbo-0301,gpt-3.5-turbo,gpt-4,gpt-4-0613,gpt-4-vision-preview,gpt-3.5-turbo-0613,gpt-3.5-turbo-16k-0613,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview,gpt-3.5-turbo-1106,gpt-3.5-turbo-instruct,gpt-3.5-turbo-instruct-0914,gpt-3.5-turbo-16k

 DEBUG_OPENAI=false
@@ -143,7 +162,17 @@ DEBUG_OPENAI=false
 ASSISTANTS_API_KEY=user_provided
 # ASSISTANTS_BASE_URL=
-# ASSISTANTS_MODELS=gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview
+# ASSISTANTS_MODELS=gpt-4o,gpt-3.5-turbo-0125,gpt-3.5-turbo-16k-0613,gpt-3.5-turbo-16k,gpt-3.5-turbo,gpt-4,gpt-4-0314,gpt-4-32k-0314,gpt-4-0613,gpt-3.5-turbo-0613,gpt-3.5-turbo-1106,gpt-4-0125-preview,gpt-4-turbo-preview,gpt-4-1106-preview
+
+#==========================#
+# Azure Assistants API #
+#==========================#
+
+# Note: You should map your credentials with custom variables according to your Azure OpenAI Configuration
+# The models for Azure Assistants are also determined by your Azure OpenAI configuration.
+
+# More info, including how to enable use of Assistants with Azure here:
+# https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints/azure#using-assistants-with-azure

 #============#
 # OpenRouter #
@@ -155,7 +184,7 @@ ASSISTANTS_API_KEY=user_provided
 # Plugins #
 #============#

-# PLUGIN_MODELS=gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613
+# PLUGIN_MODELS=gpt-4o,gpt-4,gpt-4-turbo-preview,gpt-4-0125-preview,gpt-4-1106-preview,gpt-4-0613,gpt-3.5-turbo,gpt-3.5-turbo-0125,gpt-3.5-turbo-1106,gpt-3.5-turbo-0613

 DEBUG_PLUGINS=true
@@ -132,6 +132,13 @@ module.exports = {
       },
     ],
   },
+  {
+    files: './config/translations/**/*.ts',
+    parser: '@typescript-eslint/parser',
+    parserOptions: {
+      project: './config/translations/tsconfig.json',
+    },
+  },
   {
     files: ['./packages/data-provider/specs/**/*.ts'],
     parserOptions: {
.github/ISSUE_TEMPLATE/FEATURE-REQUEST.yml (2 changes, vendored)

@@ -43,7 +43,7 @@ body:
     id: terms
     attributes:
       label: Code of Conduct
-      description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/CODE_OF_CONDUCT.md)
+      description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md)
     options:
       - label: I agree to follow this project's Code of Conduct
         required: true
.github/ISSUE_TEMPLATE/QUESTION.yml (2 changes, vendored)

@@ -44,7 +44,7 @@ body:
     id: terms
     attributes:
      label: Code of Conduct
-      description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/CODE_OF_CONDUCT.md)
+      description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/danny-avila/LibreChat/blob/main/.github/CODE_OF_CONDUCT.md)
     options:
       - label: I agree to follow this project's Code of Conduct
         required: true
.github/pull_request_template.md (9 changes, vendored)

@@ -1,7 +1,10 @@
 # Pull Request Template

-⚠️ Before Submitting a PR, Please Review:
-- Please ensure that you have thoroughly read and understood the [Contributing Docs](https://github.com/danny-avila/LibreChat/blob/main/.github/CONTRIBUTING.md) before submitting your Pull Request.
+### ⚠️ Before Submitting a PR, read the [Contributing Docs](https://github.com/danny-avila/LibreChat/blob/main/.github/CONTRIBUTING.md) in full!
+
+⚠️ Documentation Updates Notice:
+- Kindly note that documentation updates are managed in this repository: [librechat.ai](https://github.com/LibreChat-AI/librechat.ai)

 ## Summary

@@ -16,8 +19,6 @@ Please delete any irrelevant options.
 - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
-- [ ] This change requires a documentation update
 - [ ] Translation update
+- [ ] Documentation update

 ## Testing

@@ -37,4 +38,4 @@ Please delete any irrelevant options.
 - [ ] I have written tests demonstrating that my changes are effective or that my feature works
 - [ ] Local unit tests pass with my changes
 - [ ] Any changes dependent on mine have been merged and published in downstream modules.
-- [ ] New documents have been locally validated with mkdocs
+- [ ] A pull request for updating the documentation has been submitted.
.github/workflows/frontend-review.yml (36 changes, vendored)

@@ -1,11 +1,6 @@
 #github action to run unit tests for frontend with jest
 name: Frontend Unit Tests

 on:
-  # push:
-  #   branches:
-  #     - main
-  #     - dev
-  #     - release/*
   pull_request:
     branches:
       - main
@@ -14,9 +9,10 @@ on:
     paths:
       - 'client/**'
      - 'packages/**'

 jobs:
-  tests_frontend:
-    name: Run frontend unit tests
+  tests_frontend_ubuntu:
+    name: Run frontend unit tests on Ubuntu
     timeout-minutes: 60
     runs-on: ubuntu-latest
     steps:
@@ -35,4 +31,26 @@ jobs:
       - name: Run unit tests
         run: npm run test:ci --verbose
         working-directory: client
+
+  tests_frontend_windows:
+    name: Run frontend unit tests on Windows
+    timeout-minutes: 60
+    runs-on: windows-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Use Node.js 20.x
+        uses: actions/setup-node@v4
+        with:
+          node-version: 20
+          cache: 'npm'
+
+      - name: Install dependencies
+        run: npm ci
+
+      - name: Build Client
+        run: npm run frontend:ci
+
+      - name: Run unit tests
+        run: npm run test:ci --verbose
+        working-directory: client
.github/workflows/mkdocs.yaml (27 changes, vendored, deleted)

@@ -1,27 +0,0 @@
-name: mkdocs
-on:
-  push:
-    branches:
-      - main
-permissions:
-  contents: write
-jobs:
-  deploy:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-python@v4
-        with:
-          python-version: 3.x
-      - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV
-      - uses: actions/cache@v3
-        with:
-          key: mkdocs-material-${{ env.cache_id }}
-          path: .cache
-          restore-keys: |
-            mkdocs-material-
-      - run: pip install mkdocs-material
-      - run: pip install mkdocs-nav-weight
-      - run: pip install mkdocs-publisher
-      - run: pip install mkdocs-exclude
-      - run: mkdocs gh-deploy --force
.gitignore (7 changes, vendored)

@@ -21,6 +21,10 @@ coverage
 # Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
 .grunt

+# translation services
+config/translations/stores/*
+client/src/localization/languages/*_missing_keys.json
+
 # Compiled Dirs (http://nodejs.org/api/addons.html)
 build/
 dist/
@@ -69,6 +73,8 @@ src/style - official.css
 /playwright/.cache/
 .DS_Store
 *.code-workspace
+.idx
+monospace.json
 .idea
 *.iml
 *.pem
@@ -76,6 +82,7 @@ config.local.ts
 **/storageState.json
 junit.xml
+**/.venv/
 **/venv/

 # docker override file
 docker-compose.override.yaml
Dockerfile (32 changes)

@@ -1,10 +1,8 @@
-# v0.7.1
+# v0.7.2

 # Base node image
-FROM node:18-alpine3.18 AS node
+FROM node:20-alpine AS node

-RUN apk add g++ make py3-pip
-RUN npm install -g node-gyp
 RUN apk --no-cache add curl

 RUN mkdir -p /app && chown node:node /app
@@ -14,20 +12,20 @@ USER node
 COPY --chown=node:node . .

-# Allow mounting of these files, which have no default
-# values.
-RUN touch .env
-RUN npm config set fetch-retry-maxtimeout 600000
-RUN npm config set fetch-retries 5
-RUN npm config set fetch-retry-mintimeout 15000
-RUN npm install --no-audit
+RUN \
+    # Allow mounting of these files, which have no default
+    touch .env ; \
+    # Create directories for the volumes to inherit the correct permissions
+    mkdir -p /app/client/public/images /app/api/logs ; \
+    npm config set fetch-retry-maxtimeout 600000 ; \
+    npm config set fetch-retries 5 ; \
+    npm config set fetch-retry-mintimeout 15000 ; \
+    npm install --no-audit; \
+    # React client build
+    NODE_OPTIONS="--max-old-space-size=2048" npm run frontend; \
+    npm prune --production; \
+    npm cache clean --force

-# React client build
-ENV NODE_OPTIONS="--max-old-space-size=2048"
-RUN npm run frontend
-
-# Create directories for the volumes to inherit
-# the correct permissions
-RUN mkdir -p /app/client/public/images /app/api/logs

 # Node API setup
@@ -1,4 +1,4 @@
-# v0.7.1
+# v0.7.2

 # Build API, Client and Data Provider
 FROM node:20-alpine AS base
@@ -7,32 +7,31 @@ FROM node:20-alpine AS base
 FROM base AS data-provider-build
 WORKDIR /app/packages/data-provider
 COPY ./packages/data-provider ./
-RUN npm install
+RUN npm install; npm cache clean --force
 RUN npm run build
+RUN npm prune --production

 # React client build
-FROM data-provider-build AS client-build
+FROM base AS client-build
 WORKDIR /app/client
 COPY ./client/package*.json ./
 # Copy data-provider to client's node_modules
-RUN mkdir -p /app/client/node_modules/librechat-data-provider/
-RUN cp -R /app/packages/data-provider/* /app/client/node_modules/librechat-data-provider/
-RUN npm install
+COPY --from=data-provider-build /app/packages/data-provider/ /app/client/node_modules/librechat-data-provider/
+RUN npm install; npm cache clean --force
 COPY ./client/ ./
 ENV NODE_OPTIONS="--max-old-space-size=2048"
 RUN npm run build

 # Node API setup
-FROM data-provider-build AS api-build
+FROM base AS api-build
 WORKDIR /app/api
 COPY api/package*.json ./
 COPY api/ ./
 # Copy helper scripts
 COPY config/ ./
 # Copy data-provider to API's node_modules
-RUN mkdir -p /app/api/node_modules/librechat-data-provider/
-RUN cp -R /app/packages/data-provider/* /app/api/node_modules/librechat-data-provider/
-RUN npm install
+COPY --from=data-provider-build /app/packages/data-provider/ /app/api/node_modules/librechat-data-provider/
+RUN npm install --include prod; npm cache clean --force
 COPY --from=client-build /app/client/dist /app/client/dist
 EXPOSE 3080
 ENV HOST=0.0.0.0
README.md (60 changes)

@@ -1,6 +1,6 @@
 <p align="center">
   <a href="https://librechat.ai">
-    <img src="docs/assets/LibreChat.svg" height="256">
+    <img src="client/public/assets/logo.svg" height="256">
   </a>
   <h1 align="center">
     <a href="https://librechat.ai">LibreChat</a>
@@ -41,8 +41,16 @@
 # 📃 Features

 - 🖥️ UI matching ChatGPT, including Dark mode, Streaming, and latest updates
+- 🤖 AI model selection:
+  - OpenAI, Azure OpenAI, BingAI, ChatGPT, Google Vertex AI, Anthropic (Claude), Plugins, Assistants API (including Azure Assistants)
+- ✅ Compatible across both **[Remote & Local AI services](https://www.librechat.ai/docs/configuration/librechat_yaml/ai_endpoints):**
+  - groq, Ollama, Cohere, Mistral AI, Apple MLX, koboldcpp, OpenRouter, together.ai, Perplexity, ShuttleAI, and more
+- 💾 Create, Save, & Share Custom Presets
+- 🔀 Switch between AI Endpoints and Presets, mid-chat
+- 🔄 Edit, Resubmit, and Continue Messages with Conversation branching
+- 🌿 Fork Messages & Conversations for Advanced Context control
 - 💬 Multimodal Chat:
-  - Upload and analyze images with Claude 3, GPT-4, and Gemini Vision 📸
+  - Upload and analyze images with Claude 3, GPT-4 (including `gpt-4o`), and Gemini Vision 📸
   - Chat with Files using Custom Endpoints, OpenAI, Azure, Anthropic, & Google. 🗃️
   - Advanced Agents with Files, Code Interpreter, Tools, and API Actions 🔦
     - Available through the [OpenAI Assistants API](https://platform.openai.com/docs/assistants/overview) 🌤️
@@ -50,18 +58,18 @@
 - 🌎 Multilingual UI:
   - English, 中文, Deutsch, Español, Français, Italiano, Polski, Português Brasileiro,
   - Русский, 日本語, Svenska, 한국어, Tiếng Việt, 繁體中文, العربية, Türkçe, Nederlands, עברית
-- 🤖 AI model selection: OpenAI, Azure OpenAI, BingAI, ChatGPT, Google Vertex AI, Anthropic (Claude), Plugins, Assistants API (including Azure Assistants)
-- 💾 Create, Save, & Share Custom Presets
-- 🔄 Edit, Resubmit, and Continue messages with conversation branching
 - 🎨 Customizable Dropdown & Interface: Adapts to both power users and newcomers.
 - 📥 Import Conversations from LibreChat, ChatGPT, Chatbot UI
 - 📤 Export conversations as screenshots, markdown, text, json.
 - 🔍 Search all messages/conversations
 - 🔌 Plugins, including web access, image generation with DALL-E-3 and more
 - 👥 Multi-User, Secure Authentication with Moderation and Token spend tools
-- ⚙️ Configure Proxy, Reverse Proxy, Docker, & many Deployment options
+- ⚙️ Configure Proxy, Reverse Proxy, Docker, & many Deployment options:
+  - Use completely local or deploy on the cloud
 - 📖 Completely Open-Source & Built in Public
 - 🧑‍🤝‍🧑 Community-driven development, support, and feedback

-[For a thorough review of our features, see our docs here](https://docs.librechat.ai/features/plugins/introduction.html) 📚
+[For a thorough review of our features, see our docs here](https://docs.librechat.ai/) 📚

 ## 🪶 All-In-One AI Conversations with LibreChat

@@ -69,37 +77,49 @@ LibreChat brings together the future of assistant AIs with the revolutionary tec

 With LibreChat, you no longer need to opt for ChatGPT Plus and can instead use free or pay-per-call APIs. We welcome contributions, cloning, and forking to enhance the capabilities of this advanced chatbot platform.

 <!-- https://github.com/danny-avila/LibreChat/assets/110412045/c1eb0c0f-41f6-4335-b982-84b278b53d59 -->

-[](https://youtu.be/pNIOs1ovsXw)
+[](https://www.youtube.com/watch?v=YLVUW5UP9N0)
 Click on the thumbnail to open the video☝️

 ---

-## 📚 Documentation
+## 🌐 Resources

-For more information on how to use our advanced features, install and configure our software, and access our guidelines and tutorials, please check out our documentation at [docs.librechat.ai](https://docs.librechat.ai)
+**GitHub Repo:**
+  - **RAG API:** [github.com/danny-avila/rag_api](https://github.com/danny-avila/rag_api)
+  - **Website:** [github.com/LibreChat-AI/librechat.ai](https://github.com/LibreChat-AI/librechat.ai)
+
+**Other:**
+  - **Website:** [librechat.ai](https://librechat.ai)
+  - **Documentation:** [docs.librechat.ai](https://docs.librechat.ai)
+  - **Blog:** [blog.librechat.ai](https://docs.librechat.ai)

 ---

 ## 📝 Changelog

-Keep up with the latest updates by visiting the releases page - [Releases](https://github.com/danny-avila/LibreChat/releases)
+Keep up with the latest updates by visiting the releases page and notes:
+- [Releases](https://github.com/danny-avila/LibreChat/releases)
+- [Changelog](https://www.librechat.ai/changelog)

-**⚠️ [Breaking Changes](docs/general_info/breaking_changes.md)**
-Please consult the breaking changes before updating.
+**⚠️ Please consult the [changelog](https://www.librechat.ai/changelog) for breaking changes before updating.**

 ---

 ## ⭐ Star History

-<p align="center">
-  <a href="https://trendshift.io/repositories/4685" target="_blank"><img src="https://trendshift.io/api/badge/repositories/4685" alt="danny-avila%2FLibreChat | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
-  <a href="https://star-history.com/#danny-avila/LibreChat&Date">
-    <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=danny-avila/LibreChat&type=Date&theme=dark" onerror="this.src='https://api.star-history.com/svg?repos=danny-avila/LibreChat&type=Date'" />
-  </a>
-</p>
+<p align="center">
+  <a href="https://trendshift.io/repositories/4685" target="_blank" style="padding: 10px;">
+    <img src="https://trendshift.io/api/badge/repositories/4685" alt="danny-avila%2FLibreChat | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/>
+  </a>
+  <a href="https://runacap.com/ross-index/q1-24/" target="_blank" rel="noopener" style="margin-left: 20px;">
+    <img style="width: 260px; height: 56px" src="https://runacap.com/wp-content/uploads/2024/04/ROSS_badge_white_Q1_2024.svg" alt="ROSS Index - Fastest Growing Open-Source Startups in Q1 2024 | Runa Capital" width="260" height="56"/>
+  </a>
+</p>
+
+<a href="https://star-history.com/#danny-avila/LibreChat&Date">
+  <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=danny-avila/LibreChat&type=Date&theme=dark" onerror="this.src='https://api.star-history.com/svg?repos=danny-avila/LibreChat&type=Date'" />
+</a>

 ---
@@ -7,10 +7,10 @@ const {
 } = require('librechat-data-provider');
 const { encodeAndFormat } = require('~/server/services/Files/images/encode');
 const {
-  titleFunctionPrompt,
-  parseTitleFromPrompt,
   truncateText,
   formatMessage,
+  titleFunctionPrompt,
+  parseParamFromPrompt,
   createContextHandlers,
 } = require('./prompts');
 const spendTokens = require('~/models/spendTokens');
@@ -75,7 +75,9 @@ class AnthropicClient extends BaseClient {
     this.options.attachments?.then((attachments) => this.checkVisionRequest(attachments));

     this.maxContextTokens =
-      getModelMaxTokens(this.modelOptions.model, EModelEndpoint.anthropic) ?? 100000;
+      this.options.maxContextTokens ??
+      getModelMaxTokens(this.modelOptions.model, EModelEndpoint.anthropic) ??
+      100000;
     this.maxResponseTokens = this.modelOptions.maxOutputTokens || 1500;
     this.maxPromptTokens =
       this.options.maxPromptTokens || this.maxContextTokens - this.maxResponseTokens;
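The new fallback chain lets a user-configured context window take precedence over the model's known maximum, with a hard default as the last resort. A minimal standalone sketch of the same pattern; the lookup table and values below are made up for illustration:

    // Hypothetical stand-in for the real getModelMaxTokens lookup.
    const getModelMaxTokens = (model) => ({ 'claude-3-opus-20240229': 200000 }[model]);

    function resolveMaxContextTokens(options, model) {
      // User override wins; otherwise look the model up; otherwise fall back to 100k.
      return options.maxContextTokens ?? getModelMaxTokens(model) ?? 100000;
    }

    console.log(resolveMaxContextTokens({}, 'claude-3-opus-20240229')); // 200000
    console.log(resolveMaxContextTokens({ maxContextTokens: 8192 }, 'claude-3-opus-20240229')); // 8192
    console.log(resolveMaxContextTokens({}, 'unknown-model')); // 100000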
@@ -652,9 +654,13 @@ class AnthropicClient extends BaseClient {
   getSaveOptions() {
     return {
+      maxContextTokens: this.options.maxContextTokens,
       promptPrefix: this.options.promptPrefix,
       modelLabel: this.options.modelLabel,
       resendFiles: this.options.resendFiles,
+      iconURL: this.options.iconURL,
+      greeting: this.options.greeting,
+      spec: this.options.spec,
       ...this.modelOptions,
     };
   }
@@ -742,7 +748,7 @@ class AnthropicClient extends BaseClient {
       context: 'title',
     });
     const text = response.content[0].text;
-    title = parseTitleFromPrompt(text);
+    title = parseParamFromPrompt(text, 'title');
   } catch (e) {
     logger.error('[AnthropicClient] There was an issue generating the title', e);
   }
@@ -456,6 +456,8 @@
       sender: this.sender,
       text: addSpaceIfNeeded(generation) + completion,
       promptTokens,
+      iconURL: this.options.iconURL,
+      endpoint: this.options.endpoint,
       ...(this.metadata ?? {}),
     };
@@ -525,8 +527,19 @@
     return _messages;
   }

+  /**
+   * Save a message to the database.
+   * @param {TMessage} message
+   * @param {Partial<TConversation>} endpointOptions
+   * @param {string | null} user
+   */
   async saveMessageToDatabase(message, endpointOptions, user = null) {
-    await saveMessage({ ...message, endpoint: this.options.endpoint, user, unfinished: false });
+    await saveMessage({
+      ...message,
+      endpoint: this.options.endpoint,
+      unfinished: false,
+      user,
+    });
     await saveConvo(user, {
       conversationId: message.conversationId,
       endpoint: this.options.endpoint,
@@ -556,11 +569,11 @@
    * the message is considered a root message.
    *
    * @param {Object} options - The options for the function.
-   * @param {Array} options.messages - An array of message objects. Each object should have either an 'id' or 'messageId' property, and may have a 'parentMessageId' property.
+   * @param {TMessage[]} options.messages - An array of message objects. Each object should have either an 'id' or 'messageId' property, and may have a 'parentMessageId' property.
    * @param {string} options.parentMessageId - The ID of the parent message to start the traversal from.
    * @param {Function} [options.mapMethod] - An optional function to map over the ordered messages. If provided, it will be applied to each message in the resulting array.
    * @param {boolean} [options.summary=false] - If set to true, the traversal modifies messages with 'summary' and 'summaryTokenCount' properties and stops at the message with a 'summary' property.
-   * @returns {Array} An array containing the messages in the order they should be displayed, starting with the most recent message with a 'summary' property if the 'summary' option is true, and ending with the message identified by 'parentMessageId'.
+   * @returns {TMessage[]} An array containing the messages in the order they should be displayed, starting with the most recent message with a 'summary' property if the 'summary' option is true, and ending with the message identified by 'parentMessageId'.
    */
   static getMessagesForConversation({
     messages,
@@ -138,7 +138,10 @@ class GoogleClient extends BaseClient {
       !isGenerativeModel && !isChatModel && /code|text/.test(this.modelOptions.model);
     const { isTextModel } = this;

-    this.maxContextTokens = getModelMaxTokens(this.modelOptions.model, EModelEndpoint.google);
+    this.maxContextTokens =
+      this.options.maxContextTokens ??
+      getModelMaxTokens(this.modelOptions.model, EModelEndpoint.google);

     // The max prompt tokens is determined by the max context tokens minus the max response tokens.
     // Earlier messages will be dropped until the prompt is within the limit.
     this.maxResponseTokens = this.modelOptions.maxOutputTokens || settings.maxOutputTokens.default;
@@ -677,26 +680,36 @@ class GoogleClient extends BaseClient {
       };
     }

+    const safetySettings = _payload.safetySettings;
+    requestOptions.safetySettings = safetySettings;
+
+    const delay = modelName.includes('flash') ? 8 : 14;
     const result = await client.generateContentStream(requestOptions);
     for await (const chunk of result.stream) {
       const chunkText = chunk.text();
-      this.generateTextStream(chunkText, onProgress, {
-        delay: 12,
+      await this.generateTextStream(chunkText, onProgress, {
+        delay,
       });
       reply += chunkText;
     }
     return reply;
   }

+  const safetySettings = _payload.safetySettings;
   const stream = await model.stream(messages, {
     signal: abortController.signal,
-    timeout: 7000,
+    safetySettings: safetySettings,
   });

+  let delay = this.isGenerativeModel ? 12 : 8;
+  if (modelName.includes('flash')) {
+    delay = 5;
+  }
   for await (const chunk of stream) {
     const chunkText = chunk?.content ?? chunk;
-    this.generateTextStream(chunkText, onProgress, {
-      delay: this.isGenerativeModel ? 12 : 8,
+    await this.generateTextStream(chunkText, onProgress, {
+      delay,
     });
     reply += chunkText;
   }
@@ -708,6 +721,9 @@ class GoogleClient extends BaseClient {
     return {
       promptPrefix: this.options.promptPrefix,
       modelLabel: this.options.modelLabel,
+      iconURL: this.options.iconURL,
+      greeting: this.options.greeting,
+      spec: this.options.spec,
       ...this.modelOptions,
     };
   }
@@ -717,6 +733,33 @@ class GoogleClient extends BaseClient {
   }

   async sendCompletion(payload, opts = {}) {
+    const modelName = payload.parameters?.model;
+
+    if (modelName && modelName.toLowerCase().includes('gemini')) {
+      const safetySettings = [
+        {
+          category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
+          threshold:
+            process.env.GOOGLE_SAFETY_SEXUALLY_EXPLICIT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
+        },
+        {
+          category: 'HARM_CATEGORY_HATE_SPEECH',
+          threshold: process.env.GOOGLE_SAFETY_HATE_SPEECH || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
+        },
+        {
+          category: 'HARM_CATEGORY_HARASSMENT',
+          threshold: process.env.GOOGLE_SAFETY_HARASSMENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
+        },
+        {
+          category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
+          threshold:
+            process.env.GOOGLE_SAFETY_DANGEROUS_CONTENT || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
+        },
+      ];
+
+      payload.safetySettings = safetySettings;
+    }
+
     let reply = '';
     reply = await this.getCompletion(payload, opts);
     return reply.trim();
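The thresholds above come straight from the environment, falling back to HARM_BLOCK_THRESHOLD_UNSPECIFIED when a variable is unset. A small sketch of that mapping in isolation; the helper name is illustrative, not part of the codebase:

    // Illustrative helper: builds one safety-setting entry from an env var,
    // mirroring the fallback used in sendCompletion above.
    const safetySetting = (category, envValue) => ({
      category,
      threshold: envValue || 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
    });

    // With GOOGLE_SAFETY_HATE_SPEECH=BLOCK_ONLY_HIGH set in .env:
    console.log(safetySetting('HARM_CATEGORY_HATE_SPEECH', process.env.GOOGLE_SAFETY_HATE_SPEECH));
    // => { category: 'HARM_CATEGORY_HATE_SPEECH', threshold: 'BLOCK_ONLY_HIGH' }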
api/app/clients/OllamaClient.js (new file, 154 lines)

@@ -0,0 +1,154 @@
+const { z } = require('zod');
+const axios = require('axios');
+const { Ollama } = require('ollama');
+const { deriveBaseURL } = require('~/utils');
+const { logger } = require('~/config');
+
+const ollamaPayloadSchema = z.object({
+  mirostat: z.number().optional(),
+  mirostat_eta: z.number().optional(),
+  mirostat_tau: z.number().optional(),
+  num_ctx: z.number().optional(),
+  repeat_last_n: z.number().optional(),
+  repeat_penalty: z.number().optional(),
+  temperature: z.number().optional(),
+  seed: z.number().nullable().optional(),
+  stop: z.array(z.string()).optional(),
+  tfs_z: z.number().optional(),
+  num_predict: z.number().optional(),
+  top_k: z.number().optional(),
+  top_p: z.number().optional(),
+  stream: z.optional(z.boolean()),
+  model: z.string(),
+});
+
+/**
+ * @param {string} imageUrl
+ * @returns {string}
+ * @throws {Error}
+ */
+const getValidBase64 = (imageUrl) => {
+  const parts = imageUrl.split(';base64,');
+
+  if (parts.length === 2) {
+    return parts[1];
+  } else {
+    logger.error('Invalid or no Base64 string found in URL.');
+  }
+};
+
+class OllamaClient {
+  constructor(options = {}) {
+    const host = deriveBaseURL(options.baseURL ?? 'http://localhost:11434');
+    /** @type {Ollama} */
+    this.client = new Ollama({ host });
+  }
+
+  /**
+   * Fetches Ollama models from the specified base API path.
+   * @param {string} baseURL
+   * @returns {Promise<string[]>} The Ollama models.
+   */
+  static async fetchModels(baseURL) {
+    let models = [];
+    if (!baseURL) {
+      return models;
+    }
+    try {
+      const ollamaEndpoint = deriveBaseURL(baseURL);
+      /** @type {Promise<AxiosResponse<OllamaListResponse>>} */
+      const response = await axios.get(`${ollamaEndpoint}/api/tags`);
+      models = response.data.models.map((tag) => tag.name);
+      return models;
+    } catch (error) {
+      const logMessage =
+        'Failed to fetch models from Ollama API. If you are not using Ollama directly, and instead, through some aggregator or reverse proxy that handles fetching via OpenAI spec, ensure the name of the endpoint doesn\'t start with `ollama` (case-insensitive).';
+      logger.error(logMessage, error);
+      return [];
+    }
+  }
+
+  /**
+   * @param {ChatCompletionMessage[]} messages
+   * @returns {OllamaMessage[]}
+   */
+  static formatOpenAIMessages(messages) {
+    const ollamaMessages = [];
+
+    for (const message of messages) {
+      if (typeof message.content === 'string') {
+        ollamaMessages.push({
+          role: message.role,
+          content: message.content,
+        });
+        continue;
+      }
+
+      let aggregatedText = '';
+      let imageUrls = [];
+
+      for (const content of message.content) {
+        if (content.type === 'text') {
+          aggregatedText += content.text + ' ';
+        } else if (content.type === 'image_url') {
+          imageUrls.push(getValidBase64(content.image_url.url));
+        }
+      }
+
+      const ollamaMessage = {
+        role: message.role,
+        content: aggregatedText.trim(),
+      };
+
+      if (imageUrls.length > 0) {
+        ollamaMessage.images = imageUrls;
+      }
+
+      ollamaMessages.push(ollamaMessage);
+    }
+
+    return ollamaMessages;
+  }
+
+  /***
+   * @param {Object} params
+   * @param {ChatCompletionPayload} params.payload
+   * @param {onTokenProgress} params.onProgress
+   * @param {AbortController} params.abortController
+   */
+  async chatCompletion({ payload, onProgress, abortController = null }) {
+    let intermediateReply = '';
+
+    const parameters = ollamaPayloadSchema.parse(payload);
+    const messages = OllamaClient.formatOpenAIMessages(payload.messages);
+
+    if (parameters.stream) {
+      const stream = await this.client.chat({
+        messages,
+        ...parameters,
+      });
+
+      for await (const chunk of stream) {
+        const token = chunk.message.content;
+        intermediateReply += token;
+        onProgress(token);
+        if (abortController.signal.aborted) {
+          stream.controller.abort();
+          break;
+        }
+      }
+    }
+    // TODO: regular completion
+    else {
+      // const generation = await this.client.generate(payload);
+    }
+
+    return intermediateReply;
+  }
+
+  catch(err) {
+    logger.error('[OllamaClient.chatCompletion]', err);
+    throw err;
+  }
+}
+
+module.exports = { OllamaClient, ollamaPayloadSchema };
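A hedged usage sketch of the new client, assuming an Ollama server is reachable at the default http://localhost:11434 and a llava model is pulled; the async wrapper is illustrative:

    const { OllamaClient } = require('./OllamaClient');

    (async () => {
      // List the model tags the local Ollama server exposes via GET /api/tags.
      const models = await OllamaClient.fetchModels('http://localhost:11434');
      console.log(models); // e.g. ['llava:latest', ...]

      // Stream a chat completion; onProgress receives each token as it arrives.
      const client = new OllamaClient({ baseURL: 'http://localhost:11434' });
      const reply = await client.chatCompletion({
        payload: {
          model: 'llava',
          stream: true,
          messages: [{ role: 'user', content: 'Describe this image in one sentence.' }],
        },
        onProgress: (token) => process.stdout.write(token),
        abortController: new AbortController(),
      });
      console.log('\nfull reply:', reply);
    })();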
@@ -1,6 +1,8 @@
 const OpenAI = require('openai');
+const { OllamaClient } = require('./OllamaClient');
 const { HttpsProxyAgent } = require('https-proxy-agent');
 const {
+  Constants,
   ImageDetail,
   EModelEndpoint,
   resolveHeaders,
@@ -20,16 +22,16 @@ const {
 const {
   truncateText,
   formatMessage,
-  createContextHandlers,
   CUT_OFF_PROMPT,
   titleInstruction,
+  createContextHandlers,
 } = require('./prompts');
 const { encodeAndFormat } = require('~/server/services/Files/images/encode');
+const { isEnabled, sleep } = require('~/server/utils');
 const { handleOpenAIErrors } = require('./tools/util');
 const spendTokens = require('~/models/spendTokens');
 const { createLLM, RunManager } = require('./llm');
 const ChatGPTClient = require('./ChatGPTClient');
-const { isEnabled } = require('~/server/utils');
 const { summaryBuffer } = require('./memory');
 const { runTitleChain } = require('./chains');
 const { tokenSplit } = require('./document');
@@ -127,6 +129,10 @@ class OpenAIClient extends BaseClient {
       this.useOpenRouter = true;
     }

+    if (this.options.endpoint?.toLowerCase() === 'ollama') {
+      this.isOllama = true;
+    }
+
     this.FORCE_PROMPT =
       isEnabled(OPENAI_FORCE_PROMPT) ||
       (reverseProxy && reverseProxy.includes('completions') && !reverseProxy.includes('chat'));
@@ -159,11 +165,13 @@ class OpenAIClient extends BaseClient {
       model.startsWith('text-chat') || model.startsWith('text-davinci-002-render');

     this.maxContextTokens =
+      this.options.maxContextTokens ??
       getModelMaxTokens(
         model,
         this.options.endpointType ?? this.options.endpoint,
         this.options.endpointTokenConfig,
-      ) ?? 4095; // 1 less than maximum
+      ) ??
+      4095; // 1 less than maximum

     if (this.shouldSummarize) {
       this.maxContextTokens = Math.floor(this.maxContextTokens / 2);
@@ -200,16 +208,6 @@ class OpenAIClient extends BaseClient {
     this.setupTokens();

-    if (!this.modelOptions.stop && !this.isVisionModel) {
-      const stopTokens = [this.startToken];
-      if (this.endToken && this.endToken !== this.startToken) {
-        stopTokens.push(this.endToken);
-      }
-      stopTokens.push(`\n${this.userLabel}:`);
-      stopTokens.push('<|diff_marker|>');
-      this.modelOptions.stop = stopTokens;
-    }
-
     if (reverseProxy) {
       this.completionsUrl = reverseProxy;
       this.langchainProxy = extractBaseURL(reverseProxy);
@@ -243,23 +241,52 @@ class OpenAIClient extends BaseClient {
    * @param {MongoFile[]} attachments
    */
   checkVisionRequest(attachments) {
-    const availableModels = this.options.modelsConfig?.[this.options.endpoint];
-    this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
-
-    const visionModelAvailable = availableModels?.includes(this.defaultVisionModel);
-    if (
-      attachments &&
-      attachments.some((file) => file?.type && file?.type?.includes('image')) &&
-      visionModelAvailable &&
-      !this.isVisionModel
-    ) {
-      this.modelOptions.model = this.defaultVisionModel;
-      this.isVisionModel = true;
+    if (!attachments) {
+      return;
+    }
+
+    const availableModels = this.options.modelsConfig?.[this.options.endpoint];
+    if (!availableModels) {
+      return;
+    }
+
+    let visionRequestDetected = false;
+    for (const file of attachments) {
+      if (file?.type?.includes('image')) {
+        visionRequestDetected = true;
+        break;
+      }
+    }
+    if (!visionRequestDetected) {
+      return;
+    }
+
+    this.isVisionModel = validateVisionModel({ model: this.modelOptions.model, availableModels });
+    if (this.isVisionModel) {
+      delete this.modelOptions.stop;
+      return;
+    }
+
+    for (const model of availableModels) {
+      if (!validateVisionModel({ model, availableModels })) {
+        continue;
+      }
+      this.modelOptions.model = model;
+      this.isVisionModel = true;
+      delete this.modelOptions.stop;
+      return;
+    }
+
+    if (!availableModels.includes(this.defaultVisionModel)) {
+      return;
+    }
+    if (!validateVisionModel({ model: this.defaultVisionModel, availableModels })) {
+      return;
+    }
+
+    this.modelOptions.model = this.defaultVisionModel;
+    this.isVisionModel = true;
+    delete this.modelOptions.stop;
   }

   setupTokens() {
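The rewritten method now prefers any valid vision model already available for the endpoint before falling back to defaultVisionModel. A sketch of the observable behavior, using the same fixture values as the unit test added later in this diff (the require path is assumed):

    const OpenAIClient = require('./OpenAIClient'); // import path assumed

    // Endpoint 'ollama' exposes three models; the configured default vision
    // model intentionally fails validation, as in the new test.
    const client = new OpenAIClient('test-api-key', {
      endpoint: 'ollama',
      modelOptions: { model: 'initial-model' },
      modelsConfig: { ollama: ['initial-model', 'llava', 'other-model'] },
    });
    client.defaultVisionModel = 'non-valid-default-model';

    client.checkVisionRequest([{ type: 'image/png' }]);
    // 'llava' is the first available model that validates as a vision model:
    console.log(client.modelOptions.model); // 'llava'
    console.log(client.isVisionModel);      // true
    console.log(client.modelOptions.stop);  // undefined (stop sequences removed)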
@@ -281,7 +308,7 @@ class OpenAIClient extends BaseClient {
     let tokenizer;
     this.encoding = 'text-davinci-003';
     if (this.isChatCompletion) {
-      this.encoding = 'cl100k_base';
+      this.encoding = this.modelOptions.model.includes('gpt-4o') ? 'o200k_base' : 'cl100k_base';
       tokenizer = this.constructor.getTokenizer(this.encoding);
     } else if (this.isUnofficialChatGptModel) {
       const extendSpecialTokens = {
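gpt-4o uses the newer o200k_base encoding rather than cl100k_base, so token counts for the same text can differ between the two, which matters for context-window budgeting and token spend. A standalone sketch using the js-tiktoken package (an assumption for illustration; the client resolves its tokenizer through its own getTokenizer helper):

    const { getEncoding } = require('js-tiktoken');

    // Same text, two encodings: the counts are generally not equal.
    const text = 'LibreChat now supports gpt-4o.';
    const cl100k = getEncoding('cl100k_base');
    const o200k = getEncoding('o200k_base');

    console.log(cl100k.encode(text).length); // token count under cl100k_base
    console.log(o200k.encode(text).length);  // token count under o200k_base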
@@ -386,10 +413,14 @@ class OpenAIClient extends BaseClient {
   getSaveOptions() {
     return {
+      maxContextTokens: this.options.maxContextTokens,
       chatGptLabel: this.options.chatGptLabel,
       promptPrefix: this.options.promptPrefix,
       resendFiles: this.options.resendFiles,
       imageDetail: this.options.imageDetail,
+      iconURL: this.options.iconURL,
+      greeting: this.options.greeting,
+      spec: this.options.spec,
       ...this.modelOptions,
     };
   }
@@ -411,7 +442,11 @@ class OpenAIClient extends BaseClient {
    * @returns {Promise<MongoFile[]>}
    */
   async addImageURLs(message, attachments) {
-    const { files, image_urls } = await encodeAndFormat(this.options.req, attachments);
+    const { files, image_urls } = await encodeAndFormat(
+      this.options.req,
+      attachments,
+      this.options.endpoint,
+    );
     message.image_urls = image_urls.length ? image_urls : undefined;
     return files;
   }
@@ -721,6 +756,12 @@ class OpenAIClient extends BaseClient {
    * In case of failure, it will return the default title, "New Chat".
    */
   async titleConvo({ text, conversationId, responseText = '' }) {
+    this.conversationId = conversationId;
+
+    if (this.options.attachments) {
+      delete this.options.attachments;
+    }
+
     let title = 'New Chat';
     const convo = `||>User:
 "${truncateText(text)}"
@@ -729,7 +770,10 @@ class OpenAIClient extends BaseClient {
     const { OPENAI_TITLE_MODEL } = process.env ?? {};

-    const model = this.options.titleModel ?? OPENAI_TITLE_MODEL ?? 'gpt-3.5-turbo';
+    let model = this.options.titleModel ?? OPENAI_TITLE_MODEL ?? 'gpt-3.5-turbo';
+    if (model === Constants.CURRENT_MODEL) {
+      model = this.modelOptions.model;
+    }

     const modelOptions = {
       // TODO: remove the gpt fallback and make it specific to endpoint
@@ -796,13 +840,17 @@ ${convo}
     try {
       let useChatCompletion = true;
+
+      if (this.options.reverseProxyUrl === CohereConstants.API_URL) {
+        useChatCompletion = false;
+      }
+
       title = (
         await this.sendPayload(instructionsPayload, { modelOptions, useChatCompletion })
       ).replaceAll('"', '');
+
       const completionTokens = this.getTokenCount(title);

       this.recordTokenUsage({ promptTokens, completionTokens, context: 'title' });
     } catch (e) {
       logger.error(
@@ -826,6 +874,7 @@ ${convo}
         context: 'title',
         tokenBuffer: 150,
       });
+
       title = await runTitleChain({ llm, text, convo, signal: this.abortController.signal });
     } catch (e) {
       if (e?.message?.toLowerCase()?.includes('abort')) {
@@ -851,7 +900,11 @@ ${convo}
     // TODO: remove the gpt fallback and make it specific to endpoint
     const { OPENAI_SUMMARY_MODEL = 'gpt-3.5-turbo' } = process.env ?? {};
-    const model = this.options.summaryModel ?? OPENAI_SUMMARY_MODEL;
+    let model = this.options.summaryModel ?? OPENAI_SUMMARY_MODEL;
+    if (model === Constants.CURRENT_MODEL) {
+      model = this.modelOptions.model;
+    }
+
     const maxContextTokens =
       getModelMaxTokens(
         model,
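Both the title and summary paths now accept a sentinel meaning "reuse whatever model the conversation is running". A hedged sketch of the resolution logic, assuming Constants.CURRENT_MODEL is the sentinel string exported by librechat-data-provider:

    const { Constants } = require('librechat-data-provider');

    // Illustrative resolver: configured value wins, but the CURRENT_MODEL
    // sentinel defers to the conversation's active model.
    function resolveTitleModel(configured, activeModel) {
      let model = configured ?? process.env.OPENAI_TITLE_MODEL ?? 'gpt-3.5-turbo';
      if (model === Constants.CURRENT_MODEL) {
        model = activeModel;
      }
      return model;
    }

    console.log(resolveTitleModel(undefined, 'gpt-4o'));               // 'gpt-3.5-turbo' (default)
    console.log(resolveTitleModel(Constants.CURRENT_MODEL, 'gpt-4o')); // 'gpt-4o'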
@@ -959,9 +1012,9 @@ ${convo}
     await spendTokens(
       {
         context,
-        user: this.user,
         model: this.modelOptions.model,
         conversationId: this.conversationId,
+        user: this.user ?? this.options.req.user?.id,
         endpointTokenConfig: this.options.endpointTokenConfig,
       },
       { promptTokens, completionTokens },
@@ -1078,11 +1131,8 @@ ${convo}
       ...opts,
     });

-    /* hacky fixes for Mistral AI API:
-      - Re-orders system message to the top of the messages payload, as not allowed anywhere else
-      - If there is only one message and it's a system message, change the role to user
-    */
-    if (opts.baseURL.includes('https://api.mistral.ai/v1') && modelOptions.messages) {
+    /* Re-orders system message to the top of the messages payload, as not allowed anywhere else */
+    if (modelOptions.messages && (opts.baseURL.includes('api.mistral.ai') || this.isOllama)) {
       const { messages } = modelOptions;

       const systemMessageIndex = messages.findIndex((msg) => msg.role === 'system');
@@ -1093,10 +1143,16 @@ ${convo}
       }

       modelOptions.messages = messages;
+    }

-      if (messages.length === 1 && messages[0].role === 'system') {
-        modelOptions.messages[0].role = 'user';
-      }
+    /* If there is only one message and it's a system message, change the role to user */
+    if (
+      (opts.baseURL.includes('api.mistral.ai') || opts.baseURL.includes('api.perplexity.ai')) &&
+      modelOptions.messages &&
+      modelOptions.messages.length === 1 &&
+      modelOptions.messages[0]?.role === 'system'
+    ) {
+      modelOptions.messages[0].role = 'user';
     }

     if (this.options.addParams && typeof this.options.addParams === 'object') {
@@ -1120,6 +1176,15 @@ ${convo}
       });
     }

+    if (this.message_file_map && this.isOllama) {
+      const ollamaClient = new OllamaClient({ baseURL });
+      return await ollamaClient.chatCompletion({
+        payload: modelOptions,
+        onProgress,
+        abortController,
+      });
+    }
+
     let UnexpectedRoleError = false;
     if (modelOptions.stream) {
       const stream = await openai.beta.chat.completions
@@ -1150,6 +1215,7 @@ ${convo}
         }
       });

+      const azureDelay = this.modelOptions.model?.includes('gpt-4') ? 30 : 17;
       for await (const chunk of stream) {
         const token = chunk.choices[0]?.delta?.content || '';
         intermediateReply += token;
@@ -1158,6 +1224,10 @@ ${convo}
           stream.controller.abort();
           break;
         }
+
+        if (this.azure) {
+          await sleep(azureDelay);
+        }
       }

     if (!UnexpectedRoleError) {
@@ -42,8 +42,12 @@ class PluginsClient extends OpenAIClient {
     return {
       chatGptLabel: this.options.chatGptLabel,
       promptPrefix: this.options.promptPrefix,
       tools: this.options.tools,
       ...this.modelOptions,
       agentOptions: this.agentOptions,
+      iconURL: this.options.iconURL,
+      greeting: this.options.greeting,
+      spec: this.options.spec,
     };
   }
@@ -144,9 +148,11 @@ class PluginsClient extends OpenAIClient {
       signal,
       pastMessages,
       tools: this.tools,
-      currentDateString: this.currentDateString,
       verbose: this.options.debug,
       returnIntermediateSteps: true,
+      customName: this.options.chatGptLabel,
+      currentDateString: this.currentDateString,
+      customInstructions: this.options.promptPrefix,
       callbackManager: CallbackManager.fromHandlers({
         async handleAgentAction(action, runId) {
           handleAction(action, runId, onAgentAction);
@@ -304,6 +310,8 @@ class PluginsClient extends OpenAIClient {
     }

     const responseMessage = {
+      endpoint: EModelEndpoint.gptPlugins,
+      iconURL: this.options.iconURL,
       messageId: responseMessageId,
       conversationId,
       parentMessageId: userMessage.messageId,
@@ -13,10 +13,18 @@ const initializeCustomAgent = async ({
   tools,
   model,
   pastMessages,
+  customName,
+  customInstructions,
   currentDateString,
   ...rest
 }) => {
   let prompt = CustomAgent.createPrompt(tools, { currentDateString, model: model.modelName });
+  if (customName) {
+    prompt = `You are "${customName}".\n${prompt}`;
+  }
+  if (customInstructions) {
+    prompt = `${prompt}\n${customInstructions}`;
+  }

   const chatPrompt = ChatPromptTemplate.fromMessages([
     new SystemMessagePromptTemplate(prompt),
@@ -10,6 +10,8 @@ const initializeFunctionsAgent = async ({
   tools,
   model,
   pastMessages,
+  customName,
+  customInstructions,
   currentDateString,
   ...rest
 }) => {
@@ -24,7 +26,13 @@ const initializeFunctionsAgent = async ({
     returnMessages: true,
   });

-  const prefix = addToolDescriptions(`Current Date: ${currentDateString}\n${PREFIX}`, tools);
+  let prefix = addToolDescriptions(`Current Date: ${currentDateString}\n${PREFIX}`, tools);
+  if (customName) {
+    prefix = `You are "${customName}".\n${prefix}`;
+  }
+  if (customInstructions) {
+    prefix = `${prefix}\n${customInstructions}`;
+  }

   return await initializeAgentExecutorWithOptions(tools, model, {
     agentType: 'openai-functions',
@@ -59,25 +59,57 @@ Submit a brief title in the conversation's language, following the parameter des
 </tool_description>
 </tools>`;

+const genTranslationPrompt = (
+  translationPrompt,
+) => `In this environment you have access to a set of tools you can use to translate text.
+
+You may call them like this:
+<function_calls>
+<invoke>
+<tool_name>$TOOL_NAME</tool_name>
+<parameters>
+<$PARAMETER_NAME>$PARAMETER_VALUE</$PARAMETER_NAME>
+...
+</parameters>
+</invoke>
+</function_calls>
+
+Here are the tools available:
+<tools>
+<tool_description>
+<tool_name>submit_translation</tool_name>
+<description>
+Submit a translation in the target language, following the parameter description and its language closely.
+</description>
+<parameters>
+<parameter>
+<name>translation</name>
+<type>string</type>
+<description>${translationPrompt}
+ONLY include the generated translation without quotations, nor its related key</description>
+</parameter>
+</parameters>
+</tool_description>
+</tools>`;
+
 /**
- * Parses titles from title functions based on the provided prompt.
- * @param {string} prompt - The prompt containing the title function.
- * @returns {string} The parsed title. "New Chat" if no title is found.
+ * Parses specified parameter from the provided prompt.
+ * @param {string} prompt - The prompt containing the desired parameter.
+ * @param {string} paramName - The name of the parameter to extract.
+ * @returns {string} The parsed parameter's value or a default value if not found.
  */
-function parseTitleFromPrompt(prompt) {
-  const titleRegex = /<title>(.+?)<\/title>/;
-  const titleMatch = prompt.match(titleRegex);
+function parseParamFromPrompt(prompt, paramName) {
+  const paramRegex = new RegExp(`<${paramName}>([\\s\\S]+?)</${paramName}>`);
+  const paramMatch = prompt.match(paramRegex);

-  if (titleMatch && titleMatch[1]) {
-    const title = titleMatch[1].trim();
-
-    // // Capitalize the first letter of each word; Note: unnecessary due to title case prompting
-    // const capitalizedTitle = title.replace(/\b\w/g, (char) => char.toUpperCase());
-
-    return title;
+  if (paramMatch && paramMatch[1]) {
+    return paramMatch[1].trim();
   }

-  return 'New Chat';
+  if (prompt && prompt.length) {
+    return `NO TOOL INVOCATION: ${prompt}`;
+  }
+  return `No ${paramName} provided`;
 }

 module.exports = {
@@ -85,5 +117,6 @@ module.exports = {
   titleInstruction,
   createTitlePrompt,
   titleFunctionPrompt,
-  parseTitleFromPrompt,
+  parseParamFromPrompt,
+  genTranslationPrompt,
 };
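The generalized parser extracts any XML-tagged tool parameter, so the same helper now serves both titling and the new translation prompt. A quick usage sketch; the require path is assumed, but the outputs follow directly from the regex and fallbacks above:

    const { parseParamFromPrompt } = require('./titlePrompts'); // path assumed

    parseParamFromPrompt('<title>Daily Standup Notes</title>', 'title');
    // => 'Daily Standup Notes'

    parseParamFromPrompt('<translation>Bonjour le monde</translation>', 'translation');
    // => 'Bonjour le monde'

    parseParamFromPrompt('no tags here', 'title');
    // => 'NO TOOL INVOCATION: no tags here'

    parseParamFromPrompt('', 'title');
    // => 'No title provided'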
@@ -40,7 +40,8 @@ class FakeClient extends BaseClient {
       };
     }

-    this.maxContextTokens = getModelMaxTokens(this.modelOptions.model) ?? 4097;
+    this.maxContextTokens =
+      this.options.maxContextTokens ?? getModelMaxTokens(this.modelOptions.model) ?? 4097;
   }
   buildMessages() {}
   getTokenCount(str) {
@@ -144,6 +144,7 @@ describe('OpenAIClient', () => {
   const defaultOptions = {
     // debug: true,
+    req: {},
     openaiApiKey: 'new-api-key',
     modelOptions: {
       model,
@@ -157,12 +158,19 @@ describe('OpenAIClient', () => {
     azureOpenAIApiVersion: '2020-07-01-preview',
   };

+  let originalWarn;
+
   beforeAll(() => {
-    jest.spyOn(console, 'warn').mockImplementation(() => {});
+    originalWarn = console.warn;
+    console.warn = jest.fn();
   });

   afterAll(() => {
-    console.warn.mockRestore();
+    console.warn = originalWarn;
   });

+  beforeEach(() => {
+    console.warn.mockClear();
+  });
+
   beforeEach(() => {
@@ -662,4 +670,35 @@ describe('OpenAIClient', () => {
       expect(constructorArgs.baseURL).toBe(expectedURL);
     });
   });

+  describe('checkVisionRequest functionality', () => {
+    let client;
+    const attachments = [{ type: 'image/png' }];
+
+    beforeEach(() => {
+      client = new OpenAIClient('test-api-key', {
+        endpoint: 'ollama',
+        modelOptions: {
+          model: 'initial-model',
+        },
+        modelsConfig: {
+          ollama: ['initial-model', 'llava', 'other-model'],
+        },
+      });
+
+      client.defaultVisionModel = 'non-valid-default-model';
+    });
+
+    afterEach(() => {
+      jest.restoreAllMocks();
+    });
+
+    it('should set "llava" as the model if it is the first valid model when default validation fails', () => {
+      client.checkVisionRequest(attachments);
+
+      expect(client.modelOptions.model).toBe('llava');
+      expect(client.isVisionModel).toBeTruthy();
+      expect(client.modelOptions.stop).toBeUndefined();
+    });
+  });
 });
@@ -1,11 +1,28 @@
const { MeiliSearch } = require('meilisearch');
const Message = require('~/models/schema/messageSchema');
const Conversation = require('~/models/schema/convoSchema');
const Message = require('~/models/schema/messageSchema');
const { logger } = require('~/config');

const searchEnabled = process.env?.SEARCH?.toLowerCase() === 'true';
let currentTimeout = null;

class MeiliSearchClient {
  static instance = null;

  static getInstance() {
    if (!MeiliSearchClient.instance) {
      if (!process.env.MEILI_HOST || !process.env.MEILI_MASTER_KEY) {
        throw new Error('Meilisearch configuration is missing.');
      }
      MeiliSearchClient.instance = new MeiliSearch({
        host: process.env.MEILI_HOST,
        apiKey: process.env.MEILI_MASTER_KEY,
      });
    }
    return MeiliSearchClient.instance;
  }
}

// eslint-disable-next-line no-unused-vars
async function indexSync(req, res, next) {
  if (!searchEnabled) {
@@ -13,20 +30,10 @@ async function indexSync(req, res, next) {
  }

  try {
    if (!process.env.MEILI_HOST || !process.env.MEILI_MASTER_KEY || !searchEnabled) {
      throw new Error('Meilisearch not configured, search will be disabled.');
    }

    const client = new MeiliSearch({
      host: process.env.MEILI_HOST,
      apiKey: process.env.MEILI_MASTER_KEY,
    });
    const client = MeiliSearchClient.getInstance();

    const { status } = await client.health();
    // logger.debug(`[indexSync] Meilisearch: ${status}`);
    const result = status === 'available' && !!process.env.SEARCH;

    if (!result) {
    if (status !== 'available' || !process.env.SEARCH) {
      throw new Error('Meilisearch not available');
    }

@@ -37,12 +44,8 @@ async function indexSync(req, res, next) {
    const messagesIndexed = messages.numberOfDocuments;
    const convosIndexed = convos.numberOfDocuments;

    logger.debug(
      `[indexSync] There are ${messageCount} messages in the database, ${messagesIndexed} indexed`,
    );
    logger.debug(
      `[indexSync] There are ${convoCount} convos in the database, ${convosIndexed} indexed`,
    );
    logger.debug(`[indexSync] There are ${messageCount} messages and ${messagesIndexed} indexed`);
    logger.debug(`[indexSync] There are ${convoCount} convos and ${convosIndexed} indexed`);

    if (messageCount !== messagesIndexed) {
      logger.debug('[indexSync] Messages out of sync, indexing');
@@ -54,7 +57,6 @@ async function indexSync(req, res, next) {
      Conversation.syncWithMeili();
    }
  } catch (err) {
    // logger.debug('[indexSync] in index sync');
    if (err.message.includes('not found')) {
      logger.debug('[indexSync] Creating indices...');
      currentTimeout = setTimeout(async () => {
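For illustration (not part of the diff), how callers share the singleton instead of constructing a new client per sync; getInstance throws when MEILI_HOST or MEILI_MASTER_KEY is unset:

async function isSearchAvailable() {
  const client = MeiliSearchClient.getInstance(); // same MeiliSearch instance on every call
  const { status } = await client.health();
  return status === 'available';
}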
@@ -62,8 +62,24 @@ const deleteAction = async (searchParams, session = null) => {
  return await Action.findOneAndDelete(searchParams, options).lean();
};

module.exports = {
  updateAction,
  getActions,
  deleteAction,
/**
 * Deletes actions by params, within a transaction session if provided.
 *
 * @param {Object} searchParams - The search parameters to find the actions to delete.
 * @param {string} searchParams.action_id - The ID of the action(s) to delete.
 * @param {string} searchParams.user - The user ID of the action's author.
 * @param {mongoose.ClientSession} [session] - The transaction session to use (optional).
 * @returns {Promise<Number>} A promise that resolves to the number of deleted action documents.
 */
const deleteActions = async (searchParams, session = null) => {
  const options = session ? { session } : {};
  const result = await Action.deleteMany(searchParams, options);
  return result.deletedCount;
};

module.exports = {
  getActions,
  updateAction,
  deleteAction,
  deleteActions,
};
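For illustration (not part of the diff), a sketch of deleteActions inside a mongoose transaction; the surrounding workflow is an assumption:

const mongoose = require('mongoose');
const { deleteActions } = require('~/models/Action');

async function removeUserActions(userId) {
  const session = await mongoose.startSession();
  try {
    let deleted = 0;
    await session.withTransaction(async () => {
      // deletions commit or roll back together with the rest of the transaction
      deleted = await deleteActions({ user: userId }, session);
    });
    return deleted; // number of deleted action documents
  } finally {
    session.endSession();
  }
}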
@@ -39,8 +39,21 @@ const getAssistants = async (searchParams) => {
  return await Assistant.find(searchParams).lean();
};

/**
 * Deletes an assistant based on the provided ID.
 *
 * @param {Object} searchParams - The search parameters to find the assistant to delete.
 * @param {string} searchParams.assistant_id - The ID of the assistant to delete.
 * @param {string} searchParams.user - The user ID of the assistant's author.
 * @returns {Promise<void>} Resolves when the assistant has been successfully deleted.
 */
const deleteAssistant = async (searchParams) => {
  return await Assistant.findOneAndDelete(searchParams);
};

module.exports = {
  updateAssistant,
  deleteAssistant,
  getAssistants,
  getAssistant,
};
@@ -2,6 +2,12 @@ const Conversation = require('./schema/convoSchema');
const { getMessages, deleteMessages } = require('./Message');
const logger = require('~/config/winston');

/**
 * Retrieves a single conversation for a given user and conversation ID.
 * @param {string} user - The user's ID.
 * @param {string} conversationId - The conversation's ID.
 * @returns {Promise<TConversation>} The conversation object.
 */
const getConvo = async (user, conversationId) => {
  try {
    return await Conversation.findOne({ user, conversationId }).lean();
@@ -30,11 +36,35 @@ module.exports = {
      return { message: 'Error saving conversation' };
    }
  },
  getConvosByPage: async (user, pageNumber = 1, pageSize = 25) => {
  bulkSaveConvos: async (conversations) => {
    try {
      const totalConvos = (await Conversation.countDocuments({ user })) || 1;
      const bulkOps = conversations.map((convo) => ({
        updateOne: {
          filter: { conversationId: convo.conversationId, user: convo.user },
          update: convo,
          upsert: true,
          timestamps: false,
        },
      }));

      const result = await Conversation.bulkWrite(bulkOps);
      return result;
    } catch (error) {
      logger.error('[saveBulkConversations] Error saving conversations in bulk', error);
      throw new Error('Failed to save conversations in bulk.');
    }
  },
  getConvosByPage: async (user, pageNumber = 1, pageSize = 25, isArchived = false) => {
    const query = { user };
    if (isArchived) {
      query.isArchived = true;
    } else {
      query.$or = [{ isArchived: false }, { isArchived: { $exists: false } }];
    }
    try {
      const totalConvos = (await Conversation.countDocuments(query)) || 1;
      const totalPages = Math.ceil(totalConvos / pageSize);
      const convos = await Conversation.find({ user })
      const convos = await Conversation.find(query)
        .sort({ updatedAt: -1 })
        .skip((pageNumber - 1) * pageSize)
        .limit(pageSize)
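For illustration (not part of the diff), a sketch of calling bulkSaveConvos: each conversation becomes one upserted updateOne op keyed on (conversationId, user), and `timestamps: false` keeps imported dates intact. The sample data is made up:

const { bulkSaveConvos } = require('~/models/Conversation');

async function importConvos() {
  const result = await bulkSaveConvos([
    { conversationId: 'abc-123', user: 'user-1', title: 'Imported chat' },
    { conversationId: 'def-456', user: 'user-1', title: 'Another import' },
  ]);
  // result.upsertedCount + result.modifiedCount should cover both documents
  return result;
}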
@@ -10,6 +10,7 @@ module.exports = {
  async saveMessage({
    user,
    endpoint,
    iconURL,
    messageId,
    newMessageId,
    conversationId,
@@ -35,6 +36,7 @@ module.exports = {

    const update = {
      user,
      iconURL,
      endpoint,
      messageId: newMessageId || messageId,
      conversationId,
@@ -72,6 +74,25 @@ module.exports = {
      throw new Error('Failed to save message.');
    }
  },

  async bulkSaveMessages(messages) {
    try {
      const bulkOps = messages.map((message) => ({
        updateOne: {
          filter: { messageId: message.messageId },
          update: message,
          upsert: true,
        },
      }));

      const result = await Message.bulkWrite(bulkOps);
      return result;
    } catch (err) {
      logger.error('Error saving messages in bulk:', err);
      throw new Error('Failed to save messages in bulk.');
    }
  },

  /**
   * Records a message in the database.
   *
@@ -39,6 +39,12 @@ module.exports = {
    try {
      const setter = { $set: {} };
      const update = { presetId, ...preset };
      if (preset.tools && Array.isArray(preset.tools)) {
        update.tools =
          preset.tools
            .map((tool) => tool?.pluginKey ?? tool)
            .filter((toolName) => typeof toolName === 'string') ?? [];
      }
      if (newPresetId) {
        update.presetId = newPresetId;
      }
89
api/models/Share.js
Normal file
@@ -0,0 +1,89 @@
const crypto = require('crypto');
const { getMessages } = require('./Message');
const SharedLink = require('./schema/shareSchema');
const logger = require('~/config/winston');

module.exports = {
  SharedLink,
  getSharedMessages: async (shareId) => {
    try {
      const share = await SharedLink.findOne({ shareId })
        .populate({
          path: 'messages',
          select: '-_id -__v -user',
        })
        .select('-_id -__v -user')
        .lean();

      if (!share || !share.conversationId || !share.isPublic) {
        return null;
      }

      return share;
    } catch (error) {
      logger.error('[getShare] Error getting share link', error);
      return { message: 'Error getting share link' };
    }
  },

  getSharedLinks: async (user, pageNumber = 1, pageSize = 25, isPublic = true) => {
    const query = { user, isPublic };
    try {
      const totalConvos = (await SharedLink.countDocuments(query)) || 1;
      const totalPages = Math.ceil(totalConvos / pageSize);
      const shares = await SharedLink.find(query)
        .sort({ updatedAt: -1 })
        .skip((pageNumber - 1) * pageSize)
        .limit(pageSize)
        .select('-_id -__v -user')
        .lean();

      return { sharedLinks: shares, pages: totalPages, pageNumber, pageSize };
    } catch (error) {
      logger.error('[getShareByPage] Error getting shares', error);
      return { message: 'Error getting shares' };
    }
  },

  createSharedLink: async (user, { conversationId, ...shareData }) => {
    const share = await SharedLink.findOne({ conversationId }).select('-_id -__v -user').lean();
    if (share) {
      return share;
    }

    try {
      const shareId = crypto.randomUUID();
      const messages = await getMessages({ conversationId });
      const update = { ...shareData, shareId, messages, user };
      return await SharedLink.findOneAndUpdate({ conversationId: conversationId, user }, update, {
        new: true,
        upsert: true,
      });
    } catch (error) {
      logger.error('[saveShareMessage] Error saving conversation', error);
      return { message: 'Error saving conversation' };
    }
  },

  updateSharedLink: async (user, { conversationId, ...shareData }) => {
    const share = await SharedLink.findOne({ conversationId }).select('-_id -__v -user').lean();
    if (!share) {
      return { message: 'Share not found' };
    }
    // update messages to the latest
    const messages = await getMessages({ conversationId });
    const update = { ...shareData, messages, user };
    return await SharedLink.findOneAndUpdate({ conversationId: conversationId, user }, update, {
      new: true,
      upsert: false,
    });
  },

  deleteSharedLink: async (user, { shareId }) => {
    const share = await SharedLink.findOne({ shareId, user });
    if (!share) {
      return { message: 'Share not found' };
    }
    return await SharedLink.findOneAndDelete({ shareId, user });
  },
};
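For illustration (not part of the diff), the intended share flow: create (or return the existing) link for a conversation, then resolve it by shareId; getSharedMessages returns null unless the link is public. Field values here are assumptions:

const { createSharedLink, getSharedMessages } = require('~/models/Share');

async function shareConversation(userId, conversationId) {
  const share = await createSharedLink(userId, {
    conversationId,
    title: 'Shared chat',
    isPublic: true,
  });
  return getSharedMessages(share.shareId); // null if the link is not public
}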
@@ -155,7 +155,7 @@ const createMeiliMongooseModel = function ({ index, attributesToIndex }) {
        function (results, value, key) {
          return { ...results, [key]: 1 };
        },
        { _id: 1 },
        { _id: 1, __v: 1 },
      ),
    ).lean();

@@ -348,7 +348,7 @@ module.exports = function mongoMeili(schema, options) {
      try {
        meiliDoc = await client.index('convos').getDocument(doc.conversationId);
      } catch (error) {
        logger.error(
        logger.debug(
          '[MeiliMongooseModel.findOneAndUpdate] Convo not found in MeiliSearch and will index ' +
            doc.conversationId,
          error,
@@ -88,6 +88,28 @@ const conversationPreset = {
  instructions: {
    type: String,
  },
  stop: { type: [{ type: String }], default: undefined },
  isArchived: {
    type: Boolean,
    default: false,
  },
  /* UI Components */
  iconURL: {
    type: String,
  },
  greeting: {
    type: String,
  },
  spec: {
    type: String,
  },
  tools: { type: [{ type: String }], default: undefined },
  maxContextTokens: {
    type: Number,
  },
  max_tokens: {
    type: Number,
  },
};

const agentOptions = {
@@ -110,6 +110,10 @@ const messageSchema = mongoose.Schema(
    thread_id: {
      type: String,
    },
    /* frontend components */
    iconURL: {
      type: String,
    },
  },
  { timestamps: true },
);
38
api/models/schema/shareSchema.js
Normal file
@@ -0,0 +1,38 @@
const mongoose = require('mongoose');

const shareSchema = mongoose.Schema(
  {
    conversationId: {
      type: String,
      required: true,
    },
    title: {
      type: String,
      index: true,
    },
    user: {
      type: String,
      index: true,
    },
    messages: [{ type: mongoose.Schema.Types.ObjectId, ref: 'Message' }],
    shareId: {
      type: String,
      index: true,
    },
    isPublic: {
      type: Boolean,
      default: false,
    },
    isVisible: {
      type: Boolean,
      default: false,
    },
    isAnonymous: {
      type: Boolean,
      default: true,
    },
  },
  { timestamps: true },
);

module.exports = mongoose.model('SharedLink', shareSchema);
@@ -40,7 +40,7 @@ const spendTokens = async (txData, tokenUsage) => {
    });
  }

  if (!completionTokens) {
  if (!completionTokens && isNaN(completionTokens)) {
    logger.debug('[spendTokens] !completionTokens', { prompt, completion });
    return;
  }
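For illustration (not part of the diff), what the tightened guard changes: only a missing or non-numeric completion count is skipped, while an explicit 0 is now recorded instead of being dropped (isNaN(undefined) is true, isNaN(0) is false):

const skips = (completionTokens) => !completionTokens && isNaN(completionTokens);

skips(undefined); // true  -> skip, as before
skips(NaN);       // true  -> skip
skips(0);         // false -> a zero-token completion is still written
skips(15);        // false -> normal spend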
@@ -12,6 +12,7 @@ const tokenValues = {
  '4k': { prompt: 1.5, completion: 2 },
  '16k': { prompt: 3, completion: 4 },
  'gpt-3.5-turbo-1106': { prompt: 1, completion: 2 },
  'gpt-4o': { prompt: 5, completion: 15 },
  'gpt-4-1106': { prompt: 10, completion: 30 },
  'gpt-3.5-turbo-0125': { prompt: 0.5, completion: 1.5 },
  'claude-3-opus': { prompt: 15, completion: 75 },
@@ -52,6 +53,8 @@ const getValueKey = (model, endpoint) => {
    return 'gpt-3.5-turbo-1106';
  } else if (modelName.includes('gpt-3.5')) {
    return '4k';
  } else if (modelName.includes('gpt-4o')) {
    return 'gpt-4o';
  } else if (modelName.includes('gpt-4-vision')) {
    return 'gpt-4-1106';
  } else if (modelName.includes('gpt-4-1106')) {
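For illustration (not part of the diff), a worked example of the new gpt-4o entry, reading each multiplier as USD per million tokens (an interpretation consistent with the prompt 5 / completion 15 values above):

const rates = { prompt: 5, completion: 15 }; // tokenValues['gpt-4o']
const promptTokens = 12000;
const completionTokens = 800;

const costUSD = (promptTokens * rates.prompt + completionTokens * rates.completion) / 1e6;
// (12000 * 5 + 800 * 15) / 1e6 = 0.072 -> about $0.072 for the exchange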
@@ -41,6 +41,13 @@ describe('getValueKey', () => {
    expect(getValueKey('gpt-4-turbo')).toBe('gpt-4-1106');
    expect(getValueKey('gpt-4-0125')).toBe('gpt-4-1106');
  });

  it('should return "gpt-4o" for model type of "gpt-4o"', () => {
    expect(getValueKey('gpt-4o-2024-05-13')).toBe('gpt-4o');
    expect(getValueKey('openai/gpt-4o')).toBe('gpt-4o');
    expect(getValueKey('gpt-4o-turbo')).toBe('gpt-4o');
    expect(getValueKey('gpt-4o-0125')).toBe('gpt-4o');
  });
});

describe('getMultiplier', () => {
@@ -84,6 +91,17 @@ describe('getMultiplier', () => {
    );
  });

  it('should return the correct multiplier for gpt-4o', () => {
    const valueKey = getValueKey('gpt-4o-2024-05-13');
    expect(getMultiplier({ valueKey, tokenType: 'prompt' })).toBe(tokenValues['gpt-4o'].prompt);
    expect(getMultiplier({ valueKey, tokenType: 'completion' })).toBe(
      tokenValues['gpt-4o'].completion,
    );
    expect(getMultiplier({ valueKey, tokenType: 'completion' })).not.toBe(
      tokenValues['gpt-4-1106'].completion,
    );
  });

  it('should derive the valueKey from the model if not provided for new models', () => {
    expect(
      getMultiplier({ tokenType: 'prompt', model: 'gpt-3.5-turbo-1106-some-other-info' }),
@@ -1,6 +1,6 @@
{
  "name": "@librechat/backend",
  "version": "0.7.1",
  "version": "0.7.2",
  "description": "",
  "scripts": {
    "start": "echo 'please run this from the root directory'",
@@ -41,6 +41,7 @@
    "@langchain/community": "^0.0.46",
    "@langchain/google-genai": "^0.0.11",
    "@langchain/google-vertexai": "^0.0.5",
    "agenda": "^5.0.0",
    "axios": "^1.3.4",
    "bcryptjs": "^2.4.3",
    "cheerio": "^1.0.0-rc.12",
@@ -74,7 +75,8 @@
    "multer": "^1.4.5-lts.1",
    "nodejs-gpt": "^1.37.4",
    "nodemailer": "^6.9.4",
    "openai": "4.36.0",
    "ollama": "^0.5.0",
    "openai": "^4.47.1",
    "openai-chat-tokens": "^0.2.8",
    "openid-client": "^5.4.2",
    "passport": "^0.6.0",
@@ -87,7 +89,7 @@
    "passport-local": "^1.0.0",
    "pino": "^8.12.1",
    "sharp": "^0.32.6",
    "tiktoken": "^1.0.10",
    "tiktoken": "^1.0.15",
    "traverse": "^0.6.7",
    "ua-parser-js": "^1.0.36",
    "winston": "^3.11.0",
@@ -16,10 +16,28 @@ async function endpointController(req, res) {
  /** @type {TEndpointsConfig} */
  const mergedConfig = { ...defaultEndpointsConfig, ...customConfigEndpoints };
  if (mergedConfig[EModelEndpoint.assistants] && req.app.locals?.[EModelEndpoint.assistants]) {
    const { disableBuilder, retrievalModels, capabilities, ..._rest } =
    const { disableBuilder, retrievalModels, capabilities, version, ..._rest } =
      req.app.locals[EModelEndpoint.assistants];

    mergedConfig[EModelEndpoint.assistants] = {
      ...mergedConfig[EModelEndpoint.assistants],
      version,
      retrievalModels,
      disableBuilder,
      capabilities,
    };
  }

  if (
    mergedConfig[EModelEndpoint.azureAssistants] &&
    req.app.locals?.[EModelEndpoint.azureAssistants]
  ) {
    const { disableBuilder, retrievalModels, capabilities, version, ..._rest } =
      req.app.locals[EModelEndpoint.azureAssistants];

    mergedConfig[EModelEndpoint.azureAssistants] = {
      ...mergedConfig[EModelEndpoint.azureAssistants],
      version,
      retrievalModels,
      disableBuilder,
      capabilities,
@@ -55,19 +55,27 @@ const getAvailablePluginsController = async (req, res) => {
      return;
    }

    /** @type {{ filteredTools: string[], includedTools: string[] }} */
    const { filteredTools = [], includedTools = [] } = req.app.locals;
    const pluginManifest = await fs.readFile(req.app.locals.paths.pluginManifest, 'utf8');

    const jsonData = JSON.parse(pluginManifest);
    /** @type {TPlugin[]} */

    const uniquePlugins = filterUniquePlugins(jsonData);
    const authenticatedPlugins = uniquePlugins.map((plugin) => {
      if (isPluginAuthenticated(plugin)) {
        return { ...plugin, authenticated: true };
      } else {
        return plugin;
      }
    });
    const plugins = await addOpenAPISpecs(authenticatedPlugins);
    let authenticatedPlugins = [];
    for (const plugin of uniquePlugins) {
      authenticatedPlugins.push(
        isPluginAuthenticated(plugin) ? { ...plugin, authenticated: true } : plugin,
      );
    }

    let plugins = await addOpenAPISpecs(authenticatedPlugins);

    if (includedTools.length > 0) {
      plugins = plugins.filter((plugin) => includedTools.includes(plugin.pluginKey));
    } else {
      plugins = plugins.filter((plugin) => !filteredTools.includes(plugin.pluginKey));
    }

    await cache.set(CacheKeys.PLUGINS, plugins);
    res.status(200).json(plugins);
  } catch (error) {
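For illustration (not part of the diff), the filtering rule introduced above, reduced to a pure function: an includedTools allow-list wins outright; otherwise filteredTools acts as a deny-list:

const applyToolFilters = (plugins, { includedTools = [], filteredTools = [] }) =>
  includedTools.length > 0
    ? plugins.filter((p) => includedTools.includes(p.pluginKey))
    : plugins.filter((p) => !filteredTools.includes(p.pluginKey));

applyToolFilters([{ pluginKey: 'dalle' }, { pluginKey: 'serpapi' }], { includedTools: ['dalle'] });
// -> [{ pluginKey: 'dalle' }]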
@@ -1,14 +1,13 @@
const { v4 } = require('uuid');
const express = require('express');
const {
  Constants,
  RunStatus,
  CacheKeys,
  FileSources,
  ContentTypes,
  EModelEndpoint,
  ViolationTypes,
  ImageVisionTool,
  checkOpenAIStorage,
  AssistantStreamEvents,
} = require('librechat-data-provider');
const {
@@ -21,27 +20,18 @@ const {
} = require('~/server/services/Threads');
const { sendResponse, sendMessage, sleep, isEnabled, countTokens } = require('~/server/utils');
const { runAssistant, createOnTextProgress } = require('~/server/services/AssistantService');
const { addTitle, initializeClient } = require('~/server/services/Endpoints/assistants');
const { formatMessage, createVisionPrompt } = require('~/app/clients/prompts');
const { createRun, StreamRunManager } = require('~/server/services/Runs');
const { addTitle } = require('~/server/services/Endpoints/assistants');
const { getTransactions } = require('~/models/Transaction');
const checkBalance = require('~/models/checkBalance');
const { getConvo } = require('~/models/Conversation');
const getLogStores = require('~/cache/getLogStores');
const { getModelMaxTokens } = require('~/utils');
const { getOpenAIClient } = require('./helpers');
const { logger } = require('~/config');

const router = express.Router();
const {
  setHeaders,
  handleAbort,
  validateModel,
  handleAbortError,
  // validateEndpoint,
  buildEndpointOption,
} = require('~/server/middleware');

router.post('/abort', handleAbort());
const { handleAbortError } = require('~/server/middleware');

const ten_minutes = 1000 * 60 * 10;

@@ -49,16 +39,17 @@ const ten_minutes = 1000 * 60 * 10;
 * @route POST /
 * @desc Chat with an assistant
 * @access Public
 * @param {express.Request} req - The request object, containing the request data.
 * @param {express.Response} res - The response object, used to send back a response.
 * @param {Express.Request} req - The request object, containing the request data.
 * @param {Express.Response} res - The response object, used to send back a response.
 * @returns {void}
 */
router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res) => {
const chatV1 = async (req, res) => {
  logger.debug('[/assistants/chat/] req.body', req.body);

  const {
    text,
    model,
    endpoint,
    files = [],
    promptPrefix,
    assistant_id,
@@ -70,7 +61,7 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
  } = req.body;

  /** @type {Partial<TAssistantEndpoint>} */
  const assistantsConfig = req.app.locals?.[EModelEndpoint.assistants];
  const assistantsConfig = req.app.locals?.[endpoint];

  if (assistantsConfig) {
    const { supportedIds, excludedIds } = assistantsConfig;
@@ -138,7 +129,7 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
      user: req.user.id,
      shouldSaveMessage: false,
      messageId: responseMessageId,
      endpoint: EModelEndpoint.assistants,
      endpoint,
    };

    if (error.message === 'Run cancelled') {
@@ -149,7 +140,7 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
      logger.debug('[/assistants/chat/] Request aborted on close');
    } else if (/Files.*are invalid/.test(error.message)) {
      const errorMessage = `Files are invalid, or may not have uploaded yet.${
        req.app.locals?.[EModelEndpoint.azureOpenAI].assistants
        endpoint === EModelEndpoint.azureAssistants
          ? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.'
          : ''
      }`;
@@ -205,6 +196,7 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
      const runMessages = await checkMessageGaps({
        openai,
        run_id,
        endpoint,
        thread_id,
        conversationId,
        latestMessageId: responseMessageId,
@@ -311,8 +303,7 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
    });
  };

  /** @type {{ openai: OpenAIClient }} */
  const { openai: _openai, client } = await initializeClient({
  const { openai: _openai, client } = await getOpenAIClient({
    req,
    res,
    endpointOption: req.body.endpointOption,
@@ -370,10 +361,7 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res

    /** @type {MongoFile[]} */
    const attachments = await req.body.endpointOption.attachments;
    if (
      attachments &&
      attachments.every((attachment) => attachment.source === FileSources.openai)
    ) {
    if (attachments && attachments.every((attachment) => checkOpenAIStorage(attachment.source))) {
      return;
    }

@@ -431,7 +419,7 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res

    if (processedFiles) {
      for (const file of processedFiles) {
        if (file.source !== FileSources.openai) {
        if (!checkOpenAIStorage(file.source)) {
          attachedFileIds.delete(file.file_id);
          const index = file_ids.indexOf(file.file_id);
          if (index > -1) {
@@ -467,6 +455,7 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
      assistant_id,
      thread_id,
      model: assistant_id,
      endpoint,
    };

    previousMessages.push(requestMessage);
@@ -476,7 +465,7 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res

    conversation = {
      conversationId,
      endpoint: EModelEndpoint.assistants,
      endpoint,
      promptPrefix: promptPrefix,
      instructions: instructions,
      assistant_id,
@@ -513,7 +502,8 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
    let response;

    const processRun = async (retry = false) => {
      if (req.app.locals[EModelEndpoint.azureOpenAI]?.assistants) {
      if (endpoint === EModelEndpoint.azureAssistants) {
        body.model = openai._options.model;
        openai.attachedFileIds = attachedFileIds;
        openai.visionPromise = visionPromise;
        if (retry) {
@@ -602,6 +592,7 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
      assistant_id,
      thread_id,
      model: assistant_id,
      endpoint,
    };

    sendMessage(res, {
@@ -654,6 +645,6 @@ router.post('/', validateModel, buildEndpointOption, setHeaders, async (req, res
  } catch (error) {
    await handleError(error);
  }
});
};

module.exports = router;
module.exports = chatV1;
618
api/server/controllers/assistants/chatV2.js
Normal file
@@ -0,0 +1,618 @@
const { v4 } = require('uuid');
const {
  Constants,
  RunStatus,
  CacheKeys,
  ContentTypes,
  ToolCallTypes,
  EModelEndpoint,
  ViolationTypes,
  retrievalMimeTypes,
  AssistantStreamEvents,
} = require('librechat-data-provider');
const {
  initThread,
  recordUsage,
  saveUserMessage,
  checkMessageGaps,
  addThreadMetadata,
  saveAssistantMessage,
} = require('~/server/services/Threads');
const { sendResponse, sendMessage, sleep, isEnabled, countTokens } = require('~/server/utils');
const { runAssistant, createOnTextProgress } = require('~/server/services/AssistantService');
const { createRun, StreamRunManager } = require('~/server/services/Runs');
const { addTitle } = require('~/server/services/Endpoints/assistants');
const { getTransactions } = require('~/models/Transaction');
const checkBalance = require('~/models/checkBalance');
const { getConvo } = require('~/models/Conversation');
const getLogStores = require('~/cache/getLogStores');
const { getModelMaxTokens } = require('~/utils');
const { getOpenAIClient } = require('./helpers');
const { logger } = require('~/config');

const { handleAbortError } = require('~/server/middleware');

const ten_minutes = 1000 * 60 * 10;

/**
 * @route POST /
 * @desc Chat with an assistant
 * @access Public
 * @param {Express.Request} req - The request object, containing the request data.
 * @param {Express.Response} res - The response object, used to send back a response.
 * @returns {void}
 */
const chatV2 = async (req, res) => {
  logger.debug('[/assistants/chat/] req.body', req.body);

  /** @type {{ files: MongoFile[]}} */
  const {
    text,
    model,
    endpoint,
    files = [],
    promptPrefix,
    assistant_id,
    instructions,
    thread_id: _thread_id,
    messageId: _messageId,
    conversationId: convoId,
    parentMessageId: _parentId = Constants.NO_PARENT,
  } = req.body;

  /** @type {Partial<TAssistantEndpoint>} */
  const assistantsConfig = req.app.locals?.[endpoint];

  if (assistantsConfig) {
    const { supportedIds, excludedIds } = assistantsConfig;
    const error = { message: 'Assistant not supported' };
    if (supportedIds?.length && !supportedIds.includes(assistant_id)) {
      return await handleAbortError(res, req, error, {
        sender: 'System',
        conversationId: convoId,
        messageId: v4(),
        parentMessageId: _messageId,
        error,
      });
    } else if (excludedIds?.length && excludedIds.includes(assistant_id)) {
      return await handleAbortError(res, req, error, {
        sender: 'System',
        conversationId: convoId,
        messageId: v4(),
        parentMessageId: _messageId,
      });
    }
  }

  /** @type {OpenAIClient} */
  let openai;
  /** @type {string|undefined} - the current thread id */
  let thread_id = _thread_id;
  /** @type {string|undefined} - the current run id */
  let run_id;
  /** @type {string|undefined} - the parent messageId */
  let parentMessageId = _parentId;
  /** @type {TMessage[]} */
  let previousMessages = [];
  /** @type {import('librechat-data-provider').TConversation | null} */
  let conversation = null;
  /** @type {string[]} */
  let file_ids = [];
  /** @type {Set<string>} */
  let attachedFileIds = new Set();
  /** @type {TMessage | null} */
  let requestMessage = null;

  const userMessageId = v4();
  const responseMessageId = v4();

  /** @type {string} - The conversation UUID - created if undefined */
  const conversationId = convoId ?? v4();

  const cache = getLogStores(CacheKeys.ABORT_KEYS);
  const cacheKey = `${req.user.id}:${conversationId}`;

  /** @type {Run | undefined} - The completed run, undefined if incomplete */
  let completedRun;

  const handleError = async (error) => {
    const defaultErrorMessage =
      'The Assistant run failed to initialize. Try sending a message in a new conversation.';
    const messageData = {
      thread_id,
      assistant_id,
      conversationId,
      parentMessageId,
      sender: 'System',
      user: req.user.id,
      shouldSaveMessage: false,
      messageId: responseMessageId,
      endpoint,
    };

    if (error.message === 'Run cancelled') {
      return res.end();
    } else if (error.message === 'Request closed' && completedRun) {
      return;
    } else if (error.message === 'Request closed') {
      logger.debug('[/assistants/chat/] Request aborted on close');
    } else if (/Files.*are invalid/.test(error.message)) {
      const errorMessage = `Files are invalid, or may not have uploaded yet.${
        endpoint === EModelEndpoint.azureAssistants
          ? ' If using Azure OpenAI, files are only available in the region of the assistant\'s model at the time of upload.'
          : ''
      }`;
      return sendResponse(res, messageData, errorMessage);
    } else if (error?.message?.includes('string too long')) {
      return sendResponse(
        res,
        messageData,
        'Message too long. The Assistants API has a limit of 32,768 characters per message. Please shorten it and try again.',
      );
    } else if (error?.message?.includes(ViolationTypes.TOKEN_BALANCE)) {
      return sendResponse(res, messageData, error.message);
    } else {
      logger.error('[/assistants/chat/]', error);
    }

    if (!openai || !thread_id || !run_id) {
      return sendResponse(res, messageData, defaultErrorMessage);
    }

    await sleep(2000);

    try {
      const status = await cache.get(cacheKey);
      if (status === 'cancelled') {
        logger.debug('[/assistants/chat/] Run already cancelled');
        return res.end();
      }
      await cache.delete(cacheKey);
      const cancelledRun = await openai.beta.threads.runs.cancel(thread_id, run_id);
      logger.debug('[/assistants/chat/] Cancelled run:', cancelledRun);
    } catch (error) {
      logger.error('[/assistants/chat/] Error cancelling run', error);
    }

    await sleep(2000);

    let run;
    try {
      run = await openai.beta.threads.runs.retrieve(thread_id, run_id);
      await recordUsage({
        ...run.usage,
        model: run.model,
        user: req.user.id,
        conversationId,
      });
    } catch (error) {
      logger.error('[/assistants/chat/] Error fetching or processing run', error);
    }

    let finalEvent;
    try {
      const runMessages = await checkMessageGaps({
        openai,
        run_id,
        endpoint,
        thread_id,
        conversationId,
        latestMessageId: responseMessageId,
      });

      const errorContentPart = {
        text: {
          value:
            error?.message ?? 'There was an error processing your request. Please try again later.',
        },
        type: ContentTypes.ERROR,
      };

      if (!Array.isArray(runMessages[runMessages.length - 1]?.content)) {
        runMessages[runMessages.length - 1].content = [errorContentPart];
      } else {
        const contentParts = runMessages[runMessages.length - 1].content;
        for (let i = 0; i < contentParts.length; i++) {
          const currentPart = contentParts[i];
          /** @type {CodeToolCall | RetrievalToolCall | FunctionToolCall | undefined} */
          const toolCall = currentPart?.[ContentTypes.TOOL_CALL];
          if (
            toolCall &&
            toolCall?.function &&
            !(toolCall?.function?.output || toolCall?.function?.output?.length)
          ) {
            contentParts[i] = {
              ...currentPart,
              [ContentTypes.TOOL_CALL]: {
                ...toolCall,
                function: {
                  ...toolCall.function,
                  output: 'error processing tool',
                },
              },
            };
          }
        }
        runMessages[runMessages.length - 1].content.push(errorContentPart);
      }

      finalEvent = {
        final: true,
        conversation: await getConvo(req.user.id, conversationId),
        runMessages,
      };
    } catch (error) {
      logger.error('[/assistants/chat/] Error finalizing error process', error);
      return sendResponse(res, messageData, 'The Assistant run failed');
    }

    return sendResponse(res, finalEvent);
  };

  try {
    res.on('close', async () => {
      if (!completedRun) {
        await handleError(new Error('Request closed'));
      }
    });

    if (convoId && !_thread_id) {
      completedRun = true;
      throw new Error('Missing thread_id for existing conversation');
    }

    if (!assistant_id) {
      completedRun = true;
      throw new Error('Missing assistant_id');
    }

    const checkBalanceBeforeRun = async () => {
      if (!isEnabled(process.env.CHECK_BALANCE)) {
        return;
      }
      const transactions =
        (await getTransactions({
          user: req.user.id,
          context: 'message',
          conversationId,
        })) ?? [];

      const totalPreviousTokens = Math.abs(
        transactions.reduce((acc, curr) => acc + curr.rawAmount, 0),
      );

      // TODO: make promptBuffer a config option; buffer for titles, needs buffer for system instructions
      const promptBuffer = parentMessageId === Constants.NO_PARENT && !_thread_id ? 200 : 0;
      // 5 is added for labels
      let promptTokens = (await countTokens(text + (promptPrefix ?? ''))) + 5;
      promptTokens += totalPreviousTokens + promptBuffer;
      // Count tokens up to the current context window
      promptTokens = Math.min(promptTokens, getModelMaxTokens(model));

      await checkBalance({
        req,
        res,
        txData: {
          model,
          user: req.user.id,
          tokenType: 'prompt',
          amount: promptTokens,
        },
      });
    };

    const { openai: _openai, client } = await getOpenAIClient({
      req,
      res,
      endpointOption: req.body.endpointOption,
      initAppClient: true,
    });

    openai = _openai;

    if (previousMessages.length) {
      parentMessageId = previousMessages[previousMessages.length - 1].messageId;
    }

    let userMessage = {
      role: 'user',
      content: [
        {
          type: ContentTypes.TEXT,
          text,
        },
      ],
      metadata: {
        messageId: userMessageId,
      },
    };

    /** @type {CreateRunBody | undefined} */
    const body = {
      assistant_id,
      model,
    };

    if (promptPrefix) {
      body.additional_instructions = promptPrefix;
    }

    if (instructions) {
      body.instructions = instructions;
    }

    const getRequestFileIds = async () => {
      let thread_file_ids = [];
      if (convoId) {
        const convo = await getConvo(req.user.id, convoId);
        if (convo && convo.file_ids) {
          thread_file_ids = convo.file_ids;
        }
      }

      if (files.length || thread_file_ids.length) {
        attachedFileIds = new Set([...file_ids, ...thread_file_ids]);

        let attachmentIndex = 0;
        for (const file of files) {
          file_ids.push(file.file_id);
          if (file.type.startsWith('image')) {
            userMessage.content.push({
              type: ContentTypes.IMAGE_FILE,
              [ContentTypes.IMAGE_FILE]: { file_id: file.file_id },
            });
          }

          if (!userMessage.attachments) {
            userMessage.attachments = [];
          }

          userMessage.attachments.push({
            file_id: file.file_id,
            tools: [{ type: ToolCallTypes.CODE_INTERPRETER }],
          });

          if (file.type.startsWith('image')) {
            continue;
          }

          const mimeType = file.type;
          const isSupportedByRetrieval = retrievalMimeTypes.some((regex) => regex.test(mimeType));
          if (isSupportedByRetrieval) {
            userMessage.attachments[attachmentIndex].tools.push({
              type: ToolCallTypes.FILE_SEARCH,
            });
          }

          attachmentIndex++;
        }
      }
    };

    const initializeThread = async () => {
      await getRequestFileIds();

      // TODO: may allow multiple messages to be created beforehand in a future update
      const initThreadBody = {
        messages: [userMessage],
        metadata: {
          user: req.user.id,
          conversationId,
        },
      };

      const result = await initThread({ openai, body: initThreadBody, thread_id });
      thread_id = result.thread_id;

      createOnTextProgress({
        openai,
        conversationId,
        userMessageId,
        messageId: responseMessageId,
        thread_id,
      });

      requestMessage = {
        user: req.user.id,
        text,
        messageId: userMessageId,
        parentMessageId,
        // TODO: make sure client sends correct format for `files`, use zod
        files,
        file_ids,
        conversationId,
        isCreatedByUser: true,
        assistant_id,
        thread_id,
        model: assistant_id,
        endpoint,
      };

      previousMessages.push(requestMessage);

      /* asynchronous */
      saveUserMessage({ ...requestMessage, model });

      conversation = {
        conversationId,
        endpoint,
        promptPrefix: promptPrefix,
        instructions: instructions,
        assistant_id,
        // model,
      };

      if (file_ids.length) {
        conversation.file_ids = file_ids;
      }
    };

    const promises = [initializeThread(), checkBalanceBeforeRun()];
    await Promise.all(promises);

    const sendInitialResponse = () => {
      sendMessage(res, {
        sync: true,
        conversationId,
        // messages: previousMessages,
        requestMessage,
        responseMessage: {
          user: req.user.id,
          messageId: openai.responseMessage.messageId,
          parentMessageId: userMessageId,
          conversationId,
          assistant_id,
          thread_id,
          model: assistant_id,
        },
      });
    };

    /** @type {RunResponse | typeof StreamRunManager | undefined} */
    let response;

    const processRun = async (retry = false) => {
      if (endpoint === EModelEndpoint.azureAssistants) {
        body.model = openai._options.model;
        openai.attachedFileIds = attachedFileIds;
        if (retry) {
          response = await runAssistant({
            openai,
            thread_id,
            run_id,
            in_progress: openai.in_progress,
          });
          return;
        }

        /* NOTE:
         * By default, a Run will use the model and tools configuration specified in Assistant object,
         * but you can override most of these when creating the Run for added flexibility:
         */
        const run = await createRun({
          openai,
          thread_id,
          body,
        });

        run_id = run.id;
        await cache.set(cacheKey, `${thread_id}:${run_id}`, ten_minutes);
        sendInitialResponse();

        // todo: retry logic
        response = await runAssistant({ openai, thread_id, run_id });
        return;
      }

      /** @type {{[AssistantStreamEvents.ThreadRunCreated]: (event: ThreadRunCreated) => Promise<void>}} */
      const handlers = {
        [AssistantStreamEvents.ThreadRunCreated]: async (event) => {
          await cache.set(cacheKey, `${thread_id}:${event.data.id}`, ten_minutes);
          run_id = event.data.id;
          sendInitialResponse();
        },
      };

      const streamRunManager = new StreamRunManager({
        req,
        res,
        openai,
        handlers,
        thread_id,
        attachedFileIds,
        responseMessage: openai.responseMessage,
        // streamOptions: {

        // },
      });

      await streamRunManager.runAssistant({
        thread_id,
        body,
      });

      response = streamRunManager;
    };

    await processRun();
    logger.debug('[/assistants/chat/] response', {
      run: response.run,
      steps: response.steps,
    });

    if (response.run.status === RunStatus.CANCELLED) {
      logger.debug('[/assistants/chat/] Run cancelled, handled by `abortRun`');
      return res.end();
    }

    if (response.run.status === RunStatus.IN_PROGRESS) {
      processRun(true);
    }

    completedRun = response.run;

    /** @type {ResponseMessage} */
    const responseMessage = {
      ...(response.responseMessage ?? response.finalMessage),
      parentMessageId: userMessageId,
      conversationId,
      user: req.user.id,
      assistant_id,
      thread_id,
      model: assistant_id,
      endpoint,
    };

    sendMessage(res, {
      final: true,
      conversation,
      requestMessage: {
        parentMessageId,
        thread_id,
      },
    });
    res.end();

    await saveAssistantMessage({ ...responseMessage, model });

    if (parentMessageId === Constants.NO_PARENT && !_thread_id) {
      addTitle(req, {
        text,
        responseText: response.text,
        conversationId,
        client,
      });
    }

    await addThreadMetadata({
      openai,
      thread_id,
      messageId: responseMessage.messageId,
      messages: response.messages,
    });

    if (!response.run.usage) {
      await sleep(3000);
      completedRun = await openai.beta.threads.runs.retrieve(thread_id, response.run.id);
      if (completedRun.usage) {
        await recordUsage({
          ...completedRun.usage,
          user: req.user.id,
          model: completedRun.model ?? model,
          conversationId,
        });
      }
    } else {
      await recordUsage({
        ...response.run.usage,
        user: req.user.id,
        model: response.run.model ?? model,
        conversationId,
      });
    }
  } catch (error) {
    await handleError(error);
  }
};

module.exports = chatV2;
158
api/server/controllers/assistants/helpers.js
Normal file
@@ -0,0 +1,158 @@
const { EModelEndpoint, CacheKeys, defaultAssistantsVersion } = require('librechat-data-provider');
const {
  initializeClient: initAzureClient,
} = require('~/server/services/Endpoints/azureAssistants');
const { initializeClient } = require('~/server/services/Endpoints/assistants');
const { getLogStores } = require('~/cache');

/**
 * @param {Express.Request} req
 * @param {string} [endpoint]
 * @returns {Promise<string>}
 */
const getCurrentVersion = async (req, endpoint) => {
  const index = req.baseUrl.lastIndexOf('/v');
  let version = index !== -1 ? req.baseUrl.substring(index + 1, index + 3) : null;
  if (!version && req.body.version) {
    version = `v${req.body.version}`;
  }
  if (!version && endpoint) {
    const cache = getLogStores(CacheKeys.CONFIG_STORE);
    const cachedEndpointsConfig = await cache.get(CacheKeys.ENDPOINT_CONFIG);
    version = `v${
      cachedEndpointsConfig?.[endpoint]?.version ?? defaultAssistantsVersion[endpoint]
    }`;
  }
  if (!version?.startsWith('v') && version.length !== 2) {
    throw new Error(`[${req.baseUrl}] Invalid version: ${version}`);
  }
  return version;
};

/**
 * Asynchronously lists assistants based on provided query parameters.
 *
 * Initializes the client with the current request and response objects and lists assistants
 * according to the query parameters. This function abstracts the logic for non-Azure paths.
 *
 * @async
 * @param {object} params - The parameters object.
 * @param {object} params.req - The request object, used for initializing the client.
 * @param {object} params.res - The response object, used for initializing the client.
 * @param {string} params.version - The API version to use.
 * @param {object} params.query - The query parameters to list assistants (e.g., limit, order).
 * @returns {Promise<object>} A promise that resolves to the response from the `openai.beta.assistants.list` method call.
 */
const listAssistants = async ({ req, res, version, query }) => {
  const { openai } = await getOpenAIClient({ req, res, version });
  return openai.beta.assistants.list(query);
};

/**
 * Asynchronously lists assistants for Azure configured groups.
 *
 * Iterates through Azure configured assistant groups, initializes the client with the current request and response objects,
 * lists assistants based on the provided query parameters, and merges their data alongside the model information into a single array.
 *
 * @async
 * @param {object} params - The parameters object.
 * @param {object} params.req - The request object, used for initializing the client and manipulating the request body.
 * @param {object} params.res - The response object, used for initializing the client.
 * @param {string} params.version - The API version to use.
 * @param {TAzureConfig} params.azureConfig - The Azure configuration object containing assistantGroups and groupMap.
 * @param {object} params.query - The query parameters to list assistants (e.g., limit, order).
 * @returns {Promise<AssistantListResponse>} A promise that resolves to an array of assistant data merged with their respective model information.
 */
const listAssistantsForAzure = async ({ req, res, version, azureConfig = {}, query }) => {
  /** @type {Array<[string, TAzureModelConfig]>} */
  const groupModelTuples = [];
  const promises = [];
  /** @type {Array<TAzureGroup>} */
  const groups = [];

  const { groupMap, assistantGroups } = azureConfig;

  for (const groupName of assistantGroups) {
    const group = groupMap[groupName];
    groups.push(group);

    const currentModelTuples = Object.entries(group?.models);
    groupModelTuples.push(currentModelTuples);

    /* The specified model is only necessary to
      fetch assistants for the shared instance */
    req.body.model = currentModelTuples[0][0];
    promises.push(listAssistants({ req, res, version, query }));
  }

  const resolvedQueries = await Promise.all(promises);
  const data = resolvedQueries.flatMap((res, i) =>
    res.data.map((assistant) => {
      const deploymentName = assistant.model;
      const currentGroup = groups[i];
      const currentModelTuples = groupModelTuples[i];
      const firstModel = currentModelTuples[0][0];

      if (currentGroup.deploymentName === deploymentName) {
        return { ...assistant, model: firstModel };
      }

      for (const [model, modelConfig] of currentModelTuples) {
        if (modelConfig.deploymentName === deploymentName) {
          return { ...assistant, model };
        }
      }

      return { ...assistant, model: firstModel };
    }),
  );

  return {
    first_id: data[0]?.id,
    last_id: data[data.length - 1]?.id,
    object: 'list',
    has_more: false,
    data,
  };
};

async function getOpenAIClient({ req, res, endpointOption, initAppClient, overrideEndpoint }) {
  let endpoint = overrideEndpoint ?? req.body.endpoint ?? req.query.endpoint;
  const version = await getCurrentVersion(req, endpoint);
  if (!endpoint) {
    throw new Error(`[${req.baseUrl}] Endpoint is required`);
  }

  let result;
  if (endpoint === EModelEndpoint.assistants) {
    result = await initializeClient({ req, res, version, endpointOption, initAppClient });
  } else if (endpoint === EModelEndpoint.azureAssistants) {
    result = await initAzureClient({ req, res, version, endpointOption, initAppClient });
  }

  return result;
}

const fetchAssistants = async (req, res) => {
  const { limit = 100, order = 'desc', after, before, endpoint } = req.query;
  const version = await getCurrentVersion(req, endpoint);
  const query = { limit, order, after, before };

  /** @type {AssistantListResponse} */
  let body;

  if (endpoint === EModelEndpoint.assistants) {
    ({ body } = await listAssistants({ req, res, version, query }));
  } else if (endpoint === EModelEndpoint.azureAssistants) {
    const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];
    body = await listAssistantsForAzure({ req, res, version, azureConfig, query });
  }

  return body;
};

module.exports = {
  getOpenAIClient,
  fetchAssistants,
  getCurrentVersion,
};
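For illustration (not part of the diff), getCurrentVersion's resolution order with stubbed requests (the request shapes are assumptions): the URL segment wins, then req.body.version, then the cached endpoint config or defaultAssistantsVersion:

async function demoVersionResolution() {
  // 1) '/v2' in the base URL -> 'v2'
  const fromUrl = await getCurrentVersion({ baseUrl: '/api/assistants/v2', body: {} });
  // 2) no URL segment, body.version: 1 -> 'v1'
  const fromBody = await getCurrentVersion({ baseUrl: '/api/assistants', body: { version: 1 } });
  return [fromUrl, fromBody]; // ['v2', 'v1']
}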
@@ -1,34 +1,11 @@
|
||||
const multer = require('multer');
|
||||
const express = require('express');
|
||||
const { FileContext, EModelEndpoint } = require('librechat-data-provider');
|
||||
const {
|
||||
initializeClient,
|
||||
listAssistantsForAzure,
|
||||
listAssistants,
|
||||
} = require('~/server/services/Endpoints/assistants');
|
||||
const { FileContext } = require('librechat-data-provider');
|
||||
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { deleteAssistantActions } = require('~/server/services/ActionService');
const { uploadImageBuffer } = require('~/server/services/Files/process');
const { updateAssistant, getAssistants } = require('~/models/Assistant');
const { getOpenAIClient, fetchAssistants } = require('./helpers');
const { deleteFileByFilter } = require('~/models/File');
const { logger } = require('~/config');
const actions = require('./actions');
const tools = require('./tools');

const upload = multer();
const router = express.Router();

/**
 * Assistant actions route.
 * @route GET|POST /assistants/actions
 */
router.use('/actions', actions);

/**
 * Lists available tools.
 * @route GET /assistants/tools
 * @returns {TPlugin[]} 200 - application/json
 */
router.use('/tools', tools);

/**
 * Create an assistant.
@@ -36,12 +13,11 @@ router.use('/tools', tools);
 * @param {AssistantCreateParams} req.body - The assistant creation parameters.
 * @returns {Assistant} 201 - success response - application/json
 */
router.post('/', async (req, res) => {
const createAssistant = async (req, res) => {
  try {
    /** @type {{ openai: OpenAI }} */
    const { openai } = await initializeClient({ req, res });
    const { openai } = await getOpenAIClient({ req, res });

    const { tools = [], ...assistantData } = req.body;
    const { tools = [], endpoint, ...assistantData } = req.body;
    assistantData.tools = tools
      .map((tool) => {
        if (typeof tool !== 'string') {
@@ -52,18 +28,28 @@ router.post('/', async (req, res) => {
      })
      .filter((tool) => tool);

    let azureModelIdentifier = null;
    if (openai.locals?.azureOptions) {
      azureModelIdentifier = assistantData.model;
      assistantData.model = openai.locals.azureOptions.azureOpenAIApiDeploymentName;
    }

    assistantData.metadata = {
      author: req.user.id,
      endpoint,
    };

    const assistant = await openai.beta.assistants.create(assistantData);
    if (azureModelIdentifier) {
      assistant.model = azureModelIdentifier;
    }
    logger.debug('/assistants/', assistant);
    res.status(201).json(assistant);
  } catch (error) {
    logger.error('[/assistants] Error creating assistant', error);
    res.status(500).json({ error: error.message });
  }
});
};

/**
 * Retrieves an assistant.
@@ -71,10 +57,10 @@ router.post('/', async (req, res) => {
 * @param {string} req.params.id - Assistant identifier.
 * @returns {Assistant} 200 - success response - application/json
 */
router.get('/:id', async (req, res) => {
const retrieveAssistant = async (req, res) => {
  try {
    /** @type {{ openai: OpenAI }} */
    const { openai } = await initializeClient({ req, res });
    /* NOTE: not actually being used right now */
    const { openai } = await getOpenAIClient({ req, res });

    const assistant_id = req.params.id;
    const assistant = await openai.beta.assistants.retrieve(assistant_id);
@@ -83,22 +69,23 @@ router.get('/:id', async (req, res) => {
    logger.error('[/assistants/:id] Error retrieving assistant', error);
    res.status(500).json({ error: error.message });
  }
});
};

/**
 * Modifies an assistant.
 * @route PATCH /assistants/:id
 * @param {object} req - Express Request
 * @param {object} req.params - Request params
 * @param {string} req.params.id - Assistant identifier.
 * @param {AssistantUpdateParams} req.body - The assistant update parameters.
 * @returns {Assistant} 200 - success response - application/json
 */
router.patch('/:id', async (req, res) => {
const patchAssistant = async (req, res) => {
  try {
    /** @type {{ openai: OpenAI }} */
    const { openai } = await initializeClient({ req, res });
    const { openai } = await getOpenAIClient({ req, res });

    const assistant_id = req.params.id;
    const updateData = req.body;
    const { endpoint: _e, ...updateData } = req.body;
    updateData.tools = (updateData.tools ?? [])
      .map((tool) => {
        if (typeof tool !== 'string') {
@@ -119,52 +106,46 @@ router.patch('/:id', async (req, res) => {
    logger.error('[/assistants/:id] Error updating assistant', error);
    res.status(500).json({ error: error.message });
  }
});
};

/**
 * Deletes an assistant.
 * @route DELETE /assistants/:id
 * @param {object} req - Express Request
 * @param {object} req.params - Request params
 * @param {string} req.params.id - Assistant identifier.
 * @returns {Assistant} 200 - success response - application/json
 */
router.delete('/:id', async (req, res) => {
const deleteAssistant = async (req, res) => {
  try {
    /** @type {{ openai: OpenAI }} */
    const { openai } = await initializeClient({ req, res });
    const { openai } = await getOpenAIClient({ req, res });

    const assistant_id = req.params.id;
    const deletionStatus = await openai.beta.assistants.del(assistant_id);
    if (deletionStatus?.deleted) {
      await deleteAssistantActions({ req, assistant_id });
    }
    res.json(deletionStatus);
  } catch (error) {
    logger.error('[/assistants/:id] Error deleting assistant', error);
    res.status(500).json({ error: 'Error deleting assistant' });
  }
});
};

/**
 * Returns a list of assistants.
 * @route GET /assistants
 * @param {object} req - Express Request
 * @param {AssistantListParams} req.query - The assistant list parameters for pagination and sorting.
 * @returns {AssistantListResponse} 200 - success response - application/json
 */
router.get('/', async (req, res) => {
const listAssistants = async (req, res) => {
  try {
    const { limit = 100, order = 'desc', after, before } = req.query;
    const query = { limit, order, after, before };
    const body = await fetchAssistants(req, res);

    const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];
    /** @type {AssistantListResponse} */
    let body;

    if (azureConfig?.assistants) {
      body = await listAssistantsForAzure({ req, res, azureConfig, query });
    } else {
      ({ body } = await listAssistants({ req, res, query }));
    }

    if (req.app.locals?.[EModelEndpoint.assistants]) {
    if (req.app.locals?.[req.query.endpoint]) {
      /** @type {Partial<TAssistantEndpoint>} */
      const assistantsConfig = req.app.locals[EModelEndpoint.assistants];
      const assistantsConfig = req.app.locals[req.query.endpoint];
      const { supportedIds, excludedIds } = assistantsConfig;
      if (supportedIds?.length) {
        body.data = body.data.filter((assistant) => supportedIds.includes(assistant.id));
@@ -178,31 +159,34 @@ router.get('/', async (req, res) => {
    logger.error('[/assistants] Error listing assistants', error);
    res.status(500).json({ message: 'Error listing assistants' });
  }
});
};

/**
 * Returns a list of the user's assistant documents (metadata saved to database).
 * @route GET /assistants/documents
 * @returns {AssistantDocument[]} 200 - success response - application/json
 */
router.get('/documents', async (req, res) => {
const getAssistantDocuments = async (req, res) => {
  try {
    res.json(await getAssistants({ user: req.user.id }));
  } catch (error) {
    logger.error('[/assistants/documents] Error listing assistant documents', error);
    res.status(500).json({ error: error.message });
  }
});
};

/**
 * Uploads and updates an avatar for a specific assistant.
 * @route POST /avatar/:assistant_id
 * @param {object} req - Express Request
 * @param {object} req.params - Request params
 * @param {string} req.params.assistant_id - The ID of the assistant.
 * @param {Express.Multer.File} req.file - The avatar image file.
 * @param {object} req.body - Request body
 * @param {string} [req.body.metadata] - Optional metadata for the assistant's avatar.
 * @returns {Object} 200 - success response - application/json
 */
router.post('/avatar/:assistant_id', upload.single('file'), async (req, res) => {
const uploadAssistantAvatar = async (req, res) => {
  try {
    const { assistant_id } = req.params;
    if (!assistant_id) {
@@ -210,8 +194,7 @@ router.post('/avatar/:assistant_id', upload.single('file'), async (req, res) =>
    }

    let { metadata: _metadata = '{}' } = req.body;
    /** @type {{ openai: OpenAI }} */
    const { openai } = await initializeClient({ req, res });
    const { openai } = await getOpenAIClient({ req, res });

    const image = await uploadImageBuffer({
      req,
@@ -266,6 +249,14 @@ router.post('/avatar/:assistant_id', upload.single('file'), async (req, res) =>
    logger.error(message, error);
    res.status(500).json({ message });
  }
});
};

module.exports = router;
module.exports = {
  createAssistant,
  retrieveAssistant,
  patchAssistant,
  deleteAssistant,
  listAssistants,
  getAssistantDocuments,
  uploadAssistantAvatar,
};
208
api/server/controllers/assistants/v2.js
Normal file
@@ -0,0 +1,208 @@
const { ToolCallTypes } = require('librechat-data-provider');
const { validateAndUpdateTool } = require('~/server/services/ActionService');
const { getOpenAIClient } = require('./helpers');
const { logger } = require('~/config');

/**
 * Create an assistant.
 * @route POST /assistants
 * @param {AssistantCreateParams} req.body - The assistant creation parameters.
 * @returns {Assistant} 201 - success response - application/json
 */
const createAssistant = async (req, res) => {
  try {
    /** @type {{ openai: OpenAIClient }} */
    const { openai } = await getOpenAIClient({ req, res });

    const { tools = [], endpoint, ...assistantData } = req.body;
    assistantData.tools = tools
      .map((tool) => {
        if (typeof tool !== 'string') {
          return tool;
        }

        return req.app.locals.availableTools[tool];
      })
      .filter((tool) => tool);

    let azureModelIdentifier = null;
    if (openai.locals?.azureOptions) {
      azureModelIdentifier = assistantData.model;
      assistantData.model = openai.locals.azureOptions.azureOpenAIApiDeploymentName;
    }

    assistantData.metadata = {
      author: req.user.id,
      endpoint,
    };

    const assistant = await openai.beta.assistants.create(assistantData);
    if (azureModelIdentifier) {
      assistant.model = azureModelIdentifier;
    }
    logger.debug('/assistants/', assistant);
    res.status(201).json(assistant);
  } catch (error) {
    logger.error('[/assistants] Error creating assistant', error);
    res.status(500).json({ error: error.message });
  }
};

/**
 * Modifies an assistant.
 * @param {object} params
 * @param {Express.Request} params.req
 * @param {OpenAIClient} params.openai
 * @param {string} params.assistant_id
 * @param {AssistantUpdateParams} params.updateData
 * @returns {Promise<Assistant>} The updated assistant.
 */
const updateAssistant = async ({ req, openai, assistant_id, updateData }) => {
  const tools = [];

  let hasFileSearch = false;
  for (const tool of updateData.tools ?? []) {
    let actualTool = typeof tool === 'string' ? req.app.locals.availableTools[tool] : tool;

    if (!actualTool) {
      continue;
    }

    if (actualTool.type === ToolCallTypes.FILE_SEARCH) {
      hasFileSearch = true;
    }

    if (!actualTool.function) {
      tools.push(actualTool);
      continue;
    }

    const updatedTool = await validateAndUpdateTool({ req, tool: actualTool, assistant_id });
    if (updatedTool) {
      tools.push(updatedTool);
    }
  }

  if (hasFileSearch && !updateData.tool_resources) {
    const assistant = await openai.beta.assistants.retrieve(assistant_id);
    updateData.tool_resources = assistant.tool_resources ?? null;
  }

  if (hasFileSearch && !updateData.tool_resources?.file_search) {
    updateData.tool_resources = {
      ...(updateData.tool_resources ?? {}),
      file_search: {
        vector_store_ids: [],
      },
    };
  }

  updateData.tools = tools;

  if (openai.locals?.azureOptions && updateData.model) {
    updateData.model = openai.locals.azureOptions.azureOpenAIApiDeploymentName;
  }

  return await openai.beta.assistants.update(assistant_id, updateData);
};

/**
 * Modifies an assistant with the resource file id.
 * @param {object} params
 * @param {Express.Request} params.req
 * @param {OpenAIClient} params.openai
 * @param {string} params.assistant_id
 * @param {string} params.tool_resource
 * @param {string} params.file_id
 * @param {AssistantUpdateParams} params.updateData
 * @returns {Promise<Assistant>} The updated assistant.
 */
const addResourceFileId = async ({ req, openai, assistant_id, tool_resource, file_id }) => {
  const assistant = await openai.beta.assistants.retrieve(assistant_id);
  const { tool_resources = {} } = assistant;
  if (tool_resources[tool_resource]) {
    tool_resources[tool_resource].file_ids.push(file_id);
  } else {
    tool_resources[tool_resource] = { file_ids: [file_id] };
  }

  delete assistant.id;
  return await updateAssistant({
    req,
    openai,
    assistant_id,
    updateData: { tools: assistant.tools, tool_resources },
  });
};

/**
 * Deletes a file ID from an assistant's resource.
 * @param {object} params
 * @param {Express.Request} params.req
 * @param {OpenAIClient} params.openai
 * @param {string} params.assistant_id
 * @param {string} [params.tool_resource]
 * @param {string} params.file_id
 * @param {AssistantUpdateParams} params.updateData
 * @returns {Promise<Assistant>} The updated assistant.
 */
const deleteResourceFileId = async ({ req, openai, assistant_id, tool_resource, file_id }) => {
  const assistant = await openai.beta.assistants.retrieve(assistant_id);
  const { tool_resources = {} } = assistant;

  if (tool_resource && tool_resources[tool_resource]) {
    const resource = tool_resources[tool_resource];
    const index = resource.file_ids.indexOf(file_id);
    if (index !== -1) {
      resource.file_ids.splice(index, 1);
    }
  } else {
    for (const resourceKey in tool_resources) {
      const resource = tool_resources[resourceKey];
      const index = resource.file_ids.indexOf(file_id);
      if (index !== -1) {
        resource.file_ids.splice(index, 1);
        break;
      }
    }
  }

  delete assistant.id;
  return await updateAssistant({
    req,
    openai,
    assistant_id,
    updateData: { tools: assistant.tools, tool_resources },
  });
};

/**
 * Modifies an assistant.
 * @route PATCH /assistants/:id
 * @param {object} req - Express Request
 * @param {object} req.params - Request params
 * @param {string} req.params.id - Assistant identifier.
 * @param {AssistantUpdateParams} req.body - The assistant update parameters.
 * @returns {Assistant} 200 - success response - application/json
 */
const patchAssistant = async (req, res) => {
  try {
    const { openai } = await getOpenAIClient({ req, res });
    const assistant_id = req.params.id;
    const { endpoint: _e, ...updateData } = req.body;
    updateData.tools = updateData.tools ?? [];
    const updatedAssistant = await updateAssistant({ req, openai, assistant_id, updateData });
    res.json(updatedAssistant);
  } catch (error) {
    logger.error('[/assistants/:id] Error updating assistant', error);
    res.status(500).json({ error: error.message });
  }
};

module.exports = {
  patchAssistant,
  createAssistant,
  updateAssistant,
  addResourceFileId,
  deleteResourceFileId,
};
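The `addResourceFileId` / `deleteResourceFileId` helpers above do their work by reshaping the assistant's `tool_resources` map before issuing a full update. Below is a minimal standalone sketch of that bookkeeping, using a made-up assistant object in place of a live `openai.beta.assistants.retrieve()` result:

```js
// Standalone sketch of the tool_resources bookkeeping done by
// addResourceFileId above; the assistant object is a stand-in for a
// retrieved OpenAI assistant, and the ??= guard is a sketch detail.
const assistant = {
  tools: [{ type: 'file_search' }],
  tool_resources: {
    file_search: { vector_store_ids: ['vs_123'] },
  },
};

// Mirrors addResourceFileId: append a file to a named resource bucket,
// creating the bucket if it does not exist yet.
const addFile = (tool_resources, tool_resource, file_id) => {
  if (tool_resources[tool_resource]) {
    (tool_resources[tool_resource].file_ids ??= []).push(file_id);
  } else {
    tool_resources[tool_resource] = { file_ids: [file_id] };
  }
  return tool_resources;
};

console.log(addFile(assistant.tool_resources, 'code_interpreter', 'file_abc'));
// => { file_search: { vector_store_ids: ['vs_123'] },
//      code_interpreter: { file_ids: ['file_abc'] } }
```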
@@ -85,6 +85,7 @@ const startServer = async () => {
  app.use('/api/assistants', routes.assistants);
  app.use('/api/files', await routes.files.initialize());
  app.use('/images/', validateImageRequest, routes.staticRoute);
  app.use('/api/share', routes.share);

  app.use((req, res) => {
    res.status(404).sendFile(path.join(app.locals.paths.dist, 'index.html'));

@@ -1,4 +1,4 @@
const { EModelEndpoint } = require('librechat-data-provider');
const { isAssistantsEndpoint } = require('librechat-data-provider');
const { sendMessage, sendError, countTokens, isEnabled } = require('~/server/utils');
const { truncateText, smartTruncateText } = require('~/app/clients/prompts');
const { saveMessage, getConvo, getConvoTitle } = require('~/models');
@@ -15,7 +15,7 @@ async function abortMessage(req, res) {
    abortKey = conversationId;
  }

  if (endpoint === EModelEndpoint.assistants) {
  if (isAssistantsEndpoint(endpoint)) {
    return await abortRun(req, res);
  }

@@ -73,6 +73,8 @@ const createAbortController = (req, res, getAbortData) => {
      ...responseData,
      conversationId,
      finish_reason: 'incomplete',
      endpoint: endpointOption.endpoint,
      iconURL: endpointOption.iconURL,
      model: endpointOption.modelOptions.model,
      unfinished: false,
      error: false,

@@ -10,7 +10,7 @@ const three_minutes = 1000 * 60 * 3;

async function abortRun(req, res) {
  res.setHeader('Content-Type', 'application/json');
  const { abortKey } = req.body;
  const { abortKey, endpoint } = req.body;
  const [conversationId, latestMessageId] = abortKey.split(':');
  const conversation = await getConvo(req.user.id, conversationId);

@@ -68,9 +68,10 @@ async function abortRun(req, res) {

    runMessages = await checkMessageGaps({
      openai,
      latestMessageId,
      endpoint,
      thread_id,
      run_id,
      latestMessageId,
      conversationId,
    });

@@ -1,5 +1,6 @@
const { parseConvo, EModelEndpoint } = require('librechat-data-provider');
const { getModelsConfig } = require('~/server/controllers/ModelController');
const azureAssistants = require('~/server/services/Endpoints/azureAssistants');
const assistants = require('~/server/services/Endpoints/assistants');
const gptPlugins = require('~/server/services/Endpoints/gptPlugins');
const { processFiles } = require('~/server/services/Files/process');
@@ -7,6 +8,8 @@ const anthropic = require('~/server/services/Endpoints/anthropic');
const openAI = require('~/server/services/Endpoints/openAI');
const custom = require('~/server/services/Endpoints/custom');
const google = require('~/server/services/Endpoints/google');
const enforceModelSpec = require('./enforceModelSpec');
const { handleError } = require('~/server/utils');

const buildFunction = {
  [EModelEndpoint.openAI]: openAI.buildOptions,
@@ -16,11 +19,46 @@ const buildFunction = {
  [EModelEndpoint.anthropic]: anthropic.buildOptions,
  [EModelEndpoint.gptPlugins]: gptPlugins.buildOptions,
  [EModelEndpoint.assistants]: assistants.buildOptions,
  [EModelEndpoint.azureAssistants]: azureAssistants.buildOptions,
};

async function buildEndpointOption(req, res, next) {
  const { endpoint, endpointType } = req.body;
  const parsedBody = parseConvo({ endpoint, endpointType, conversation: req.body });

  if (req.app.locals.modelSpecs?.list && req.app.locals.modelSpecs?.enforce) {
    /** @type {{ list: TModelSpec[] }} */
    const { list } = req.app.locals.modelSpecs;
    const { spec } = parsedBody;

    if (!spec) {
      return handleError(res, { text: 'No model spec selected' });
    }

    const currentModelSpec = list.find((s) => s.name === spec);
    if (!currentModelSpec) {
      return handleError(res, { text: 'Invalid model spec' });
    }

    if (endpoint !== currentModelSpec.preset.endpoint) {
      return handleError(res, { text: 'Model spec mismatch' });
    }

    if (
      currentModelSpec.preset.endpoint !== EModelEndpoint.gptPlugins &&
      currentModelSpec.preset.tools
    ) {
      return handleError(res, {
        text: `Only the "${EModelEndpoint.gptPlugins}" endpoint can have tools defined in the preset`,
      });
    }

    const isValidModelSpec = enforceModelSpec(currentModelSpec, parsedBody);
    if (!isValidModelSpec) {
      return handleError(res, { text: 'Model spec mismatch' });
    }
  }

  req.body.endpointOption = buildFunction[endpointType ?? endpoint](
    endpoint,
    parsedBody,
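To see what the new enforcement path in `buildEndpointOption` is checking, here is a compressed sketch of the same decision sequence over hypothetical spec data (shapes loosely mirror `TModelSpec`; all names and values are illustrative):

```js
// Hypothetical spec list; in LibreChat these come from the modelSpecs
// section of librechat.yaml and are cached on req.app.locals.
const modelSpecs = {
  enforce: true,
  list: [{ name: 'gpt4-default', preset: { endpoint: 'openAI', model: 'gpt-4' } }],
};

// Simplified restatement of the middleware's gate order.
const validate = (parsedBody) => {
  if (!parsedBody.spec) return 'No model spec selected';
  const spec = modelSpecs.list.find((s) => s.name === parsedBody.spec);
  if (!spec) return 'Invalid model spec';
  if (parsedBody.endpoint !== spec.preset.endpoint) return 'Model spec mismatch';
  return 'ok'; // enforceModelSpec(spec, parsedBody) then does the deep key check
};

console.log(validate({ spec: 'gpt4-default', endpoint: 'openAI', model: 'gpt-4' })); // 'ok'
console.log(validate({ spec: 'gpt4-default', endpoint: 'anthropic' })); // 'Model spec mismatch'
```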
25
api/server/middleware/checkDomainAllowed.js
Normal file
@@ -0,0 +1,25 @@
const { isDomainAllowed } = require('~/server/services/AuthService');
const { logger } = require('~/config');

/**
 * Checks if the domain of the user's social login email is allowed.
 *
 * @async
 * @function
 * @param {Object} req - Express request object.
 * @param {Object} res - Express response object.
 * @param {Function} next - Next middleware function.
 *
 * @returns {Promise<function|Object>} - Returns a Promise which when resolved calls the next middleware if the domain's email is allowed
 */
const checkDomainAllowed = async (req, res, next = () => {}) => {
  const email = req?.user?.email;
  if (email && !(await isDomainAllowed(email))) {
    logger.error(`[Social Login] [Social Login not allowed] [Email: ${email}]`);
    return res.redirect('/login');
  } else {
    return next();
  }
};

module.exports = checkDomainAllowed;
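A usage sketch for this middleware — both mounted on a route and awaited imperatively, which is how the OAuth handler further down in this diff consumes it. Paths assume LibreChat's `~` module alias; the route itself is illustrative:

```js
const express = require('express');
const checkDomainAllowed = require('~/server/middleware/checkDomainAllowed');

const app = express();

// As ordinary route middleware: redirects to /login on a disallowed domain.
app.get('/oauth/example/callback', checkDomainAllowed, (req, res) => {
  res.send('logged in');
});

// Awaited inside a handler (the oauthHandler pattern shown later in this diff).
// The headersSent guard is a sketch detail, not part of the middleware itself.
const handler = async (req, res) => {
  await checkDomainAllowed(req, res);
  if (res.headersSent) {
    return; // redirect already issued
  }
  // ...continue with the login flow
};
```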
58
api/server/middleware/enforceModelSpec.js
Normal file
@@ -0,0 +1,58 @@
const interchangeableKeys = new Map([
  ['chatGptLabel', ['modelLabel']],
  ['modelLabel', ['chatGptLabel']],
]);

/**
 * Middleware to enforce the model spec for a conversation
 * @param {TModelSpec} modelSpec - The model spec to enforce
 * @param {TConversation} parsedBody - The parsed body of the conversation
 * @returns {boolean} - Whether the model spec is enforced
 */
const enforceModelSpec = (modelSpec, parsedBody) => {
  for (const [key, value] of Object.entries(modelSpec.preset)) {
    if (key === 'endpoint') {
      continue;
    }

    if (!checkMatch(key, value, parsedBody)) {
      return false;
    }
  }
  return true;
};

/**
 * Checks if there is a match for the given key and value in the parsed body
 * or any of its interchangeable keys, including deep comparison for objects and arrays.
 * @param {string} key
 * @param {any} value
 * @param {object} parsedBody
 * @returns {boolean}
 */
const checkMatch = (key, value, parsedBody) => {
  const isEqual = (a, b) => {
    if (Array.isArray(a) && Array.isArray(b)) {
      return a.length === b.length && a.every((val, index) => isEqual(val, b[index]));
    } else if (typeof a === 'object' && typeof b === 'object' && a !== null && b !== null) {
      const keysA = Object.keys(a);
      const keysB = Object.keys(b);
      return keysA.length === keysB.length && keysA.every((k) => isEqual(a[k], b[k]));
    }
    return a === b;
  };

  if (isEqual(parsedBody[key], value)) {
    return true;
  }

  if (interchangeableKeys.has(key)) {
    return interchangeableKeys
      .get(key)
      .some((interchangeableKey) => isEqual(parsedBody[interchangeableKey], value));
  }

  return false;
};

module.exports = enforceModelSpec;
47
api/server/middleware/enforceModelSpec.spec.js
Normal file
@@ -0,0 +1,47 @@
// enforceModelSpec.test.js

const enforceModelSpec = require('./enforceModelSpec');

describe('enforceModelSpec function', () => {
  test('returns true when all model specs match parsed body directly', () => {
    const modelSpec = { preset: { title: 'Dialog', status: 'Active' } };
    const parsedBody = { title: 'Dialog', status: 'Active' };
    expect(enforceModelSpec(modelSpec, parsedBody)).toBe(true);
  });

  test('returns true when model specs match via interchangeable keys', () => {
    const modelSpec = { preset: { chatGptLabel: 'GPT-4' } };
    const parsedBody = { modelLabel: 'GPT-4' };
    expect(enforceModelSpec(modelSpec, parsedBody)).toBe(true);
  });

  test('returns false if any key value does not match', () => {
    const modelSpec = { preset: { language: 'English', level: 'Advanced' } };
    const parsedBody = { language: 'Spanish', level: 'Advanced' };
    expect(enforceModelSpec(modelSpec, parsedBody)).toBe(false);
  });

  test('ignores the \'endpoint\' key in model spec', () => {
    const modelSpec = { preset: { endpoint: 'ignored', feature: 'Special' } };
    const parsedBody = { feature: 'Special' };
    expect(enforceModelSpec(modelSpec, parsedBody)).toBe(true);
  });

  test('handles nested objects correctly', () => {
    const modelSpec = { preset: { details: { time: 'noon', location: 'park' } } };
    const parsedBody = { details: { time: 'noon', location: 'park' } };
    expect(enforceModelSpec(modelSpec, parsedBody)).toBe(true);
  });

  test('handles arrays within objects', () => {
    const modelSpec = { preset: { tags: ['urgent', 'important'] } };
    const parsedBody = { tags: ['urgent', 'important'] };
    expect(enforceModelSpec(modelSpec, parsedBody)).toBe(true);
  });

  test('fails when arrays in objects do not match', () => {
    const modelSpec = { preset: { tags: ['urgent', 'important'] } };
    const parsedBody = { tags: ['important', 'urgent'] }; // Different order
    expect(enforceModelSpec(modelSpec, parsedBody)).toBe(false);
  });
});
69
api/server/middleware/importLimiters.js
Normal file
@@ -0,0 +1,69 @@
const rateLimit = require('express-rate-limit');
const { ViolationTypes } = require('librechat-data-provider');
const logViolation = require('~/cache/logViolation');

const getEnvironmentVariables = () => {
  const IMPORT_IP_MAX = parseInt(process.env.IMPORT_IP_MAX) || 100;
  const IMPORT_IP_WINDOW = parseInt(process.env.IMPORT_IP_WINDOW) || 15;
  const IMPORT_USER_MAX = parseInt(process.env.IMPORT_USER_MAX) || 50;
  const IMPORT_USER_WINDOW = parseInt(process.env.IMPORT_USER_WINDOW) || 15;

  const importIpWindowMs = IMPORT_IP_WINDOW * 60 * 1000;
  const importIpMax = IMPORT_IP_MAX;
  const importIpWindowInMinutes = importIpWindowMs / 60000;

  const importUserWindowMs = IMPORT_USER_WINDOW * 60 * 1000;
  const importUserMax = IMPORT_USER_MAX;
  const importUserWindowInMinutes = importUserWindowMs / 60000;

  return {
    importIpWindowMs,
    importIpMax,
    importIpWindowInMinutes,
    importUserWindowMs,
    importUserMax,
    importUserWindowInMinutes,
  };
};

const createImportHandler = (ip = true) => {
  const { importIpMax, importIpWindowInMinutes, importUserMax, importUserWindowInMinutes } =
    getEnvironmentVariables();

  return async (req, res) => {
    const type = ViolationTypes.FILE_UPLOAD_LIMIT;
    const errorMessage = {
      type,
      max: ip ? importIpMax : importUserMax,
      limiter: ip ? 'ip' : 'user',
      windowInMinutes: ip ? importIpWindowInMinutes : importUserWindowInMinutes,
    };

    await logViolation(req, res, type, errorMessage);
    res.status(429).json({ message: 'Too many conversation import requests. Try again later' });
  };
};

const createImportLimiters = () => {
  const { importIpWindowMs, importIpMax, importUserWindowMs, importUserMax } =
    getEnvironmentVariables();

  const importIpLimiter = rateLimit({
    windowMs: importIpWindowMs,
    max: importIpMax,
    handler: createImportHandler(),
  });

  const importUserLimiter = rateLimit({
    windowMs: importUserWindowMs,
    max: importUserMax,
    handler: createImportHandler(false),
    keyGenerator: function (req) {
      return req.user?.id; // Use the user ID or NULL if not available
    },
  });

  return { importIpLimiter, importUserLimiter };
};

module.exports = { createImportLimiters };
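The limiters above are tuned through four optional environment variables: window lengths in minutes plus request maxima, falling back to 100 requests per 15 minutes per IP and 50 per 15 minutes per user when unset. A short sketch with illustrative values:

```js
// Illustrative configuration; remove these lines to use the defaults
// (100 req / 15 min per IP, 50 req / 15 min per user).
process.env.IMPORT_IP_MAX = '20';
process.env.IMPORT_IP_WINDOW = '10'; // minutes
process.env.IMPORT_USER_MAX = '5';
process.env.IMPORT_USER_WINDOW = '10'; // minutes

const { createImportLimiters } = require('~/server/middleware/importLimiters');
const { importIpLimiter, importUserLimiter } = createImportLimiters();

// Applied in order on the import route, exactly as /api/convos/import does:
// router.post('/import', importIpLimiter, importUserLimiter, upload.single('file'), handler);
```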
@@ -1,5 +1,6 @@
const abortMiddleware = require('./abortMiddleware');
const checkBan = require('./checkBan');
const checkDomainAllowed = require('./checkDomainAllowed');
const uaParser = require('./uaParser');
const setHeaders = require('./setHeaders');
const loginLimiter = require('./loginLimiter');
@@ -17,6 +18,7 @@ const validateRegistration = require('./validateRegistration');
const validateImageRequest = require('./validateImageRequest');
const moderateText = require('./moderateText');
const noIndex = require('./noIndex');
const importLimiters = require('./importLimiters');

module.exports = {
  ...uploadLimiters,
@@ -38,4 +40,6 @@ module.exports = {
  validateModel,
  moderateText,
  noIndex,
  ...importLimiters,
  checkDomainAllowed,
};
@@ -2,10 +2,9 @@ const { v4 } = require('uuid');
const express = require('express');
const { encryptMetadata, domainParser } = require('~/server/services/ActionService');
const { actionDelimiter, EModelEndpoint } = require('librechat-data-provider');
const { initializeClient } = require('~/server/services/Endpoints/assistants');
const { getOpenAIClient } = require('~/server/controllers/assistants/helpers');
const { updateAction, getActions, deleteAction } = require('~/models/Action');
const { updateAssistant, getAssistant } = require('~/models/Assistant');
const { withSession } = require('~/server/utils');
const { logger } = require('~/config');

const router = express.Router();
@@ -46,7 +45,6 @@ router.post('/:assistant_id', async (req, res) => {
    let metadata = encryptMetadata(_metadata);

    let { domain } = metadata;
    /* Azure doesn't support periods in function names */
    domain = await domainParser(req, domain, true);

    if (!domain) {
@@ -56,8 +54,7 @@ router.post('/:assistant_id', async (req, res) => {
    const action_id = _action_id ?? v4();
    const initialPromises = [];

    /** @type {{ openai: OpenAI }} */
    const { openai } = await initializeClient({ req, res });
    const { openai } = await getOpenAIClient({ req, res });

    initialPromises.push(getAssistant({ assistant_id }));
    initialPromises.push(openai.beta.assistants.retrieve(assistant_id));
@@ -109,10 +106,10 @@ router.post('/:assistant_id', async (req, res) => {
      })),
    );

    let updatedAssistant = await openai.beta.assistants.update(assistant_id, { tools });
    const promises = [];
    promises.push(
      withSession(
        updateAssistant,
      updateAssistant(
        { assistant_id },
        {
          actions,
@@ -120,29 +117,26 @@ router.post('/:assistant_id', async (req, res) => {
        },
      ),
    );
    promises.push(openai.beta.assistants.update(assistant_id, { tools }));
    promises.push(
      withSession(updateAction, { action_id }, { metadata, assistant_id, user: req.user.id }),
    );
    promises.push(updateAction({ action_id }, { metadata, assistant_id, user: req.user.id }));

    /** @type {[AssistantDocument, Assistant, Action]} */
    const resolved = await Promise.all(promises);
    /** @type {[AssistantDocument, Action]} */
    let [assistantDocument, updatedAction] = await Promise.all(promises);
    const sensitiveFields = ['api_key', 'oauth_client_id', 'oauth_client_secret'];
    for (let field of sensitiveFields) {
      if (resolved[2].metadata[field]) {
        delete resolved[2].metadata[field];
      if (updatedAction.metadata[field]) {
        delete updatedAction.metadata[field];
      }
    }

    /* Map Azure OpenAI model to the assistant as defined by config */
    if (req.app.locals[EModelEndpoint.azureOpenAI]?.assistants) {
      resolved[1] = {
        ...resolved[1],
      updatedAssistant = {
        ...updatedAssistant,
        model: req.body.model,
      };
    }

    res.json(resolved);
    res.json([assistantDocument, updatedAssistant, updatedAction]);
  } catch (error) {
    const message = 'Trouble updating the Assistant Action';
    logger.error(message, error);
@@ -161,9 +155,7 @@ router.delete('/:assistant_id/:action_id/:model', async (req, res) => {
  try {
    const { assistant_id, action_id, model } = req.params;
    req.body.model = model;

    /** @type {{ openai: OpenAI }} */
    const { openai } = await initializeClient({ req, res });
    const { openai } = await getOpenAIClient({ req, res });

    const initialPromises = [];
    initialPromises.push(getAssistant({ assistant_id }));
@@ -190,10 +182,11 @@ router.delete('/:assistant_id/:action_id/:model', async (req, res) => {
      (tool) => !(tool.function && tool.function.name.includes(domain)),
    );

    await openai.beta.assistants.update(assistant_id, { tools: updatedTools });

    const promises = [];
    promises.push(
      withSession(
        updateAssistant,
      updateAssistant(
        { assistant_id },
        {
          actions: updatedActions,
@@ -201,8 +194,7 @@ router.delete('/:assistant_id/:action_id/:model', async (req, res) => {
        },
      ),
    );
    promises.push(openai.beta.assistants.update(assistant_id, { tools: updatedTools }));
    promises.push(withSession(deleteAction, { action_id }));
    promises.push(deleteAction({ action_id }));

    await Promise.all(promises);
    res.status(200).json({ message: 'Action deleted successfully' });
25
api/server/routes/assistants/chatV1.js
Normal file
@@ -0,0 +1,25 @@
const express = require('express');

const router = express.Router();
const {
  setHeaders,
  handleAbort,
  validateModel,
  // validateEndpoint,
  buildEndpointOption,
} = require('~/server/middleware');
const chatController = require('~/server/controllers/assistants/chatV1');

router.post('/abort', handleAbort());

/**
 * @route POST /
 * @desc Chat with an assistant
 * @access Public
 * @param {express.Request} req - The request object, containing the request data.
 * @param {express.Response} res - The response object, used to send back a response.
 * @returns {void}
 */
router.post('/', validateModel, buildEndpointOption, setHeaders, chatController);

module.exports = router;
25
api/server/routes/assistants/chatV2.js
Normal file
@@ -0,0 +1,25 @@
const express = require('express');

const router = express.Router();
const {
  setHeaders,
  handleAbort,
  validateModel,
  // validateEndpoint,
  buildEndpointOption,
} = require('~/server/middleware');
const chatController = require('~/server/controllers/assistants/chatV2');

router.post('/abort', handleAbort());

/**
 * @route POST /
 * @desc Chat with an assistant
 * @access Public
 * @param {express.Request} req - The request object, containing the request data.
 * @param {express.Response} res - The response object, used to send back a response.
 * @returns {void}
 */
router.post('/', validateModel, buildEndpointOption, setHeaders, chatController);

module.exports = router;

@@ -7,16 +7,19 @@ const {
  // concurrentLimiter,
  // messageIpLimiter,
  // messageUserLimiter,
} = require('../../middleware');
} = require('~/server/middleware');

const assistants = require('./assistants');
const chat = require('./chat');
const v1 = require('./v1');
const chatV1 = require('./chatV1');
const v2 = require('./v2');
const chatV2 = require('./chatV2');

router.use(requireJwtAuth);
router.use(checkBan);
router.use(uaParser);

router.use('/', assistants);
router.use('/chat', chat);
router.use('/v1/', v1);
router.use('/v1/chat', chatV1);
router.use('/v2/', v2);
router.use('/v2/chat', chatV2);

module.exports = router;
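Because this router is itself mounted at `/api/assistants` (see the `startServer` hunk earlier), the change yields a versioned URL scheme. A hedged client-side sketch, with paths inferred from the mounts above and a placeholder JWT (Node 18+ or a browser for `fetch`):

```js
// Illustrative calls against the versioned assistants routes.
const authHeaders = { Authorization: 'Bearer <jwt>' }; // placeholder token
const base = '/api/assistants';

(async () => {
  // v1 list (GET /api/assistants/v1), original Assistants API behavior:
  await fetch(`${base}/v1?limit=100&order=desc`, { headers: authHeaders });

  // v2 create (POST /api/assistants/v2), which routes through the new
  // v2 controller with tool_resources / file_search handling:
  await fetch(`${base}/v2`, {
    method: 'POST',
    headers: { ...authHeaders, 'Content-Type': 'application/json' },
    body: JSON.stringify({ name: 'My Assistant', model: 'gpt-4', endpoint: 'assistants' }),
  });
})();
```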
81
api/server/routes/assistants/v1.js
Normal file
@@ -0,0 +1,81 @@
const multer = require('multer');
const express = require('express');
const controllers = require('~/server/controllers/assistants/v1');
const actions = require('./actions');
const tools = require('./tools');

const upload = multer();
const router = express.Router();

/**
 * Assistant actions route.
 * @route GET|POST /assistants/actions
 */
router.use('/actions', actions);

/**
 * Lists available tools.
 * @route GET /assistants/tools
 * @returns {TPlugin[]} 200 - application/json
 */
router.use('/tools', tools);

/**
 * Create an assistant.
 * @route POST /assistants
 * @param {AssistantCreateParams} req.body - The assistant creation parameters.
 * @returns {Assistant} 201 - success response - application/json
 */
router.post('/', controllers.createAssistant);

/**
 * Retrieves an assistant.
 * @route GET /assistants/:id
 * @param {string} req.params.id - Assistant identifier.
 * @returns {Assistant} 200 - success response - application/json
 */
router.get('/:id', controllers.retrieveAssistant);

/**
 * Modifies an assistant.
 * @route PATCH /assistants/:id
 * @param {string} req.params.id - Assistant identifier.
 * @param {AssistantUpdateParams} req.body - The assistant update parameters.
 * @returns {Assistant} 200 - success response - application/json
 */
router.patch('/:id', controllers.patchAssistant);

/**
 * Deletes an assistant.
 * @route DELETE /assistants/:id
 * @param {string} req.params.id - Assistant identifier.
 * @returns {Assistant} 200 - success response - application/json
 */
router.delete('/:id', controllers.deleteAssistant);

/**
 * Returns a list of assistants.
 * @route GET /assistants
 * @param {AssistantListParams} req.query - The assistant list parameters for pagination and sorting.
 * @returns {AssistantListResponse} 200 - success response - application/json
 */
router.get('/', controllers.listAssistants);

/**
 * Returns a list of the user's assistant documents (metadata saved to database).
 * @route GET /assistants/documents
 * @returns {AssistantDocument[]} 200 - success response - application/json
 */
router.get('/documents', controllers.getAssistantDocuments);

/**
 * Uploads and updates an avatar for a specific assistant.
 * @route POST /avatar/:assistant_id
 * @param {string} req.params.assistant_id - The ID of the assistant.
 * @param {Express.Multer.File} req.file - The avatar image file.
 * @param {string} [req.body.metadata] - Optional metadata for the assistant's avatar.
 * @returns {Object} 200 - success response - application/json
 */
router.post('/avatar/:assistant_id', upload.single('file'), controllers.uploadAssistantAvatar);

module.exports = router;
82
api/server/routes/assistants/v2.js
Normal file
@@ -0,0 +1,82 @@
const multer = require('multer');
const express = require('express');
const v1 = require('~/server/controllers/assistants/v1');
const v2 = require('~/server/controllers/assistants/v2');
const actions = require('./actions');
const tools = require('./tools');

const upload = multer();
const router = express.Router();

/**
 * Assistant actions route.
 * @route GET|POST /assistants/actions
 */
router.use('/actions', actions);

/**
 * Lists available tools.
 * @route GET /assistants/tools
 * @returns {TPlugin[]} 200 - application/json
 */
router.use('/tools', tools);

/**
 * Create an assistant.
 * @route POST /assistants
 * @param {AssistantCreateParams} req.body - The assistant creation parameters.
 * @returns {Assistant} 201 - success response - application/json
 */
router.post('/', v2.createAssistant);

/**
 * Retrieves an assistant.
 * @route GET /assistants/:id
 * @param {string} req.params.id - Assistant identifier.
 * @returns {Assistant} 200 - success response - application/json
 */
router.get('/:id', v1.retrieveAssistant);

/**
 * Modifies an assistant.
 * @route PATCH /assistants/:id
 * @param {string} req.params.id - Assistant identifier.
 * @param {AssistantUpdateParams} req.body - The assistant update parameters.
 * @returns {Assistant} 200 - success response - application/json
 */
router.patch('/:id', v2.patchAssistant);

/**
 * Deletes an assistant.
 * @route DELETE /assistants/:id
 * @param {string} req.params.id - Assistant identifier.
 * @returns {Assistant} 200 - success response - application/json
 */
router.delete('/:id', v1.deleteAssistant);

/**
 * Returns a list of assistants.
 * @route GET /assistants
 * @param {AssistantListParams} req.query - The assistant list parameters for pagination and sorting.
 * @returns {AssistantListResponse} 200 - success response - application/json
 */
router.get('/', v1.listAssistants);

/**
 * Returns a list of the user's assistant documents (metadata saved to database).
 * @route GET /assistants/documents
 * @returns {AssistantDocument[]} 200 - success response - application/json
 */
router.get('/documents', v1.getAssistantDocuments);

/**
 * Uploads and updates an avatar for a specific assistant.
 * @route POST /avatar/:assistant_id
 * @param {string} req.params.assistant_id - The ID of the assistant.
 * @param {Express.Multer.File} req.file - The avatar image file.
 * @param {string} [req.body.metadata] - Optional metadata for the assistant's avatar.
 * @returns {Object} 200 - success response - application/json
 */
router.post('/avatar/:assistant_id', upload.single('file'), v1.uploadAssistantAvatar);

module.exports = router;
@@ -14,6 +14,7 @@ router.get('/', async function (req, res) {
  };

  try {
    /** @type {TStartupConfig} */
    const payload = {
      appTitle: process.env.APP_TITLE || 'LibreChat',
      socialLogins: req.app.locals.socialLogins ?? defaultSocialLogins,
@@ -44,7 +45,8 @@ router.get('/', async function (req, res) {
        isEnabled(process.env.SHOW_BIRTHDAY_ICON) ||
        process.env.SHOW_BIRTHDAY_ICON === '',
      helpAndFaqURL: process.env.HELP_AND_FAQ_URL || 'https://librechat.ai',
      interface: req.app.locals.interface,
      interface: req.app.locals.interfaceConfig,
      modelSpecs: req.app.locals.modelSpecs,
    };

    if (typeof process.env.CUSTOM_FOOTER === 'string') {
@@ -1,8 +1,14 @@
const multer = require('multer');
const express = require('express');
const { CacheKeys } = require('librechat-data-provider');
const { initializeClient } = require('~/server/services/Endpoints/assistants');
const { getConvosByPage, deleteConvos, getConvo, saveConvo } = require('~/models/Conversation');
const { IMPORT_CONVERSATION_JOB_NAME } = require('~/server/utils/import/jobDefinition');
const { storage, importFileFilter } = require('~/server/routes/files/multer');
const requireJwtAuth = require('~/server/middleware/requireJwtAuth');
const { forkConversation } = require('~/server/utils/import/fork');
const { createImportLimiters } = require('~/server/middleware');
const jobScheduler = require('~/server/utils/jobScheduler');
const getLogStores = require('~/cache/getLogStores');
const { sleep } = require('~/server/utils');
const { logger } = require('~/config');
@@ -18,7 +24,15 @@ router.get('/', async (req, res) => {
    return res.status(400).json({ error: 'Invalid page number' });
  }

  res.status(200).send(await getConvosByPage(req.user.id, pageNumber));
  let pageSize = req.query.pageSize || 25;
  pageSize = parseInt(pageSize, 10);

  if (isNaN(pageSize) || pageSize < 1) {
    return res.status(400).json({ error: 'Invalid page size' });
  }
  const isArchived = req.query.isArchived === 'true';

  res.status(200).send(await getConvosByPage(req.user.id, pageNumber, pageSize, isArchived));
});

router.get('/:conversationId', async (req, res) => {
@@ -99,4 +113,80 @@ router.post('/update', async (req, res) => {
  }
});

const { importIpLimiter, importUserLimiter } = createImportLimiters();
const upload = multer({ storage: storage, fileFilter: importFileFilter });

/**
 * Imports a conversation from a JSON file and saves it to the database.
 * @route POST /import
 * @param {Express.Multer.File} req.file - The JSON file to import.
 * @returns {object} 201 - success response - application/json
 */
router.post(
  '/import',
  importIpLimiter,
  importUserLimiter,
  upload.single('file'),
  async (req, res) => {
    try {
      const filepath = req.file.path;
      const job = await jobScheduler.now(IMPORT_CONVERSATION_JOB_NAME, filepath, req.user.id);

      res.status(201).json({ message: 'Import started', jobId: job.id });
    } catch (error) {
      logger.error('Error processing file', error);
      res.status(500).send('Error processing file');
    }
  },
);

/**
 * POST /fork
 * This route handles forking a conversation based on the TForkConvoRequest and responds with TForkConvoResponse.
 * @route POST /fork
 * @param {express.Request<{}, TForkConvoResponse, TForkConvoRequest>} req - Express request object.
 * @param {express.Response<TForkConvoResponse>} res - Express response object.
 * @returns {Promise<void>} - The response after forking the conversation.
 */
router.post('/fork', async (req, res) => {
  try {
    /** @type {TForkConvoRequest} */
    const { conversationId, messageId, option, splitAtTarget, latestMessageId } = req.body;
    const result = await forkConversation({
      requestUserId: req.user.id,
      originalConvoId: conversationId,
      targetMessageId: messageId,
      latestMessageId,
      records: true,
      splitAtTarget,
      option,
    });

    res.json(result);
  } catch (error) {
    logger.error('Error forking conversation', error);
    res.status(500).send('Error forking conversation');
  }
});

// Get the status of an import job for polling
router.get('/import/jobs/:jobId', async (req, res) => {
  try {
    const { jobId } = req.params;
    const { userId, ...jobStatus } = await jobScheduler.getJobStatus(jobId);
    if (!jobStatus) {
      return res.status(404).json({ message: 'Job not found.' });
    }

    if (userId !== req.user.id) {
      return res.status(403).json({ message: 'Unauthorized' });
    }

    res.json(jobStatus);
  } catch (error) {
    logger.error('Error getting job details', error);
    res.status(500).send('Error getting job details');
  }
});

module.exports = router;
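End to end, the new import flow is: upload a JSON file, receive a job id immediately, then poll the job route until the scheduler reports completion. A client sketch — the response fields come from the handlers above, but the polling status values are assumptions about the jobScheduler's contract:

```js
// Browser-style sketch of the import flow defined above.
const importConversations = async (file, authHeaders) => {
  const form = new FormData();
  form.append('file', file); // must be JSON, per importFileFilter

  const res = await fetch('/api/convos/import', {
    method: 'POST',
    headers: authHeaders,
    body: form,
  });
  const { jobId } = await res.json(); // { message: 'Import started', jobId }

  // Poll until a terminal state; 'scheduled' / 'running' are assumed names,
  // the real status shape depends on jobScheduler.getJobStatus().
  let status;
  do {
    await new Promise((r) => setTimeout(r, 1000));
    const poll = await fetch(`/api/convos/import/jobs/${jobId}`, { headers: authHeaders });
    status = await poll.json();
  } while (status.status === 'scheduled' || status.status === 'running');
  return status;
};
```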
@@ -1,6 +1,6 @@
const fs = require('fs').promises;
const express = require('express');
const { isUUID, FileSources } = require('librechat-data-provider');
const { isUUID, checkOpenAIStorage } = require('librechat-data-provider');
const {
  filterFile,
  processFileUpload,
@@ -89,7 +89,7 @@ router.get('/download/:userId/:file_id', async (req, res) => {
    return res.status(403).send('Forbidden');
  }

  if (file.source === FileSources.openai && !file.model) {
  if (checkOpenAIStorage(file.source) && !file.model) {
    logger.warn(`${errorPrefix} has no associated model: ${file_id}`);
    return res.status(400).send('The model used when creating this file is not available');
  }
@@ -110,7 +110,8 @@ router.get('/download/:userId/:file_id', async (req, res) => {
  let passThrough;
  /** @type {ReadableStream | undefined} */
  let fileStream;
  if (file.source === FileSources.openai) {

  if (checkOpenAIStorage(file.source)) {
    req.body = { model: file.model };
    const { openai } = await initializeClient({ req, res });
    logger.debug(`Downloading file ${file_id} from OpenAI`);
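`checkOpenAIStorage` itself is not shown in this diff; presumably it widens the old `file.source === FileSources.openai` test to cover every OpenAI-managed storage source now that Azure assistants are a distinct endpoint. A guessed sketch of its shape — an assumption, not the actual librechat-data-provider source:

```js
// Assumed sketch; the real helper lives in librechat-data-provider.
const FileSources = { openai: 'openai', azure: 'azure' }; // illustrative subset
const checkOpenAIStorage = (source) =>
  source === FileSources.openai || source === FileSources.azure;

console.log(checkOpenAIStorage('azure')); // true under this assumption
```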
@@ -1,6 +1,6 @@
const express = require('express');
const createMulterInstance = require('./multer');
const { uaParser, checkBan, requireJwtAuth, createFileLimiters } = require('~/server/middleware');
const { createMulterInstance } = require('./multer');

const files = require('./files');
const images = require('./images');

@@ -20,6 +20,16 @@ const storage = multer.diskStorage({
  },
});

const importFileFilter = (req, file, cb) => {
  if (file.mimetype === 'application/json') {
    cb(null, true);
  } else if (path.extname(file.originalname).toLowerCase() === '.json') {
    cb(null, true);
  } else {
    cb(new Error('Only JSON files are allowed'), false);
  }
};

const fileFilter = (req, file, cb) => {
  if (!file) {
    return cb(new Error('No file provided'), false);
@@ -42,4 +52,4 @@ const createMulterInstance = async () => {
  });
};

module.exports = createMulterInstance;
module.exports = { createMulterInstance, storage, importFileFilter };
@@ -18,6 +18,7 @@ const config = require('./config');
const assistants = require('./assistants');
const files = require('./files');
const staticRoute = require('./static');
const share = require('./share');

module.exports = {
  search,
@@ -40,4 +41,5 @@ module.exports = {
  assistants,
  files,
  staticRoute,
  share,
};
@@ -4,7 +4,7 @@ const passport = require('passport');
const express = require('express');
const router = express.Router();
const { setAuthTokens } = require('~/server/services/AuthService');
const { loginLimiter, checkBan } = require('~/server/middleware');
const { loginLimiter, checkBan, checkDomainAllowed } = require('~/server/middleware');
const { logger } = require('~/config');

const domains = {
@@ -16,6 +16,7 @@ router.use(loginLimiter);

const oauthHandler = async (req, res) => {
  try {
    await checkDomainAllowed(req, res);
    await checkBan(req, res);
    if (req.banned) {
      return;
@@ -41,29 +41,10 @@ router.get('/', async function (req, res) {
      return;
    }

    const messages = (
      await Message.meiliSearch(
        q,
        {
          attributesToHighlight: ['text'],
          highlightPreTag: '**',
          highlightPostTag: '**',
        },
        true,
      )
    ).hits.map((message) => {
      const { _formatted, ...rest } = message;
      return {
        ...rest,
        searchResult: true,
        text: _formatted.text,
      };
    });
    const messages = (await Message.meiliSearch(q, undefined, true)).hits;
    const titles = (await Conversation.meiliSearch(q)).hits;

    const sortedHits = reduceHits(messages, titles);
    // debugging:
    // logger.debug('user:', user, 'message hits:', messages.length, 'convo hits:', titles.length);
    // logger.debug('sorted hits:', sortedHits.length);
    const result = await getConvosQueried(user, sortedHits, pageNumber);

    const activeMessages = [];
@@ -86,8 +67,7 @@ router.get('/', async function (req, res) {
      delete result.cache;
    }
    delete result.convoMap;
    // for debugging
    // logger.debug(result, messages.length);

    res.status(200).send(result);
  } catch (error) {
    logger.error('[/search] Error while searching messages & conversations', error);
75
api/server/routes/share.js
Normal file
@@ -0,0 +1,75 @@
const express = require('express');

const {
  getSharedMessages,
  createSharedLink,
  updateSharedLink,
  getSharedLinks,
  deleteSharedLink,
} = require('~/models/Share');
const requireJwtAuth = require('~/server/middleware/requireJwtAuth');
const router = express.Router();

/**
 * Shared messages
 * this route does not require authentication
 */
router.get('/:shareId', async (req, res) => {
  const share = await getSharedMessages(req.params.shareId);

  if (share) {
    res.status(200).json(share);
  } else {
    res.status(404).end();
  }
});

/**
 * Shared links
 */
router.get('/', requireJwtAuth, async (req, res) => {
  let pageNumber = req.query.pageNumber || 1;
  pageNumber = parseInt(pageNumber, 10);

  if (isNaN(pageNumber) || pageNumber < 1) {
    return res.status(400).json({ error: 'Invalid page number' });
  }

  let pageSize = req.query.pageSize || 25;
  pageSize = parseInt(pageSize, 10);

  if (isNaN(pageSize) || pageSize < 1) {
    return res.status(400).json({ error: 'Invalid page size' });
  }
  const isPublic = req.query.isPublic === 'true';
  res.status(200).send(await getSharedLinks(req.user.id, pageNumber, pageSize, isPublic));
});

router.post('/', requireJwtAuth, async (req, res) => {
  const created = await createSharedLink(req.user.id, req.body);
  if (created) {
    res.status(200).json(created);
  } else {
    res.status(404).end();
  }
});

router.patch('/', requireJwtAuth, async (req, res) => {
  const updated = await updateSharedLink(req.user.id, req.body);
  if (updated) {
    res.status(200).json(updated);
  } else {
    res.status(404).end();
  }
});

router.delete('/:shareId', requireJwtAuth, async (req, res) => {
  const deleted = await deleteSharedLink(req.user.id, { shareId: req.params.shareId });
  if (deleted) {
    res.status(200).json(deleted);
  } else {
    res.status(404).end();
  }
});

module.exports = router;
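A brief usage sketch for the share router: writes require a JWT, while reads by `shareId` are public (the router is mounted at `/api/share` in the `startServer` hunk earlier). The request body and the `shareId` response field are assumptions about `createSharedLink`'s contract:

```js
// Illustrative client calls against the share routes above.
const authHeaders = { Authorization: 'Bearer <jwt>' }; // placeholder

(async () => {
  // Create a shared link; the body shape is an assumption about what
  // createSharedLink expects (likely including a conversationId).
  const created = await (
    await fetch('/api/share', {
      method: 'POST',
      headers: { ...authHeaders, 'Content-Type': 'application/json' },
      body: JSON.stringify({ conversationId: '<convo-id>' }),
    })
  ).json();

  // Anyone can read it back without a JWT:
  const shared = await (await fetch(`/api/share/${created.shareId}`)).json();
  console.log(shared);
})();
```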
@@ -1,20 +1,59 @@
const {
  AuthTypeEnum,
  EModelEndpoint,
  actionDomainSeparator,
  CacheKeys,
  Constants,
  AuthTypeEnum,
  actionDelimiter,
  isImageVisionTool,
  actionDomainSeparator,
} = require('librechat-data-provider');
const { encryptV2, decryptV2 } = require('~/server/utils/crypto');
const { getActions } = require('~/models/Action');
const { getActions, deleteActions } = require('~/models/Action');
const { deleteAssistant } = require('~/models/Assistant');
const { getLogStores } = require('~/cache');
const { logger } = require('~/config');

const toolNameRegex = /^[a-zA-Z0-9_-]+$/;

/**
 * Validates tool name against regex pattern and updates if necessary.
 * @param {object} params - The parameters for the function.
 * @param {object} params.req - Express Request.
 * @param {FunctionTool} params.tool - The tool object.
 * @param {string} params.assistant_id - The assistant ID.
 * @returns {object|null} - Updated tool object, or null if invalid and not an action.
 */
const validateAndUpdateTool = async ({ req, tool, assistant_id }) => {
  let actions;
  if (isImageVisionTool(tool)) {
    return null;
  }
  if (!toolNameRegex.test(tool.function.name)) {
    const [functionName, domain] = tool.function.name.split(actionDelimiter);
    actions = await getActions({ assistant_id, user: req.user.id }, true);
    const matchingActions = actions.filter((action) => {
      const metadata = action.metadata;
      return metadata && metadata.domain === domain;
    });
    const action = matchingActions[0];
    if (!action) {
      return null;
    }

    const parsedDomain = await domainParser(req, domain, true);

    if (!parsedDomain) {
      return null;
    }

    tool.function.name = `${functionName}${actionDelimiter}${parsedDomain}`;
  }
  return tool;
};

/**
 * Encodes or decodes a domain name to/from base64, or replaces periods with a custom separator.
 *
 * Necessary because Azure OpenAI Assistants API doesn't support periods in function
 * names due to `[a-zA-Z0-9_-]*` Regex Validation, limited to a 64-character maximum.
 * Necessary due to `[a-zA-Z0-9_-]*` Regex Validation, limited to a 64-character maximum.
 *
 * @param {Express.Request} req - The Express Request object.
 * @param {string} domain - The domain name to encode/decode.
@@ -26,10 +65,6 @@ async function domainParser(req, domain, inverse = false) {
    return;
  }

  if (!req.app.locals[EModelEndpoint.azureOpenAI]?.assistants) {
    return domain;
  }

  const domainsCache = getLogStores(CacheKeys.ENCODED_DOMAINS);
  const cachedDomain = await domainsCache.get(domain);
  if (inverse && cachedDomain) {
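As a rough illustration of the round trip (the intermediate encoded form is illustrative, not an exact value):

// Hypothetical usage: encode on the way out, decode (inverse) on the way back.
const encoded = await domainParser(req, 'api.example.com', false);
// short domains get periods swapped for actionDomainSeparator; long ones are base64-encoded
const decoded = await domainParser(req, encoded, true);
// decoded === 'api.example.com' (when the azureOpenAI assistants config is active)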
@@ -170,10 +205,29 @@ function decryptMetadata(metadata) {
  return decryptedMetadata;
}

/**
 * Deletes an action and its corresponding assistant.
 * @param {Object} params - The parameters for the function.
 * @param {Express.Request} params.req - The Express Request object.
 * @param {string} params.assistant_id - The ID of the assistant.
 */
const deleteAssistantActions = async ({ req, assistant_id }) => {
  try {
    await deleteActions({ assistant_id, user: req.user.id });
    await deleteAssistant({ assistant_id, user: req.user.id });
  } catch (error) {
    const message = 'Trouble deleting Assistant Actions for Assistant ID: ' + assistant_id;
    logger.error(message, error);
    throw new Error(message);
  }
};

module.exports = {
  loadActionSets,
  deleteAssistantActions,
  validateAndUpdateTool,
  createActionTool,
  encryptMetadata,
  decryptMetadata,
  loadActionSets,
  domainParser,
};
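A minimal sketch of calling the new helper from a route handler; the route itself is an assumption for illustration:

// Hypothetical route: remove an assistant along with its actions.
router.delete('/assistants/:assistant_id', async (req, res) => {
  await deleteAssistantActions({ req, assistant_id: req.params.assistant_id });
  res.status(200).json({ deleted: true });
});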
@@ -73,12 +73,12 @@ describe('domainParser', () => {
  const TLD = '.com';

  // Non-azure request
  it('returns domain as is if not azure', async () => {
  it('does not return domain as is if not azure', async () => {
    const domain = `example.com${actionDomainSeparator}test${actionDomainSeparator}`;
    const result1 = await domainParser(reqNoAzure, domain, false);
    const result2 = await domainParser(reqNoAzure, domain, true);
    expect(result1).toEqual(domain);
    expect(result2).toEqual(domain);
    expect(result1).not.toEqual(domain);
    expect(result2).not.toEqual(domain);
  });

  // Test for Empty or Null Inputs
@@ -1,14 +1,10 @@
const {
  FileSources,
  EModelEndpoint,
  EImageOutputType,
  defaultSocialLogins,
} = require('librechat-data-provider');
const { FileSources, EModelEndpoint, getConfigDefaults } = require('librechat-data-provider');
const { checkVariables, checkHealth, checkConfig, checkAzureVariables } = require('./start/checks');
const { azureAssistantsDefaults, assistantsConfigSetup } = require('./start/assistants');
const { initializeFirebase } = require('./Files/Firebase/initialize');
const loadCustomConfig = require('./Config/loadCustomConfig');
const handleRateLimits = require('./Config/handleRateLimits');
const { loadDefaultInterface } = require('./start/interface');
const { azureConfigSetup } = require('./start/azureOpenAI');
const { loadAndFormatTools } = require('./ToolService');
const paths = require('~/config/paths');
@@ -22,9 +18,13 @@ const paths = require('~/config/paths');
const AppService = async (app) => {
  /** @type {TCustomConfig} */
  const config = (await loadCustomConfig()) ?? {};
  const configDefaults = getConfigDefaults();

  const filteredTools = config.filteredTools;
  const includedTools = config.includedTools;
  const fileStrategy = config.fileStrategy ?? configDefaults.fileStrategy;
  const imageOutputType = config?.imageOutputType ?? configDefaults.imageOutputType;

  const fileStrategy = config.fileStrategy ?? FileSources.local;
  const imageOutputType = config?.imageOutputType ?? EImageOutputType.PNG;
  process.env.CDN_PROVIDER = fileStrategy;

  checkVariables();
@@ -37,26 +37,27 @@ const AppService = async (app) => {
  /** @type {Record<string, FunctionTool>} */
  const availableTools = loadAndFormatTools({
    directory: paths.structuredTools,
    filter: new Set([
      'ChatTool.js',
      'CodeSherpa.js',
      'CodeSherpaTools.js',
      'E2BTools.js',
      'extractionChain.js',
    ]),
    adminFilter: filteredTools,
    adminIncluded: includedTools,
  });

  const socialLogins = config?.registration?.socialLogins ?? defaultSocialLogins;
  const socialLogins =
    config?.registration?.socialLogins ?? configDefaults?.registration?.socialLogins;
  const interfaceConfig = loadDefaultInterface(config, configDefaults);

  const defaultLocals = {
    paths,
    fileStrategy,
    socialLogins,
    filteredTools,
    includedTools,
    availableTools,
    imageOutputType,
    interfaceConfig,
  };

  if (!Object.keys(config).length) {
    app.locals = {
      paths,
      fileStrategy,
      socialLogins,
      availableTools,
      imageOutputType,
    };

    app.locals = defaultLocals;
    return;
  }

@@ -71,7 +72,14 @@ const AppService = async (app) => {
  }

  if (config?.endpoints?.[EModelEndpoint.azureOpenAI]?.assistants) {
    endpointLocals[EModelEndpoint.assistants] = azureAssistantsDefaults();
    endpointLocals[EModelEndpoint.azureAssistants] = azureAssistantsDefaults();
  }

  if (config?.endpoints?.[EModelEndpoint.azureAssistants]) {
    endpointLocals[EModelEndpoint.azureAssistants] = assistantsConfigSetup(
      config,
      endpointLocals[EModelEndpoint.azureAssistants],
    );
  }

  if (config?.endpoints?.[EModelEndpoint.assistants]) {
@@ -82,12 +90,8 @@ const AppService = async (app) => {
  }

  app.locals = {
    paths,
    socialLogins,
    fileStrategy,
    availableTools,
    imageOutputType,
    interface: config?.interface,
    ...defaultLocals,
    modelSpecs: config.modelSpecs,
    fileConfig: config?.fileConfig,
    secureImageLinks: config?.secureImageLinks,
    ...endpointLocals,
@@ -93,6 +93,16 @@ describe('AppService', () => {
    expect(app.locals).toEqual({
      socialLogins: ['testLogin'],
      fileStrategy: 'testStrategy',
      interfaceConfig: expect.objectContaining({
        privacyPolicy: undefined,
        termsOfService: undefined,
        endpointsMenu: true,
        modelSelect: true,
        parameters: true,
        sidePanel: true,
        presets: true,
      }),
      modelSpecs: undefined,
      availableTools: {
        ExampleTool: {
          type: 'function',
@@ -109,7 +119,6 @@ describe('AppService', () => {
      },
      paths: expect.anything(),
      imageOutputType: expect.any(String),
      interface: undefined,
      fileConfig: undefined,
      secureImageLinks: undefined,
    });
@@ -181,7 +190,6 @@ describe('AppService', () => {

    expect(loadAndFormatTools).toHaveBeenCalledWith({
      directory: expect.anything(),
      filter: expect.anything(),
    });

    expect(app.locals.availableTools.ExampleTool).toBeDefined();
@@ -245,8 +253,8 @@ describe('AppService', () => {
    process.env.EASTUS_API_KEY = 'eastus-key';

    await AppService(app);
    expect(app.locals).toHaveProperty(EModelEndpoint.assistants);
    expect(app.locals[EModelEndpoint.assistants].capabilities.length).toEqual(3);
    expect(app.locals).toHaveProperty(EModelEndpoint.azureAssistants);
    expect(app.locals[EModelEndpoint.azureAssistants].capabilities.length).toEqual(3);
  });

  it('should correctly configure Azure OpenAI endpoint based on custom config', async () => {
@@ -339,6 +347,69 @@ describe('AppService', () => {
    expect(process.env.FILE_UPLOAD_USER_MAX).toEqual('initialUserMax');
    expect(process.env.FILE_UPLOAD_USER_WINDOW).toEqual('initialUserWindow');
  });

  it('should not modify IMPORT environment variables without rate limits', async () => {
    // Setup initial environment variables
    process.env.IMPORT_IP_MAX = '10';
    process.env.IMPORT_IP_WINDOW = '15';
    process.env.IMPORT_USER_MAX = '5';
    process.env.IMPORT_USER_WINDOW = '20';

    const initialEnv = { ...process.env };

    await AppService(app);

    // Expect environment variables to remain unchanged
    expect(process.env.IMPORT_IP_MAX).toEqual(initialEnv.IMPORT_IP_MAX);
    expect(process.env.IMPORT_IP_WINDOW).toEqual(initialEnv.IMPORT_IP_WINDOW);
    expect(process.env.IMPORT_USER_MAX).toEqual(initialEnv.IMPORT_USER_MAX);
    expect(process.env.IMPORT_USER_WINDOW).toEqual(initialEnv.IMPORT_USER_WINDOW);
  });

  it('should correctly set IMPORT environment variables based on rate limits', async () => {
    // Define and mock a custom configuration with rate limits
    const importLimitsConfig = {
      rateLimits: {
        conversationsImport: {
          ipMax: '150',
          ipWindowInMinutes: '60',
          userMax: '50',
          userWindowInMinutes: '30',
        },
      },
    };

    require('./Config/loadCustomConfig').mockImplementationOnce(() =>
      Promise.resolve(importLimitsConfig),
    );

    await AppService(app);

    // Verify that process.env has been updated according to the rate limits config
    expect(process.env.IMPORT_IP_MAX).toEqual('150');
    expect(process.env.IMPORT_IP_WINDOW).toEqual('60');
    expect(process.env.IMPORT_USER_MAX).toEqual('50');
    expect(process.env.IMPORT_USER_WINDOW).toEqual('30');
  });

  it('should fallback to default IMPORT environment variables when rate limits are unspecified', async () => {
    // Setup initial environment variables to non-default values
    process.env.IMPORT_IP_MAX = 'initialMax';
    process.env.IMPORT_IP_WINDOW = 'initialWindow';
    process.env.IMPORT_USER_MAX = 'initialUserMax';
    process.env.IMPORT_USER_WINDOW = 'initialUserWindow';

    // Mock a custom configuration without specific rate limits
    require('./Config/loadCustomConfig').mockImplementationOnce(() => Promise.resolve({}));

    await AppService(app);

    // Verify that process.env falls back to the initial values
    expect(process.env.IMPORT_IP_MAX).toEqual('initialMax');
    expect(process.env.IMPORT_IP_WINDOW).toEqual('initialWindow');
    expect(process.env.IMPORT_USER_MAX).toEqual('initialUserMax');
    expect(process.env.IMPORT_USER_WINDOW).toEqual('initialUserWindow');
  });
});

describe('AppService updating app.locals and issuing warnings', () => {

@@ -78,7 +78,7 @@ async function createOnTextProgress({
 * @return {Promise<OpenAIAssistantFinish | OpenAIAssistantAction[] | ThreadMessage[] | RequiredActionFunctionToolCall[]>}
 */
async function getResponse({ openai, run_id, thread_id }) {
  const run = await waitForRun({ openai, run_id, thread_id, pollIntervalMs: 500 });
  const run = await waitForRun({ openai, run_id, thread_id, pollIntervalMs: 2000 });

  if (run.status === RunStatus.COMPLETED) {
    const messages = await openai.beta.threads.messages.list(thread_id, defaultOrderQuery);
@@ -393,8 +393,9 @@ async function runAssistant({
    },
  });

  const { endpoint = EModelEndpoint.azureAssistants } = openai.req.body;
  /** @type {TCustomConfig.endpoints.assistants} */
  const assistantsEndpointConfig = openai.req.app.locals?.[EModelEndpoint.assistants] ?? {};
  const assistantsEndpointConfig = openai.req.app.locals?.[endpoint] ?? {};
  const { pollIntervalMs, timeoutMs } = assistantsEndpointConfig;

  const run = await waitForRun({
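For reference, a sketch of calling getResponse with the identifiers a run produces; the values are placeholders:

// Hypothetical usage; run_id and thread_id come from a previously created run.
const response = await getResponse({ openai, run_id: 'run_abc123', thread_id: 'thread_abc123' });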
@@ -2,7 +2,7 @@ const crypto = require('crypto');
const bcrypt = require('bcryptjs');
const { errorsToString } = require('librechat-data-provider');
const { registerSchema } = require('~/strategies/validators');
const getCustomConfig = require('~/server/services/Config/getCustomConfig');
const isDomainAllowed = require('./isDomainAllowed');
const Token = require('~/models/schema/tokenSchema');
const { sendEmail } = require('~/server/utils');
const Session = require('~/models/Session');
@@ -14,27 +14,6 @@ const domains = {
  server: process.env.DOMAIN_SERVER,
};

async function isDomainAllowed(email) {
  if (!email) {
    return false;
  }

  const domain = email.split('@')[1];

  if (!domain) {
    return false;
  }

  const customConfig = await getCustomConfig();
  if (!customConfig) {
    return true;
  } else if (!customConfig?.registration?.allowedDomains) {
    return true;
  }

  return customConfig.registration.allowedDomains.includes(domain);
}

const isProduction = process.env.NODE_ENV === 'production';

/**
@@ -1,39 +0,0 @@
const getCustomConfig = require('~/server/services/Config/getCustomConfig');
const { isDomainAllowed } = require('./AuthService');

jest.mock('~/server/services/Config/getCustomConfig', () => jest.fn());

describe('isDomainAllowed', () => {
  it('should allow domain when customConfig is not available', async () => {
    getCustomConfig.mockResolvedValue(null);
    await expect(isDomainAllowed('test@domain1.com')).resolves.toBe(true);
  });

  it('should allow domain when allowedDomains is not defined in customConfig', async () => {
    getCustomConfig.mockResolvedValue({});
    await expect(isDomainAllowed('test@domain1.com')).resolves.toBe(true);
  });

  it('should reject an email if it is falsy', async () => {
    getCustomConfig.mockResolvedValue({});
    await expect(isDomainAllowed('')).resolves.toBe(false);
  });

  it('should allow a domain if it is included in the allowedDomains', async () => {
    getCustomConfig.mockResolvedValue({
      registration: {
        allowedDomains: ['domain1.com', 'domain2.com'],
      },
    });
    await expect(isDomainAllowed('user@domain1.com')).resolves.toBe(true);
  });

  it('should reject a domain if it is not included in the allowedDomains', async () => {
    getCustomConfig.mockResolvedValue({
      registration: {
        allowedDomains: ['domain1.com', 'domain2.com'],
      },
    });
    await expect(isDomainAllowed('user@domain3.com')).resolves.toBe(false);
  });
});
@@ -3,6 +3,7 @@ const { isUserProvided, generateConfig } = require('~/server/utils');

const {
  OPENAI_API_KEY: openAIApiKey,
  AZURE_ASSISTANTS_API_KEY: azureAssistantsApiKey,
  ASSISTANTS_API_KEY: assistantsApiKey,
  AZURE_API_KEY: azureOpenAIApiKey,
  ANTHROPIC_API_KEY: anthropicApiKey,
@@ -13,6 +14,7 @@ const {
  OPENAI_REVERSE_PROXY,
  AZURE_OPENAI_BASEURL,
  ASSISTANTS_BASE_URL,
  AZURE_ASSISTANTS_BASE_URL,
} = process.env ?? {};

const useAzurePlugins = !!PLUGINS_USE_AZURE;
@@ -28,11 +30,20 @@ module.exports = {
  useAzurePlugins,
  userProvidedOpenAI,
  googleKey,
  [EModelEndpoint.openAI]: generateConfig(openAIApiKey, OPENAI_REVERSE_PROXY),
  [EModelEndpoint.assistants]: generateConfig(assistantsApiKey, ASSISTANTS_BASE_URL, true),
  [EModelEndpoint.azureOpenAI]: generateConfig(azureOpenAIApiKey, AZURE_OPENAI_BASEURL),
  [EModelEndpoint.chatGPTBrowser]: generateConfig(chatGPTToken),
  [EModelEndpoint.anthropic]: generateConfig(anthropicApiKey),
  [EModelEndpoint.bingAI]: generateConfig(bingToken),
  [EModelEndpoint.anthropic]: generateConfig(anthropicApiKey),
  [EModelEndpoint.chatGPTBrowser]: generateConfig(chatGPTToken),
  [EModelEndpoint.openAI]: generateConfig(openAIApiKey, OPENAI_REVERSE_PROXY),
  [EModelEndpoint.azureOpenAI]: generateConfig(azureOpenAIApiKey, AZURE_OPENAI_BASEURL),
  [EModelEndpoint.assistants]: generateConfig(
    assistantsApiKey,
    ASSISTANTS_BASE_URL,
    EModelEndpoint.assistants,
  ),
  [EModelEndpoint.azureAssistants]: generateConfig(
    azureAssistantsApiKey,
    AZURE_ASSISTANTS_BASE_URL,
    EModelEndpoint.azureAssistants,
  ),
},
};
@@ -6,17 +6,24 @@ const handleRateLimits = (rateLimits) => {
  if (!rateLimits) {
    return;
  }
  const { fileUploads } = rateLimits;
  if (!fileUploads) {
    return;
  const { fileUploads, conversationsImport } = rateLimits;
  if (fileUploads) {
    process.env.FILE_UPLOAD_IP_MAX = fileUploads.ipMax ?? process.env.FILE_UPLOAD_IP_MAX;
    process.env.FILE_UPLOAD_IP_WINDOW =
      fileUploads.ipWindowInMinutes ?? process.env.FILE_UPLOAD_IP_WINDOW;
    process.env.FILE_UPLOAD_USER_MAX = fileUploads.userMax ?? process.env.FILE_UPLOAD_USER_MAX;
    process.env.FILE_UPLOAD_USER_WINDOW =
      fileUploads.userWindowInMinutes ?? process.env.FILE_UPLOAD_USER_WINDOW;
  }

  process.env.FILE_UPLOAD_IP_MAX = fileUploads.ipMax ?? process.env.FILE_UPLOAD_IP_MAX;
  process.env.FILE_UPLOAD_IP_WINDOW =
    fileUploads.ipWindowInMinutes ?? process.env.FILE_UPLOAD_IP_WINDOW;
  process.env.FILE_UPLOAD_USER_MAX = fileUploads.userMax ?? process.env.FILE_UPLOAD_USER_MAX;
  process.env.FILE_UPLOAD_USER_WINDOW =
    fileUploads.userWindowInMinutes ?? process.env.FILE_UPLOAD_USER_WINDOW;
  if (conversationsImport) {
    process.env.IMPORT_IP_MAX = conversationsImport.ipMax ?? process.env.IMPORT_IP_MAX;
    process.env.IMPORT_IP_WINDOW =
      conversationsImport.ipWindowInMinutes ?? process.env.IMPORT_IP_WINDOW;
    process.env.IMPORT_USER_MAX = conversationsImport.userMax ?? process.env.IMPORT_USER_MAX;
    process.env.IMPORT_USER_WINDOW =
      conversationsImport.userWindowInMinutes ?? process.env.IMPORT_USER_WINDOW;
  }
};

module.exports = handleRateLimits;
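To make the mapping concrete, a sketch of the function's effect under the new conversationsImport branch; values are illustrative and mirror the spec tests above:

// Hypothetical usage of handleRateLimits with a parsed librechat.yaml rateLimits section.
handleRateLimits({
  conversationsImport: { ipMax: '150', ipWindowInMinutes: '60', userMax: '50', userWindowInMinutes: '30' },
});
// Afterwards: process.env.IMPORT_IP_MAX === '150', process.env.IMPORT_IP_WINDOW === '60', etc.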
@@ -53,7 +53,7 @@ async function loadConfigEndpoints(req) {

  if (req.app.locals[EModelEndpoint.azureOpenAI]?.assistants) {
    /** @type {Omit<TConfig, 'order'>} */
    endpointsConfig[EModelEndpoint.assistants] = {
    endpointsConfig[EModelEndpoint.azureAssistants] = {
      userProvide: false,
    };
  }

@@ -30,7 +30,7 @@ async function loadConfigModels(req) {
  }

  if (azureEndpoint?.assistants && azureConfig.assistantModels) {
    modelsConfig[EModelEndpoint.assistants] = azureConfig.assistantModels;
    modelsConfig[EModelEndpoint.azureAssistants] = azureConfig.assistantModels;
  }

  if (!Array.isArray(endpoints[EModelEndpoint.custom])) {

@@ -46,6 +46,15 @@ const exampleConfig = {
          fetch: false,
        },
      },
      {
        name: 'MLX',
        apiKey: 'user_provided',
        baseURL: 'http://localhost:8080/v1/',
        models: {
          default: ['Meta-Llama-3-8B-Instruct-4bit'],
          fetch: false,
        },
      },
    ],
  },
};
@@ -37,11 +37,17 @@ async function loadCustomConfig() {
  if (!customConfig) {
    i === 0 &&
      logger.info(
        'Custom config file missing or YAML format invalid.\n\nCheck out the latest config file guide for configurable options and features.\nhttps://docs.librechat.ai/install/configuration/custom_config.html\n\n',
        'Custom config file missing or YAML format invalid.\n\nCheck out the latest config file guide for configurable options and features.\nhttps://www.librechat.ai/docs/configuration/librechat_yaml\n\n',
      );
    i === 0 && i++;
    return null;
  }

  if (customConfig.reason || customConfig.stack) {
    i === 0 && logger.error('Config file YAML format is invalid:', customConfig);
    i === 0 && i++;
    return null;
  }
}

if (typeof customConfig === 'string') {
@@ -66,7 +72,7 @@ Please specify a correct \`imageOutputType\` value (case-sensitive).
- ${EImageOutputType.WEBP}

Refer to the latest config file guide for more information:
https://docs.librechat.ai/install/configuration/custom_config.html`,
https://www.librechat.ai/docs/configuration/librechat_yaml`,
    );
  }
  if (!result.success) {
@@ -84,6 +90,10 @@ Please specify a correct \`imageOutputType\` value (case-sensitive).
    await cache.set(CacheKeys.CUSTOM_CONFIG, customConfig);
  }

  if (result.data.modelSpecs) {
    customConfig.modelSpecs = result.data.modelSpecs;
  }

  return customConfig;
}

@@ -9,13 +9,15 @@ const { config } = require('./EndpointService');
 */
async function loadDefaultEndpointsConfig(req) {
  const { google, gptPlugins } = await loadAsyncEndpoints(req);
  const { openAI, assistants, bingAI, anthropic, azureOpenAI, chatGPTBrowser } = config;
  const { openAI, assistants, azureAssistants, bingAI, anthropic, azureOpenAI, chatGPTBrowser } =
    config;

  const enabledEndpoints = getEnabledEndpoints();

  const endpointConfig = {
    [EModelEndpoint.openAI]: openAI,
    [EModelEndpoint.assistants]: assistants,
    [EModelEndpoint.azureAssistants]: azureAssistants,
    [EModelEndpoint.azureOpenAI]: azureOpenAI,
    [EModelEndpoint.google]: google,
    [EModelEndpoint.bingAI]: bingAI,

@@ -25,6 +25,7 @@ async function loadDefaultModels(req) {
    plugins: true,
  });
  const assistants = await getOpenAIModels({ assistants: true });
  const azureAssistants = await getOpenAIModels({ azureAssistants: true });

  return {
    [EModelEndpoint.openAI]: openAI,
@@ -35,6 +36,7 @@ async function loadDefaultModels(req) {
    [EModelEndpoint.bingAI]: ['BingAI', 'Sydney'],
    [EModelEndpoint.chatGPTBrowser]: chatGPTBrowser,
    [EModelEndpoint.assistants]: assistants,
    [EModelEndpoint.azureAssistants]: azureAssistants,
  };
}

@@ -1,10 +1,23 @@
const buildOptions = (endpoint, parsedBody) => {
  const { modelLabel, promptPrefix, resendFiles, ...rest } = parsedBody;
  const {
    modelLabel,
    promptPrefix,
    maxContextTokens,
    resendFiles,
    iconURL,
    greeting,
    spec,
    ...rest
  } = parsedBody;
  const endpointOption = {
    endpoint,
    modelLabel,
    promptPrefix,
    resendFiles,
    iconURL,
    greeting,
    spec,
    maxContextTokens,
    modelOptions: {
      ...rest,
    },
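As a quick illustration of the reshaping these buildOptions variants all perform (field values are placeholders):

// Hypothetical input/output for buildOptions.
const endpointOption = buildOptions('anthropic', {
  modelLabel: 'Claude',
  promptPrefix: 'You are helpful.',
  maxContextTokens: 4096,
  model: 'claude-3-opus-20240229',
  temperature: 0.7,
});
// => { endpoint, modelLabel, promptPrefix, ..., modelOptions: { model, temperature } }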
@@ -1,10 +1,13 @@
const buildOptions = (endpoint, parsedBody) => {
  // eslint-disable-next-line no-unused-vars
  const { promptPrefix, assistant_id, ...rest } = parsedBody;
  const { promptPrefix, assistant_id, iconURL, greeting, spec, ...rest } = parsedBody;
  const endpointOption = {
    endpoint,
    promptPrefix,
    assistant_id,
    iconURL,
    greeting,
    spec,
    modelOptions: {
      ...rest,
    },
@@ -2,95 +2,8 @@ const addTitle = require('./addTitle');
const buildOptions = require('./buildOptions');
const initializeClient = require('./initializeClient');

/**
 * Asynchronously lists assistants based on provided query parameters.
 *
 * Initializes the client with the current request and response objects and lists assistants
 * according to the query parameters. This function abstracts the logic for non-Azure paths.
 *
 * @async
 * @param {object} params - The parameters object.
 * @param {object} params.req - The request object, used for initializing the client.
 * @param {object} params.res - The response object, used for initializing the client.
 * @param {object} params.query - The query parameters to list assistants (e.g., limit, order).
 * @returns {Promise<object>} A promise that resolves to the response from the `openai.beta.assistants.list` method call.
 */
const listAssistants = async ({ req, res, query }) => {
  const { openai } = await initializeClient({ req, res });
  return openai.beta.assistants.list(query);
};

/**
 * Asynchronously lists assistants for Azure configured groups.
 *
 * Iterates through Azure configured assistant groups, initializes the client with the current request and response objects,
 * lists assistants based on the provided query parameters, and merges their data alongside the model information into a single array.
 *
 * @async
 * @param {object} params - The parameters object.
 * @param {object} params.req - The request object, used for initializing the client and manipulating the request body.
 * @param {object} params.res - The response object, used for initializing the client.
 * @param {TAzureConfig} params.azureConfig - The Azure configuration object containing assistantGroups and groupMap.
 * @param {object} params.query - The query parameters to list assistants (e.g., limit, order).
 * @returns {Promise<AssistantListResponse>} A promise that resolves to an array of assistant data merged with their respective model information.
 */
const listAssistantsForAzure = async ({ req, res, azureConfig = {}, query }) => {
  /** @type {Array<[string, TAzureModelConfig]>} */
  const groupModelTuples = [];
  const promises = [];
  /** @type {Array<TAzureGroup>} */
  const groups = [];

  const { groupMap, assistantGroups } = azureConfig;

  for (const groupName of assistantGroups) {
    const group = groupMap[groupName];
    groups.push(group);

    const currentModelTuples = Object.entries(group?.models);
    groupModelTuples.push(currentModelTuples);

    /* The specified model is only necessary to
      fetch assistants for the shared instance */
    req.body.model = currentModelTuples[0][0];
    promises.push(listAssistants({ req, res, query }));
  }

  const resolvedQueries = await Promise.all(promises);
  const data = resolvedQueries.flatMap((res, i) =>
    res.data.map((assistant) => {
      const deploymentName = assistant.model;
      const currentGroup = groups[i];
      const currentModelTuples = groupModelTuples[i];
      const firstModel = currentModelTuples[0][0];

      if (currentGroup.deploymentName === deploymentName) {
        return { ...assistant, model: firstModel };
      }

      for (const [model, modelConfig] of currentModelTuples) {
        if (modelConfig.deploymentName === deploymentName) {
          return { ...assistant, model };
        }
      }

      return { ...assistant, model: firstModel };
    }),
  );

  return {
    first_id: data[0]?.id,
    last_id: data[data.length - 1]?.id,
    object: 'list',
    has_more: false,
    data,
  };
};

module.exports = {
  addTitle,
  buildOptions,
  initializeClient,
  listAssistants,
  listAssistantsForAzure,
};
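A minimal sketch of how the Azure listing helper might be invoked; the azureConfig shape follows the TAzureConfig fields referenced above, and the concrete values are assumptions:

// Hypothetical usage: merge assistants across configured Azure groups.
const list = await listAssistantsForAzure({
  req,
  res,
  azureConfig: req.app.locals[EModelEndpoint.azureOpenAI], // contains groupMap, assistantGroups
  query: { limit: 100, order: 'desc' },
});
console.log(list.object, list.data.length); // 'list', merged across groups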
@@ -1,11 +1,6 @@
const OpenAI = require('openai');
const { HttpsProxyAgent } = require('https-proxy-agent');
const {
  ErrorTypes,
  EModelEndpoint,
  resolveHeaders,
  mapModelToAzureConfig,
} = require('librechat-data-provider');
const { ErrorTypes, EModelEndpoint } = require('librechat-data-provider');
const {
  getUserKeyValues,
  getUserKeyExpiry,
@@ -13,9 +8,8 @@ const {
} = require('~/server/services/UserService');
const OpenAIClient = require('~/app/clients/OpenAIClient');
const { isUserProvided } = require('~/server/utils');
const { constructAzureURL } = require('~/utils');

const initializeClient = async ({ req, res, endpointOption, initAppClient = false }) => {
const initializeClient = async ({ req, res, endpointOption, version, initAppClient = false }) => {
  const { PROXY, OPENAI_ORGANIZATION, ASSISTANTS_API_KEY, ASSISTANTS_BASE_URL } = process.env;

  const userProvidesKey = isUserProvided(ASSISTANTS_API_KEY);
@@ -34,7 +28,11 @@ const initializeClient = async ({ req, res, endpointOption, initAppClient = fals
  let apiKey = userProvidesKey ? userValues.apiKey : ASSISTANTS_API_KEY;
  let baseURL = userProvidesURL ? userValues.baseURL : ASSISTANTS_BASE_URL;

  const opts = {};
  const opts = {
    defaultHeaders: {
      'OpenAI-Beta': `assistants=${version}`,
    },
  };

  const clientOptions = {
    reverseProxyUrl: baseURL ?? null,
@@ -44,54 +42,6 @@ const initializeClient = async ({ req, res, endpointOption, initAppClient = fals
    ...endpointOption,
  };

  /** @type {TAzureConfig | undefined} */
  const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];

  /** @type {AzureOptions | undefined} */
  let azureOptions;

  if (azureConfig && azureConfig.assistants) {
    const { modelGroupMap, groupMap, assistantModels } = azureConfig;
    const modelName = req.body.model ?? req.query.model ?? assistantModels[0];
    const {
      azureOptions: currentOptions,
      baseURL: azureBaseURL,
      headers = {},
      serverless,
    } = mapModelToAzureConfig({
      modelName,
      modelGroupMap,
      groupMap,
    });

    azureOptions = currentOptions;

    baseURL = constructAzureURL({
      baseURL: azureBaseURL ?? 'https://${INSTANCE_NAME}.openai.azure.com/openai',
      azureOptions,
    });

    apiKey = azureOptions.azureOpenAIApiKey;
    opts.defaultQuery = { 'api-version': azureOptions.azureOpenAIApiVersion };
    opts.defaultHeaders = resolveHeaders({ ...headers, 'api-key': apiKey });
    opts.model = azureOptions.azureOpenAIApiDeploymentName;

    if (initAppClient) {
      clientOptions.titleConvo = azureConfig.titleConvo;
      clientOptions.titleModel = azureConfig.titleModel;
      clientOptions.titleMethod = azureConfig.titleMethod ?? 'completion';

      const groupName = modelGroupMap[modelName].group;
      clientOptions.addParams = azureConfig.groupMap[groupName].addParams;
      clientOptions.dropParams = azureConfig.groupMap[groupName].dropParams;
      clientOptions.forcePrompt = azureConfig.groupMap[groupName].forcePrompt;

      clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
      clientOptions.headers = opts.defaultHeaders;
      clientOptions.azure = !serverless && azureOptions;
    }
  }

  if (userProvidesKey && !apiKey) {
    throw new Error(
      JSON.stringify({
@@ -125,10 +75,6 @@ const initializeClient = async ({ req, res, endpointOption, initAppClient = fals
  openai.req = req;
  openai.res = res;

  if (azureOptions) {
    openai.locals = { ...(openai.locals ?? {}), azureOptions };
  }

  if (endpointOption && initAppClient) {
    const client = new OpenAIClient(apiKey, clientOptions);
    return {
@@ -0,0 +1,19 @@
const buildOptions = (endpoint, parsedBody) => {
  // eslint-disable-next-line no-unused-vars
  const { promptPrefix, assistant_id, iconURL, greeting, spec, ...rest } = parsedBody;
  const endpointOption = {
    endpoint,
    promptPrefix,
    assistant_id,
    iconURL,
    greeting,
    spec,
    modelOptions: {
      ...rest,
    },
  };

  return endpointOption;
};

module.exports = buildOptions;

7 api/server/services/Endpoints/azureAssistants/index.js Normal file
@@ -0,0 +1,7 @@
const buildOptions = require('./buildOptions');
const initializeClient = require('./initializeClient');

module.exports = {
  buildOptions,
  initializeClient,
};
@@ -0,0 +1,195 @@
const OpenAI = require('openai');
const { HttpsProxyAgent } = require('https-proxy-agent');
const {
  ErrorTypes,
  EModelEndpoint,
  resolveHeaders,
  mapModelToAzureConfig,
} = require('librechat-data-provider');
const {
  getUserKeyValues,
  getUserKeyExpiry,
  checkUserKeyExpiry,
} = require('~/server/services/UserService');
const OpenAIClient = require('~/app/clients/OpenAIClient');
const { isUserProvided } = require('~/server/utils');
const { constructAzureURL } = require('~/utils');

class Files {
  constructor(client) {
    this._client = client;
  }
  /**
   * Create an assistant file by attaching a
   * [File](https://platform.openai.com/docs/api-reference/files) to an
   * [assistant](https://platform.openai.com/docs/api-reference/assistants).
   */
  create(assistantId, body, options) {
    return this._client.post(`/assistants/${assistantId}/files`, {
      body,
      ...options,
      headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
    });
  }

  /**
   * Retrieves an AssistantFile.
   */
  retrieve(assistantId, fileId, options) {
    return this._client.get(`/assistants/${assistantId}/files/${fileId}`, {
      ...options,
      headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
    });
  }

  /**
   * Delete an assistant file.
   */
  del(assistantId, fileId, options) {
    return this._client.delete(`/assistants/${assistantId}/files/${fileId}`, {
      ...options,
      headers: { 'OpenAI-Beta': 'assistants=v1', ...options?.headers },
    });
  }
}

const initializeClient = async ({ req, res, version, endpointOption, initAppClient = false }) => {
  const { PROXY, OPENAI_ORGANIZATION, AZURE_ASSISTANTS_API_KEY, AZURE_ASSISTANTS_BASE_URL } =
    process.env;

  const userProvidesKey = isUserProvided(AZURE_ASSISTANTS_API_KEY);
  const userProvidesURL = isUserProvided(AZURE_ASSISTANTS_BASE_URL);

  let userValues = null;
  if (userProvidesKey || userProvidesURL) {
    const expiresAt = await getUserKeyExpiry({
      userId: req.user.id,
      name: EModelEndpoint.azureAssistants,
    });
    checkUserKeyExpiry(expiresAt, EModelEndpoint.azureAssistants);
    userValues = await getUserKeyValues({
      userId: req.user.id,
      name: EModelEndpoint.azureAssistants,
    });
  }

  let apiKey = userProvidesKey ? userValues.apiKey : AZURE_ASSISTANTS_API_KEY;
  let baseURL = userProvidesURL ? userValues.baseURL : AZURE_ASSISTANTS_BASE_URL;

  const opts = {};

  const clientOptions = {
    reverseProxyUrl: baseURL ?? null,
    proxy: PROXY ?? null,
    req,
    res,
    ...endpointOption,
  };

  /** @type {TAzureConfig | undefined} */
  const azureConfig = req.app.locals[EModelEndpoint.azureOpenAI];

  /** @type {AzureOptions | undefined} */
  let azureOptions;

  if (azureConfig && azureConfig.assistants) {
    const { modelGroupMap, groupMap, assistantModels } = azureConfig;
    const modelName = req.body.model ?? req.query.model ?? assistantModels[0];
    const {
      azureOptions: currentOptions,
      baseURL: azureBaseURL,
      headers = {},
      serverless,
    } = mapModelToAzureConfig({
      modelName,
      modelGroupMap,
      groupMap,
    });

    azureOptions = currentOptions;

    baseURL = constructAzureURL({
      baseURL: azureBaseURL ?? 'https://${INSTANCE_NAME}.openai.azure.com/openai',
      azureOptions,
    });

    apiKey = azureOptions.azureOpenAIApiKey;
    opts.defaultQuery = { 'api-version': azureOptions.azureOpenAIApiVersion };
    opts.defaultHeaders = resolveHeaders({
      ...headers,
      'api-key': apiKey,
      'OpenAI-Beta': `assistants=${version}`,
    });
    opts.model = azureOptions.azureOpenAIApiDeploymentName;

    if (initAppClient) {
      clientOptions.titleConvo = azureConfig.titleConvo;
      clientOptions.titleModel = azureConfig.titleModel;
      clientOptions.titleMethod = azureConfig.titleMethod ?? 'completion';

      const groupName = modelGroupMap[modelName].group;
      clientOptions.addParams = azureConfig.groupMap[groupName].addParams;
      clientOptions.dropParams = azureConfig.groupMap[groupName].dropParams;
      clientOptions.forcePrompt = azureConfig.groupMap[groupName].forcePrompt;

      clientOptions.reverseProxyUrl = baseURL ?? clientOptions.reverseProxyUrl;
      clientOptions.headers = opts.defaultHeaders;
      clientOptions.azure = !serverless && azureOptions;
    }
  }

  if (userProvidesKey && !apiKey) {
    throw new Error(
      JSON.stringify({
        type: ErrorTypes.NO_USER_KEY,
      }),
    );
  }

  if (!apiKey) {
    throw new Error('Assistants API key not provided. Please provide it again.');
  }

  if (baseURL) {
    opts.baseURL = baseURL;
  }

  if (PROXY) {
    opts.httpAgent = new HttpsProxyAgent(PROXY);
  }

  if (OPENAI_ORGANIZATION) {
    opts.organization = OPENAI_ORGANIZATION;
  }

  /** @type {OpenAIClient} */
  const openai = new OpenAI({
    apiKey,
    ...opts,
  });

  openai.beta.assistants.files = new Files(openai);

  openai.req = req;
  openai.res = res;

  if (azureOptions) {
    openai.locals = { ...(openai.locals ?? {}), azureOptions };
  }

  if (endpointOption && initAppClient) {
    const client = new OpenAIClient(apiKey, clientOptions);
    return {
      client,
      openai,
      openAIApiKey: apiKey,
    };
  }

  return {
    openai,
    openAIApiKey: apiKey,
  };
};

module.exports = initializeClient;
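For orientation, a sketch of initializing the Azure Assistants client with an API version; the 'v1' value mirrors the OpenAI-Beta headers above, and the call site is an assumption:

// Hypothetical usage inside a request handler.
const { openai, openAIApiKey } = await initializeClient({ req, res, version: 'v1' });
await openai.beta.assistants.files.create('asst_abc123', { file_id: 'file_abc123' });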
@@ -0,0 +1,112 @@
// const OpenAI = require('openai');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { ErrorTypes } = require('librechat-data-provider');
const { getUserKey, getUserKeyExpiry, getUserKeyValues } = require('~/server/services/UserService');
const initializeClient = require('./initializeClient');
// const { OpenAIClient } = require('~/app');

jest.mock('~/server/services/UserService', () => ({
  getUserKey: jest.fn(),
  getUserKeyExpiry: jest.fn(),
  getUserKeyValues: jest.fn(),
  checkUserKeyExpiry: jest.requireActual('~/server/services/UserService').checkUserKeyExpiry,
}));

const today = new Date();
const tenDaysFromToday = new Date(today.setDate(today.getDate() + 10));
const isoString = tenDaysFromToday.toISOString();

describe('initializeClient', () => {
  // Set up environment variables
  const originalEnvironment = process.env;
  const app = {
    locals: {},
  };

  beforeEach(() => {
    jest.resetModules(); // Clears the cache
    process.env = { ...originalEnvironment }; // Make a copy
  });

  afterAll(() => {
    process.env = originalEnvironment; // Restore original env vars
  });

  test('initializes OpenAI client with default API key and URL', async () => {
    process.env.AZURE_ASSISTANTS_API_KEY = 'default-api-key';
    process.env.AZURE_ASSISTANTS_BASE_URL = 'https://default.api.url';

    // Assuming 'isUserProvided' to return false for this test case
    jest.mock('~/server/utils', () => ({
      isUserProvided: jest.fn().mockReturnValueOnce(false),
    }));

    const req = { user: { id: 'user123' }, app };
    const res = {};

    const { openai, openAIApiKey } = await initializeClient({ req, res });
    expect(openai.apiKey).toBe('default-api-key');
    expect(openAIApiKey).toBe('default-api-key');
    expect(openai.baseURL).toBe('https://default.api.url');
  });

  test('initializes OpenAI client with user-provided API key and URL', async () => {
    process.env.AZURE_ASSISTANTS_API_KEY = 'user_provided';
    process.env.AZURE_ASSISTANTS_BASE_URL = 'user_provided';

    getUserKeyValues.mockResolvedValue({ apiKey: 'user-api-key', baseURL: 'https://user.api.url' });
    getUserKeyExpiry.mockResolvedValue(isoString);

    const req = { user: { id: 'user123' }, app };
    const res = {};

    const { openai, openAIApiKey } = await initializeClient({ req, res });
    expect(openAIApiKey).toBe('user-api-key');
    expect(openai.apiKey).toBe('user-api-key');
    expect(openai.baseURL).toBe('https://user.api.url');
  });

  test('throws error for invalid JSON in user-provided values', async () => {
    process.env.AZURE_ASSISTANTS_API_KEY = 'user_provided';
    getUserKey.mockResolvedValue('invalid-json');
    getUserKeyExpiry.mockResolvedValue(isoString);
    getUserKeyValues.mockImplementation(() => {
      let userValues = getUserKey();
      try {
        userValues = JSON.parse(userValues);
      } catch (e) {
        throw new Error(
          JSON.stringify({
            type: ErrorTypes.INVALID_USER_KEY,
          }),
        );
      }
      return userValues;
    });

    const req = { user: { id: 'user123' } };
    const res = {};

    await expect(initializeClient({ req, res })).rejects.toThrow(/invalid_user_key/);
  });

  test('throws error if API key is not provided', async () => {
    delete process.env.AZURE_ASSISTANTS_API_KEY; // Simulate missing API key

    const req = { user: { id: 'user123' }, app };
    const res = {};

    await expect(initializeClient({ req, res })).rejects.toThrow(/Assistants API key not/);
  });

  test('initializes OpenAI client with proxy configuration', async () => {
    process.env.AZURE_ASSISTANTS_API_KEY = 'test-key';
    process.env.PROXY = 'http://proxy.server';

    const req = { user: { id: 'user123' }, app };
    const res = {};

    const { openai } = await initializeClient({ req, res });
    expect(openai.httpAgent).toBeInstanceOf(HttpsProxyAgent);
  });
});
@@ -1,5 +1,15 @@
const buildOptions = (endpoint, parsedBody, endpointType) => {
  const { chatGptLabel, promptPrefix, resendFiles, imageDetail, ...rest } = parsedBody;
  const {
    chatGptLabel,
    promptPrefix,
    maxContextTokens,
    resendFiles,
    imageDetail,
    iconURL,
    greeting,
    spec,
    ...rest
  } = parsedBody;
  const endpointOption = {
    endpoint,
    endpointType,
@@ -7,6 +17,10 @@ const buildOptions = (endpoint, parsedBody, endpointType) => {
    promptPrefix,
    resendFiles,
    imageDetail,
    iconURL,
    greeting,
    spec,
    maxContextTokens,
    modelOptions: {
      ...rest,
    },
@@ -1,10 +1,13 @@
const buildOptions = (endpoint, parsedBody) => {
  const { examples, modelLabel, promptPrefix, ...rest } = parsedBody;
  const { examples, modelLabel, promptPrefix, iconURL, greeting, spec, ...rest } = parsedBody;
  const endpointOption = {
    examples,
    endpoint,
    modelLabel,
    promptPrefix,
    iconURL,
    greeting,
    spec,
    modelOptions: {
      ...rest,
    },
@@ -4,25 +4,26 @@ const buildOptions = (endpoint, parsedBody) => {
    promptPrefix,
    agentOptions,
    tools,
    model,
    temperature,
    top_p,
    presence_penalty,
    frequency_penalty,
    iconURL,
    greeting,
    spec,
    maxContextTokens,
    ...modelOptions
  } = parsedBody;
  const endpointOption = {
    endpoint,
    tools: tools.map((tool) => tool.pluginKey) ?? [],
    tools:
      tools
        .map((tool) => tool?.pluginKey ?? tool)
        .filter((toolName) => typeof toolName === 'string') ?? [],
    chatGptLabel,
    promptPrefix,
    agentOptions,
    modelOptions: {
      model,
      temperature,
      top_p,
      presence_penalty,
      frequency_penalty,
    },
    iconURL,
    greeting,
    spec,
    maxContextTokens,
    modelOptions,
  };

  return endpointOption;
@@ -1,11 +1,25 @@
const buildOptions = (endpoint, parsedBody) => {
  const { chatGptLabel, promptPrefix, resendFiles, imageDetail, ...rest } = parsedBody;
  const {
    chatGptLabel,
    promptPrefix,
    maxContextTokens,
    resendFiles,
    imageDetail,
    iconURL,
    greeting,
    spec,
    ...rest
  } = parsedBody;
  const endpointOption = {
    endpoint,
    chatGptLabel,
    promptPrefix,
    resendFiles,
    imageDetail,
    iconURL,
    greeting,
    spec,
    maxContextTokens,
    modelOptions: {
      ...rest,
    },
@@ -180,7 +180,15 @@ const deleteFirebaseFile = async (req, file) => {
  if (!fileName.includes(req.user.id)) {
    throw new Error('Invalid file path');
  }
  await deleteFile('', fileName);
  try {
    await deleteFile('', fileName);
  } catch (error) {
    logger.error('Error deleting file from Firebase:', error);
    if (error.code === 'storage/object-not-found') {
      return;
    }
    throw error;
  }
};

/**

@@ -14,9 +14,11 @@ const { logger } = require('~/config');
 * @returns {Promise<OpenAIFile>}
 */
async function uploadOpenAIFile({ req, file, openai }) {
  const { height, width } = req.body;
  const isImage = height && width;
  const uploadedFile = await openai.files.create({
    file: fs.createReadStream(file.path),
    purpose: FilePurpose.Assistants,
    purpose: isImage ? FilePurpose.Vision : FilePurpose.Assistants,
  });

  logger.debug(
@@ -34,7 +36,7 @@ async function uploadOpenAIFile({ req, file, openai }) {
    await sleep(sleepTime);
  }

  return uploadedFile;
  return isImage ? { ...uploadedFile, height, width } : uploadedFile;
}

/**
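To illustrate the new branch (identifiers are placeholders): when the request body carries image dimensions, the file is uploaded with the vision purpose and the dimensions are echoed back on the result.

// Hypothetical usage: req.body.height/width mark the upload as an image.
const uploaded = await uploadOpenAIFile({ req, file, openai });
// image uploads => purpose FilePurpose.Vision and { ...uploadedFile, height, width } returned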
@@ -23,7 +23,7 @@ async function fetchImageToBase64(url) {
  }
}

const base64Only = new Set([EModelEndpoint.google, EModelEndpoint.anthropic]);
const base64Only = new Set([EModelEndpoint.google, EModelEndpoint.anthropic, 'Ollama', 'ollama']);

/**
 * Encodes and formats the given files.

@@ -10,10 +10,13 @@ const {
  EModelEndpoint,
  mergeFileConfig,
  hostImageIdSuffix,
  checkOpenAIStorage,
  hostImageNamePrefix,
  isAssistantsEndpoint,
} = require('librechat-data-provider');
const { addResourceFileId, deleteResourceFileId } = require('~/server/controllers/assistants/v2');
const { convertImage, resizeAndConvert } = require('~/server/services/Files/images');
const { initializeClient } = require('~/server/services/Endpoints/assistants');
const { getOpenAIClient } = require('~/server/controllers/assistants/helpers');
const { createFile, updateFileUsage, deleteFiles } = require('~/models/File');
const { LB_QueueAsyncCall } = require('~/server/utils/queue');
const { getStrategyFunctions } = require('./strategies');
@@ -34,14 +37,16 @@ const processFiles = async (files) => {
/**
 * Enqueues the delete operation to the leaky bucket queue if necessary, or adds it directly to promises.
 *
 * @param {Express.Request} req - The express request object.
 * @param {MongoFile} file - The file object to delete.
 * @param {Function} deleteFile - The delete file function.
 * @param {Promise[]} promises - The array of promises to await.
 * @param {OpenAI | undefined} [openai] - If an OpenAI file, the initialized OpenAI client.
 * @param {object} params - The passed parameters.
 * @param {Express.Request} params.req - The express request object.
 * @param {MongoFile} params.file - The file object to delete.
 * @param {Function} params.deleteFile - The delete file function.
 * @param {Promise[]} params.promises - The array of promises to await.
 * @param {string[]} params.resolvedFileIds - The array of file IDs successfully deleted.
 * @param {OpenAI | undefined} [params.openai] - If an OpenAI file, the initialized OpenAI client.
 */
function enqueueDeleteOperation(req, file, deleteFile, promises, openai) {
  if (file.source === FileSources.openai) {
function enqueueDeleteOperation({ req, file, deleteFile, promises, resolvedFileIds, openai }) {
  if (checkOpenAIStorage(file.source)) {
    // Enqueue to leaky bucket
    promises.push(
      new Promise((resolve, reject) => {
@@ -53,6 +58,7 @@ function enqueueDeleteOperation(req, file, deleteFile, promises, openai) {
            logger.error('Error deleting file from OpenAI source', err);
            reject(err);
          } else {
            resolvedFileIds.push(file.file_id);
            resolve(result);
          }
        },
@@ -62,10 +68,12 @@ function enqueueDeleteOperation(req, file, deleteFile, promises, openai) {
  } else {
    // Add directly to promises
    promises.push(
      deleteFile(req, file).catch((err) => {
        logger.error('Error deleting file', err);
        return Promise.reject(err);
      }),
      deleteFile(req, file)
        .then(() => resolvedFileIds.push(file.file_id))
        .catch((err) => {
          logger.error('Error deleting file', err);
          return Promise.reject(err);
        }),
    );
  }
}
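A sketch of the new object-style call; the arguments mirror the refactored signature above, and the surrounding variables are assumptions:

// Hypothetical call with the named-parameter signature.
enqueueDeleteOperation({ req, file, deleteFile, promises, resolvedFileIds, openai });
await Promise.allSettled(promises);
// resolvedFileIds now holds only the file_ids whose deletions succeeded.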
@@ -80,35 +88,71 @@ function enqueueDeleteOperation(req, file, deleteFile, promises, openai) {
 * @param {Express.Request} params.req - The express request object.
 * @param {DeleteFilesBody} params.req.body - The request body.
 * @param {string} [params.req.body.assistant_id] - The assistant ID if file uploaded is associated to an assistant.
 * @param {string} [params.req.body.tool_resource] - The tool resource if assistant file uploaded is associated to a tool resource.
 *
 * @returns {Promise<void>}
 */
const processDeleteRequest = async ({ req, files }) => {
  const file_ids = files.map((file) => file.file_id);

  const resolvedFileIds = [];
  const deletionMethods = {};
  const promises = [];
  promises.push(deleteFiles(file_ids));

  /** @type {OpenAI | undefined} */
  let openai;
  if (req.body.assistant_id) {
    ({ openai } = await initializeClient({ req }));
  /** @type {Record<string, OpenAI | undefined>} */
  const client = { [FileSources.openai]: undefined, [FileSources.azure]: undefined };
  const initializeClients = async () => {
    const openAIClient = await getOpenAIClient({
      req,
      overrideEndpoint: EModelEndpoint.assistants,
    });
    client[FileSources.openai] = openAIClient.openai;

    if (!req.app.locals[EModelEndpoint.azureOpenAI]?.assistants) {
      return;
    }

    const azureClient = await getOpenAIClient({
      req,
      overrideEndpoint: EModelEndpoint.azureAssistants,
    });
    client[FileSources.azure] = azureClient.openai;
  };

  if (req.body.assistant_id !== undefined) {
    await initializeClients();
  }

  for (const file of files) {
    const source = file.source ?? FileSources.local;

    if (source === FileSources.openai && !openai) {
      ({ openai } = await initializeClient({ req }));
    if (checkOpenAIStorage(source) && !client[source]) {
      await initializeClients();
    }

    if (req.body.assistant_id) {
    const openai = client[source];

    if (req.body.assistant_id && req.body.tool_resource) {
      promises.push(
        deleteResourceFileId({
          req,
          openai,
          file_id: file.file_id,
          assistant_id: req.body.assistant_id,
          tool_resource: req.body.tool_resource,
        }),
      );
    } else if (req.body.assistant_id) {
      promises.push(openai.beta.assistants.files.del(req.body.assistant_id, file.file_id));
    }

    if (deletionMethods[source]) {
      enqueueDeleteOperation(req, file, deletionMethods[source], promises, openai);
      enqueueDeleteOperation({
        req,
        file,
        deleteFile: deletionMethods[source],
        promises,
        resolvedFileIds,
        openai,
      });
      continue;
    }

@@ -118,10 +162,11 @@ const processDeleteRequest = async ({ req, files }) => {
    }

    deletionMethods[source] = deleteFile;
    enqueueDeleteOperation(req, file, deleteFile, promises, openai);
    enqueueDeleteOperation({ req, file, deleteFile, promises, resolvedFileIds, openai });
  }

  await Promise.allSettled(promises);
  await deleteFiles(resolvedFileIds);
};

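As a usage sketch (the files array follows the MongoFile fields referenced above; the values are placeholders):

// Hypothetical invocation: deletes files and, when assistant_id is present,
// detaches them via the OpenAI or Azure client chosen per file source.
await processDeleteRequest({
  req, // req.body may carry assistant_id and tool_resource
  files: [{ file_id: 'file_abc123', source: FileSources.openai }],
});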
/**
@@ -180,12 +225,13 @@ const processFileURL = async ({ fileStrategy, userId, URL, fileName, basePath, c
 *
 * @param {Object} params - The parameters object.
 * @param {Express.Request} params.req - The Express request object.
 * @param {Express.Response} params.res - The Express response object.
 * @param {Express.Response} [params.res] - The Express response object.
 * @param {Express.Multer.File} params.file - The uploaded file.
 * @param {ImageMetadata} params.metadata - Additional metadata for the file.
 * @param {boolean} params.returnFile - Whether to return the file metadata instead of sending the response as normal.
 * @returns {Promise<void>}
 */
const processImageFile = async ({ req, res, file, metadata }) => {
const processImageFile = async ({ req, res, file, metadata, returnFile = false }) => {
  const source = req.app.locals.fileStrategy;
  const { handleImageUpload } = getStrategyFunctions(source);
  const { file_id, temp_file_id, endpoint } = metadata;
@@ -213,6 +259,10 @@ const processImageFile = async ({ req, res, file, metadata }) => {
    },
    true,
  );

  if (returnFile) {
    return result;
  }
  res.status(200).json({ message: 'File uploaded and processed successfully', ...result });
};

@@ -274,28 +324,57 @@ const uploadImageBuffer = async ({ req, context, metadata = {}, resize = true })
  * @returns {Promise<void>}
  */
 const processFileUpload = async ({ req, res, file, metadata }) => {
-  const isAssistantUpload = metadata.endpoint === EModelEndpoint.assistants;
-  const source = isAssistantUpload ? FileSources.openai : FileSources.vectordb;
+  const isAssistantUpload = isAssistantsEndpoint(metadata.endpoint);
+  const assistantSource =
+    metadata.endpoint === EModelEndpoint.azureAssistants ? FileSources.azure : FileSources.openai;
+  const source = isAssistantUpload ? assistantSource : FileSources.vectordb;
   const { handleFileUpload } = getStrategyFunctions(source);
   const { file_id, temp_file_id } = metadata;
 
   /** @type {OpenAI | undefined} */
   let openai;
-  if (source === FileSources.openai) {
-    ({ openai } = await initializeClient({ req }));
+  if (checkOpenAIStorage(source)) {
+    ({ openai } = await getOpenAIClient({ req }));
   }
 
-  const { id, bytes, filename, filepath, embedded } = await handleFileUpload({
+  const {
+    id,
+    bytes,
+    filename,
+    filepath: _filepath,
+    embedded,
+    height,
+    width,
+  } = await handleFileUpload({
     req,
     file,
     file_id,
     openai,
   });
 
-  if (isAssistantUpload && !metadata.message_file) {
+  if (isAssistantUpload && !metadata.message_file && !metadata.tool_resource) {
     await openai.beta.assistants.files.create(metadata.assistant_id, {
       file_id: id,
     });
+  } else if (isAssistantUpload && !metadata.message_file) {
+    await addResourceFileId({
+      req,
+      openai,
+      file_id: id,
+      assistant_id: metadata.assistant_id,
+      tool_resource: metadata.tool_resource,
+    });
   }
 
+  let filepath = isAssistantUpload ? `${openai.baseURL}/files/${id}` : _filepath;
+  if (isAssistantUpload && file.mimetype.startsWith('image')) {
+    const result = await processImageFile({
+      req,
+      file,
+      metadata: { file_id: v4() },
+      returnFile: true,
+    });
+    filepath = result.filepath;
+  }
 
   const result = await createFile(
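`checkOpenAIStorage(source)` replaces the old `source === FileSources.openai` comparison so that Azure-hosted assistant files take the same OpenAI-client code path. Its definition is not part of this diff; a plausible sketch based on how it is called here:

```js
// Assumed shape, inferred from the call sites in this diff.
const checkOpenAIStorage = (source) =>
  source === FileSources.openai || source === FileSources.azure;
```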
@@ -304,13 +383,15 @@ const processFileUpload = async ({ req, res, file, metadata }) => {
       file_id: id ?? file_id,
       temp_file_id,
       bytes,
+      filepath,
       filename: filename ?? file.originalname,
-      filepath: isAssistantUpload ? `${openai.baseURL}/files/${id}` : filepath,
       context: isAssistantUpload ? FileContext.assistants : FileContext.message_attachment,
       model: isAssistantUpload ? req.body.model : undefined,
       type: file.mimetype,
       embedded,
       source,
+      height,
+      width,
     },
     true,
   );
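With `height` and `width` now surfaced by `handleFileUpload`, the persisted record gains image dimensions, and `filepath` is resolved before the `createFile` call instead of inline. Roughly, the record written for an assistants image upload looks like this (values illustrative only, assuming the usual `FileContext`/`FileSources` imports from the data provider):

```js
// Illustrative values only; the real record depends on the upload.
const exampleRecord = {
  file_id: '123e4567-e89b-12d3-a456-426614174000',
  bytes: 24576,
  filepath: 'https://api.openai.com/v1/files/file-abc123',
  filename: 'chart.png',
  context: FileContext.assistants,
  model: 'gpt-4-turbo',
  type: 'image/png',
  embedded: false,
  source: FileSources.azure, // or FileSources.openai / FileSources.vectordb
  height: 512,
  width: 768,
};
```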
@@ -340,7 +421,10 @@ const processOpenAIFile = async ({
     originalName ? `/${originalName}` : ''
   }`;
   const type = mime.getType(originalName ?? file_id);
 
+  const source =
+    openai.req.body.endpoint === EModelEndpoint.azureAssistants
+      ? FileSources.azure
+      : FileSources.openai;
   const file = {
     ..._file,
     type,
@@ -349,7 +433,7 @@ const processOpenAIFile = async ({
     usage: 1,
     user: userId,
     context: _file.purpose,
-    source: FileSources.openai,
+    source,
     model: openai.req.body.model,
     filename: originalName ?? file_id,
   };
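The `azureAssistants → FileSources.azure` ternary now appears in `processFileUpload`, `processOpenAIFile`, and `processOpenAIImageOutput` alike. If it keeps spreading, it could be collapsed into a tiny helper; a hypothetical refactor, not part of this change:

```js
// Hypothetical helper (not in this diff): one home for the repeated ternary.
const getAssistantsSource = (endpoint) =>
  endpoint === EModelEndpoint.azureAssistants ? FileSources.azure : FileSources.openai;

// e.g. const source = getAssistantsSource(openai.req.body.endpoint);
```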
@@ -394,12 +478,14 @@ const processOpenAIImageOutput = async ({ req, buffer, file_id, filename, fileEx
     filename: `${hostImageNamePrefix}${filename}`,
   };
   createFile(file, true);
+  const source =
+    req.body.endpoint === EModelEndpoint.azureAssistants ? FileSources.azure : FileSources.openai;
   createFile(
     {
       ...file,
       file_id,
       filename,
-      source: FileSources.openai,
+      source,
       type: mime.getType(fileExt),
     },
     true,
@@ -500,7 +586,12 @@ async function retrieveAndProcessFile({
  * Filters a file based on its size and the endpoint origin.
  *
  * @param {Object} params - The parameters for the function.
- * @param {Express.Request} params.req - The request object from Express.
+ * @param {object} params.req - The request object from Express.
+ * @param {string} [params.req.endpoint]
+ * @param {string} [params.req.file_id]
+ * @param {number} [params.req.width]
+ * @param {number} [params.req.height]
+ * @param {number} [params.req.version]
  * @param {Express.Multer.File} params.file - The file uploaded to the server via multer.
  * @param {boolean} [params.image] - Whether the file expected is an image.
  * @returns {void}
@@ -514,6 +605,10 @@ function filterFile({ req, file, image }) {
     throw new Error('No file_id provided');
   }
 
+  if (file.size === 0) {
+    throw new Error('Empty file uploaded');
+  }
+
   /* parse to validate api call, throws error on fail */
   isUUID.parse(file_id);
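`filterFile` now also rejects zero-byte uploads before validating the id; `isUUID.parse` throws on anything that is not a UUID, failing the request early. Assuming `isUUID` is a zod schema (as the `.parse` call suggests), an equivalent stands alone like this:

```js
const { z } = require('zod');

// .parse() returns the value on success and throws a ZodError otherwise.
const isUUID = z.string().uuid();

isUUID.parse('123e4567-e89b-12d3-a456-426614174000'); // ok
isUUID.parse('not-a-uuid'); // throws
```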
@@ -111,6 +111,8 @@ const getStrategyFunctions = (fileSource) => {
     return localStrategy();
   } else if (fileSource === FileSources.openai) {
     return openAIStrategy();
+  } else if (fileSource === FileSources.azure) {
+    return openAIStrategy();
   } else if (fileSource === FileSources.vectordb) {
     return vectorStrategy();
   } else {
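The added branch routes `FileSources.azure` to the same `openAIStrategy()`, reusing the OpenAI file-handling code for Azure. With four sources the if/else chain still reads fine; as a matter of taste it could also be a lookup table. A hypothetical alternative, not part of this change:

```js
// Hypothetical map-based dispatch; mirrors only the branches shown above.
const strategies = {
  [FileSources.local]: localStrategy,
  [FileSources.openai]: openAIStrategy,
  [FileSources.azure]: openAIStrategy, // Azure reuses the OpenAI strategy
  [FileSources.vectordb]: vectorStrategy,
};

const getStrategyFunctions = (fileSource) => {
  const strategy = strategies[fileSource];
  if (!strategy) {
    throw new Error(`Invalid file source: ${fileSource}`);
  }
  return strategy();
};
```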
Some files were not shown because too many files have changed in this diff.